1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
3 * All rights reserved.
4 */
5
6 #include <string.h>
7 #include <math.h>
8 #include <sys/param.h>
9 #include <rte_common.h>
10 #include <rte_errno.h>
11 #include <rte_log.h>
12
13 #include "tf_core.h"
14 #include "tf_util.h"
15 #include "tf_common.h"
16 #include "tf_em.h"
17 #include "tf_em_common.h"
18 #include "tf_msg.h"
19 #include "tfp.h"
20 #include "tf_device.h"
21 #include "tf_ext_flow_handle.h"
22 #include "hcapi_cfa.h"
23
24 #include "bnxt.h"
25
26
27 /** Invalid table scope id */
28 #define TF_TBL_SCOPE_INVALID 0xffffffff
29
30 /* Number of pointers per page_size */
31 #define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
32
33 /**
34 * Host or system
35 */
36 static enum tf_mem_type mem_type;
37
38 /* API defined in tf_em.h */
39 int
tf_create_tbl_pool_external(enum tf_dir dir,struct tf_tbl_scope_cb * tbl_scope_cb,uint32_t num_entries,uint32_t entry_sz_bytes)40 tf_create_tbl_pool_external(enum tf_dir dir,
41 struct tf_tbl_scope_cb *tbl_scope_cb,
42 uint32_t num_entries,
43 uint32_t entry_sz_bytes)
44 {
45 struct tfp_calloc_parms parms;
46 uint32_t i;
47 int32_t j;
48 int rc = 0;
49 struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
50
51 parms.nitems = num_entries;
52 parms.size = sizeof(uint32_t);
53 parms.alignment = 0;
54
55 if (tfp_calloc(&parms) != 0) {
56 TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
57 tf_dir_2_str(dir), strerror(ENOMEM));
58 return -ENOMEM;
59 }
60
61 /* Create empty stack
62 */
63 rc = stack_init(num_entries, parms.mem_va, pool);
64
65 if (rc != 0) {
66 TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
67 tf_dir_2_str(dir), strerror(-rc));
68 goto cleanup;
69 }
70
71 /* Save the malloced memory address so that it can
72 * be freed when the table scope is freed.
73 */
74 tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
75
76 /* Fill pool with indexes in reverse
77 */
78 j = (num_entries - 1) * entry_sz_bytes;
79
80 for (i = 0; i < num_entries; i++) {
81 rc = stack_push(pool, j);
82 if (rc != 0) {
83 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
84 tf_dir_2_str(dir), strerror(-rc));
85 goto cleanup;
86 }
87
88 if (j < 0) {
89 TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
90 dir, j);
91 goto cleanup;
92 }
93 j -= entry_sz_bytes;
94 }
95
96 if (!stack_is_full(pool)) {
97 rc = -EINVAL;
98 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
99 tf_dir_2_str(dir), strerror(-rc));
100 goto cleanup;
101 }
102 return 0;
103 cleanup:
104 tfp_free((void *)parms.mem_va);
105 return rc;
106 }
107
108 /**
109 * Destroy External Tbl pool of memory indexes.
110 *
111 * [in] dir
112 * direction
113 * [in] tbl_scope_cb
114 * pointer to the table scope
115 */
116 void
tf_destroy_tbl_pool_external(enum tf_dir dir,struct tf_tbl_scope_cb * tbl_scope_cb)117 tf_destroy_tbl_pool_external(enum tf_dir dir,
118 struct tf_tbl_scope_cb *tbl_scope_cb)
119 {
120 uint32_t *ext_act_pool_mem =
121 tbl_scope_cb->ext_act_pool_mem[dir];
122
123 tfp_free(ext_act_pool_mem);
124 }
125
126 /**
127 * Looks up table scope control block using tbl_scope_id from tf_session.
128 *
129 * [in] tfp
130 * Pointer to Truflow Handle
131 * [in] tbl_scope_id
132 * table scope id
133 *
134 * Return:
135 * - Pointer to the tf_tbl_scope_cb, if found.
136 * - (NULL) on failure, not found.
137 */
138 struct tf_tbl_scope_cb *
tf_em_ext_common_tbl_scope_find(struct tf * tfp,uint32_t tbl_scope_id)139 tf_em_ext_common_tbl_scope_find(struct tf *tfp,
140 uint32_t tbl_scope_id)
141 {
142 int rc;
143 struct em_ext_db *ext_db;
144 void *ext_ptr = NULL;
145 struct tf_tbl_scope_cb *tbl_scope_cb = NULL;
146 struct ll_entry *entry;
147
148 rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
149 if (rc)
150 return NULL;
151
152 ext_db = (struct em_ext_db *)ext_ptr;
153
154 for (entry = ext_db->tbl_scope_ll.head; entry != NULL;
155 entry = entry->next) {
156 tbl_scope_cb = (struct tf_tbl_scope_cb *)entry;
157 if (tbl_scope_cb->tbl_scope_id == tbl_scope_id)
158 return tbl_scope_cb;
159 }
160
161 return NULL;
162 }
163
164 /**
165 * Allocate External Tbl entry from the scope pool.
166 *
167 * [in] tfp
168 * Pointer to Truflow Handle
169 * [in] parms
170 * Allocation parameters
171 *
172 * Return:
173 * 0 - Success, entry allocated - no search support
174 * -ENOMEM -EINVAL -EOPNOTSUPP
175 * - Failure, entry not allocated, out of resources
176 */
177 int
tf_tbl_ext_alloc(struct tf * tfp,struct tf_tbl_alloc_parms * parms)178 tf_tbl_ext_alloc(struct tf *tfp,
179 struct tf_tbl_alloc_parms *parms)
180 {
181 int rc;
182 uint32_t index;
183 struct tf_tbl_scope_cb *tbl_scope_cb;
184 struct stack *pool;
185
186 TF_CHECK_PARMS2(tfp, parms);
187
188 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
189 if (tbl_scope_cb == NULL) {
190 TFP_DRV_LOG(ERR,
191 "%s, table scope not allocated\n",
192 tf_dir_2_str(parms->dir));
193 return -EINVAL;
194 }
195
196 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
197
198 /* Allocate an element
199 */
200 rc = stack_pop(pool, &index);
201
202 if (rc != 0) {
203 TFP_DRV_LOG(ERR,
204 "%s, Allocation failed, type:%d\n",
205 tf_dir_2_str(parms->dir),
206 parms->type);
207 return rc;
208 }
209
210 *parms->idx = index;
211 return rc;
212 }
213
214 /**
215 * Free External Tbl entry to the scope pool.
216 *
217 * [in] tfp
218 * Pointer to Truflow Handle
219 * [in] parms
220 * Allocation parameters
221 *
222 * Return:
223 * 0 - Success, entry freed
224 *
225 * - Failure, entry not successfully freed for these reasons
226 * -ENOMEM
227 * -EOPNOTSUPP
228 * -EINVAL
229 */
230 int
tf_tbl_ext_free(struct tf * tfp,struct tf_tbl_free_parms * parms)231 tf_tbl_ext_free(struct tf *tfp,
232 struct tf_tbl_free_parms *parms)
233 {
234 int rc = 0;
235 uint32_t index;
236 struct tf_tbl_scope_cb *tbl_scope_cb;
237 struct stack *pool;
238
239 TF_CHECK_PARMS2(tfp, parms);
240
241 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
242 if (tbl_scope_cb == NULL) {
243 TFP_DRV_LOG(ERR,
244 "%s, table scope error\n",
245 tf_dir_2_str(parms->dir));
246 return -EINVAL;
247 }
248 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
249
250 index = parms->idx;
251
252 rc = stack_push(pool, index);
253
254 if (rc != 0) {
255 TFP_DRV_LOG(ERR,
256 "%s, consistency error, stack full, type:%d, idx:%d\n",
257 tf_dir_2_str(parms->dir),
258 parms->type,
259 index);
260 }
261 return rc;
262 }
263
264 uint32_t
tf_em_get_key_mask(int num_entries)265 tf_em_get_key_mask(int num_entries)
266 {
267 uint32_t mask = num_entries - 1;
268
269 if (num_entries & TF_EM_MAX_MASK)
270 return 0;
271
272 if (num_entries > TF_EM_MAX_ENTRY)
273 return 0;
274
275 return mask;
276 }
277
278 void
tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr * result,uint8_t * in_key,struct cfa_p4_eem_64b_entry * key_entry)279 tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr *result,
280 uint8_t *in_key,
281 struct cfa_p4_eem_64b_entry *key_entry)
282 {
283 key_entry->hdr.word1 = result->word1;
284 key_entry->hdr.pointer = result->pointer;
285 memcpy(key_entry->key, in_key, TF_P4_HW_EM_KEY_MAX_SIZE + 4);
286 }
287
288
/**
 * Return the number of page table pages needed to
 * reference the given number of next level pages.
 *
 * [in] num_pages
 *   Number of EM pages
 *
 * [in] page_size
 *   Size of each EM page
 *
 * Returns:
 *   Number of EM page table pages
 */
static uint32_t
tf_em_page_tbl_pgcnt(uint32_t num_pages,
		     uint32_t page_size)
{
	/* Round up: a partially filled page of pointers still needs a page.
	 * (Removed an unreachable "return 0;" that followed this return.)
	 */
	return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
		       MAX_PAGE_PTRS(page_size);
}
310
311 /**
312 * Given the number of data pages, page_size and the maximum
313 * number of page table levels (already determined), size
314 * the number of page table pages required at each level.
315 *
316 * [in] max_lvl
317 * Max number of levels
318 *
319 * [in] num_data_pages
320 * Number of EM data pages
321 *
322 * [in] page_size
323 * Size of an EM page
324 *
325 * [out] *page_cnt
326 * EM page count
327 */
328 static void
tf_em_size_page_tbls(int max_lvl,uint64_t num_data_pages,uint32_t page_size,uint32_t * page_cnt)329 tf_em_size_page_tbls(int max_lvl,
330 uint64_t num_data_pages,
331 uint32_t page_size,
332 uint32_t *page_cnt)
333 {
334 if (max_lvl == TF_PT_LVL_0) {
335 page_cnt[TF_PT_LVL_0] = num_data_pages;
336 } else if (max_lvl == TF_PT_LVL_1) {
337 page_cnt[TF_PT_LVL_1] = num_data_pages;
338 page_cnt[TF_PT_LVL_0] =
339 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
340 } else if (max_lvl == TF_PT_LVL_2) {
341 page_cnt[TF_PT_LVL_2] = num_data_pages;
342 page_cnt[TF_PT_LVL_1] =
343 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
344 page_cnt[TF_PT_LVL_0] =
345 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
346 } else {
347 return;
348 }
349 }
350
351 /**
352 * Given the page size, size of each data item (entry size),
353 * and the total number of entries needed, determine the number
354 * of page table levels and the number of data pages required.
355 *
356 * [in] page_size
357 * Page size
358 *
359 * [in] entry_size
360 * Entry size
361 *
362 * [in] num_entries
363 * Number of entries needed
364 *
365 * [out] num_data_pages
366 * Number of pages required
367 *
368 * Returns:
369 * Success - Number of EM page levels required
370 * -ENOMEM - Out of memory
371 */
372 static int
tf_em_size_page_tbl_lvl(uint32_t page_size,uint32_t entry_size,uint32_t num_entries,uint64_t * num_data_pages)373 tf_em_size_page_tbl_lvl(uint32_t page_size,
374 uint32_t entry_size,
375 uint32_t num_entries,
376 uint64_t *num_data_pages)
377 {
378 uint64_t lvl_data_size = page_size;
379 int lvl = TF_PT_LVL_0;
380 uint64_t data_size;
381
382 *num_data_pages = 0;
383 data_size = (uint64_t)num_entries * entry_size;
384
385 while (lvl_data_size < data_size) {
386 lvl++;
387
388 if (lvl == TF_PT_LVL_1)
389 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
390 page_size;
391 else if (lvl == TF_PT_LVL_2)
392 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
393 MAX_PAGE_PTRS(page_size) * page_size;
394 else
395 return -ENOMEM;
396 }
397
398 *num_data_pages = roundup(data_size, page_size) / page_size;
399
400 return lvl;
401 }
402
/**
 * Size the EM table based on capabilities
 *
 * [in] tbl
 *   EM table to size
 *
 * [in] page_size
 *   Size of the backing memory pages
 *
 * Returns:
 *   0       - Success
 * - EINVAL  - Parameter error
 * - ENOMEM  - Out of memory
 */
int
tf_em_size_table(struct hcapi_cfa_em_table *tbl,
		 uint32_t page_size)
{
	uint64_t num_data_pages;
	uint32_t *page_cnt;
	int max_lvl;
	uint32_t num_entries;
	uint32_t cnt = TF_EM_MIN_ENTRIES;

	/* Ignore entry if both size and number are zero */
	if (!tbl->entry_size && !tbl->num_entries)
		return 0;

	/* If only one is set then error */
	if (!tbl->entry_size || !tbl->num_entries)
		return -EINVAL;

	/* Determine number of page table levels and the number
	 * of data pages needed to process the given eem table.
	 */
	if (tbl->type == TF_RECORD_TABLE) {
		/*
		 * For action records just a memory size is provided. Work
		 * backwards to resolve to number of entries
		 */
		num_entries = tbl->num_entries / tbl->entry_size;
		if (num_entries < TF_EM_MIN_ENTRIES) {
			num_entries = TF_EM_MIN_ENTRIES;
		} else {
			/* Round up to the next power-of-two entry count */
			while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
				cnt *= 2;
			num_entries = cnt;
		}
	} else {
		num_entries = tbl->num_entries;
	}

	/* NOTE(review): the rounded num_entries computed above is only used
	 * in the log messages below; the sizing call still receives the raw
	 * tbl->num_entries — confirm this is intentional.
	 */
	max_lvl = tf_em_size_page_tbl_lvl(page_size,
					  tbl->entry_size,
					  tbl->num_entries,
					  &num_data_pages);
	if (max_lvl < 0) {
		TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
		TFP_DRV_LOG(WARNING,
			    "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
			    tbl->type, (uint64_t)num_entries * tbl->entry_size,
			    page_size);
		return -ENOMEM;
	}

	/* max_lvl is the deepest level index; levels in use is one more */
	tbl->num_lvl = max_lvl + 1;
	tbl->num_data_pages = num_data_pages;

	/* Determine the number of pages needed at each level */
	page_cnt = tbl->page_cnt;
	memset(page_cnt, 0, sizeof(tbl->page_cnt));
	tf_em_size_page_tbls(max_lvl, num_data_pages, page_size,
			     page_cnt);

	TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
	TFP_DRV_LOG(INFO,
		    "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 \
		    " l0: %u l1: %u l2: %u\n",
		    max_lvl + 1,
		    (uint64_t)num_data_pages * page_size,
		    num_data_pages,
		    page_cnt[TF_PT_LVL_0],
		    page_cnt[TF_PT_LVL_1],
		    page_cnt[TF_PT_LVL_2]);

	return 0;
}
487
488 /**
489 * Validates EM number of entries requested
490 *
491 * [in] tbl_scope_cb
492 * Pointer to table scope control block to be populated
493 *
494 * [in] parms
495 * Pointer to input parameters
496 *
497 * Returns:
498 * 0 - Success
499 * -EINVAL - Parameter error
500 */
501 int
tf_em_validate_num_entries(struct tf_tbl_scope_cb * tbl_scope_cb,struct tf_alloc_tbl_scope_parms * parms)502 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
503 struct tf_alloc_tbl_scope_parms *parms)
504 {
505 uint32_t cnt;
506
507 if (parms->rx_mem_size_in_mb != 0) {
508 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
509 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
510 + 1);
511 uint32_t num_entries = (parms->rx_mem_size_in_mb *
512 TF_MEGABYTE) / (key_b + action_b);
513
514 if (num_entries < TF_EM_MIN_ENTRIES) {
515 TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
516 "%uMB\n",
517 parms->rx_mem_size_in_mb);
518 return -EINVAL;
519 }
520
521 cnt = TF_EM_MIN_ENTRIES;
522 while (num_entries > cnt &&
523 cnt <= TF_EM_MAX_ENTRIES)
524 cnt *= 2;
525
526 if (cnt > TF_EM_MAX_ENTRIES) {
527 TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
528 "%u\n",
529 (parms->tx_num_flows_in_k * TF_KILOBYTE));
530 return -EINVAL;
531 }
532
533 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
534 } else {
535 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
536 TF_EM_MIN_ENTRIES ||
537 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
538 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
539 TFP_DRV_LOG(ERR,
540 "EEM: Invalid number of Rx flows "
541 "requested:%u max:%u\n",
542 parms->rx_num_flows_in_k * TF_KILOBYTE,
543 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
544 return -EINVAL;
545 }
546
547 /* must be a power-of-2 supported value
548 * in the range 32K - 128M
549 */
550 cnt = TF_EM_MIN_ENTRIES;
551 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
552 cnt <= TF_EM_MAX_ENTRIES)
553 cnt *= 2;
554
555 if (cnt > TF_EM_MAX_ENTRIES) {
556 TFP_DRV_LOG(ERR,
557 "EEM: Invalid number of Rx requested: %u\n",
558 (parms->rx_num_flows_in_k * TF_KILOBYTE));
559 return -EINVAL;
560 }
561 }
562
563 if (parms->tx_mem_size_in_mb != 0) {
564 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
565 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
566 + 1);
567 uint32_t num_entries = (parms->tx_mem_size_in_mb *
568 (TF_KILOBYTE * TF_KILOBYTE)) /
569 (key_b + action_b);
570
571 if (num_entries < TF_EM_MIN_ENTRIES) {
572 TFP_DRV_LOG(ERR,
573 "EEM: Insufficient memory requested:%uMB\n",
574 parms->rx_mem_size_in_mb);
575 return -EINVAL;
576 }
577
578 cnt = TF_EM_MIN_ENTRIES;
579 while (num_entries > cnt &&
580 cnt <= TF_EM_MAX_ENTRIES)
581 cnt *= 2;
582
583 if (cnt > TF_EM_MAX_ENTRIES) {
584 TFP_DRV_LOG(ERR,
585 "EEM: Invalid number of Tx requested: %u\n",
586 (parms->tx_num_flows_in_k * TF_KILOBYTE));
587 return -EINVAL;
588 }
589
590 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
591 } else {
592 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
593 TF_EM_MIN_ENTRIES ||
594 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
595 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
596 TFP_DRV_LOG(ERR,
597 "EEM: Invalid number of Tx flows "
598 "requested:%u max:%u\n",
599 (parms->tx_num_flows_in_k * TF_KILOBYTE),
600 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
601 return -EINVAL;
602 }
603
604 cnt = TF_EM_MIN_ENTRIES;
605 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
606 cnt <= TF_EM_MAX_ENTRIES)
607 cnt *= 2;
608
609 if (cnt > TF_EM_MAX_ENTRIES) {
610 TFP_DRV_LOG(ERR,
611 "EEM: Invalid number of Tx requested: %u\n",
612 (parms->tx_num_flows_in_k * TF_KILOBYTE));
613 return -EINVAL;
614 }
615 }
616
617 if (parms->rx_num_flows_in_k != 0 &&
618 parms->rx_max_key_sz_in_bits / 8 == 0) {
619 TFP_DRV_LOG(ERR,
620 "EEM: Rx key size required: %u\n",
621 (parms->rx_max_key_sz_in_bits));
622 return -EINVAL;
623 }
624
625 if (parms->tx_num_flows_in_k != 0 &&
626 parms->tx_max_key_sz_in_bits / 8 == 0) {
627 TFP_DRV_LOG(ERR,
628 "EEM: Tx key size required: %u\n",
629 (parms->tx_max_key_sz_in_bits));
630 return -EINVAL;
631 }
632 /* Rx */
633 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
634 parms->rx_num_flows_in_k * TF_KILOBYTE;
635 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
636 parms->rx_max_key_sz_in_bits / 8;
637
638 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
639 parms->rx_num_flows_in_k * TF_KILOBYTE;
640 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
641 parms->rx_max_key_sz_in_bits / 8;
642
643 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
644 parms->rx_num_flows_in_k * TF_KILOBYTE;
645 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
646 parms->rx_max_action_entry_sz_in_bits / 8;
647
648 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries =
649 0;
650
651 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_ACTION_TABLE].num_entries =
652 parms->rx_num_flows_in_k * TF_KILOBYTE;
653 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_ACTION_TABLE].entry_size =
654 parms->rx_max_action_entry_sz_in_bits / 8;
655
656 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EM_LKUP_TABLE].num_entries =
657 parms->rx_num_flows_in_k * TF_KILOBYTE;
658 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EM_LKUP_TABLE].entry_size =
659 parms->rx_max_key_sz_in_bits / 8;
660
661 /* Tx */
662 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
663 parms->tx_num_flows_in_k * TF_KILOBYTE;
664 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
665 parms->tx_max_key_sz_in_bits / 8;
666
667 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
668 parms->tx_num_flows_in_k * TF_KILOBYTE;
669 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
670 parms->tx_max_key_sz_in_bits / 8;
671
672 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
673 parms->tx_num_flows_in_k * TF_KILOBYTE;
674 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
675 parms->tx_max_action_entry_sz_in_bits / 8;
676
677 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries =
678 0;
679
680 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_ACTION_TABLE].num_entries =
681 parms->rx_num_flows_in_k * TF_KILOBYTE;
682 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_ACTION_TABLE].entry_size =
683 parms->tx_max_action_entry_sz_in_bits / 8;
684
685 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EM_LKUP_TABLE].num_entries =
686 parms->rx_num_flows_in_k * TF_KILOBYTE;
687 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EM_LKUP_TABLE].entry_size =
688 parms->tx_max_key_sz_in_bits / 8;
689
690 return 0;
691 }
692
/** insert EEM entry API
 *
 * Hashes the key, then attempts to add the entry to the KEY0 table;
 * on collision falls back to the KEY1 table. On success the GFID and
 * flow handle are written back through parms.
 *
 * [in] dev
 *   Device info providing the key hash callback
 * [in] tbl_scope_cb
 *   Table scope holding the EM context for parms->dir
 * [in/out] parms
 *   Insert parameters; flow_id and flow_handle are set on success
 *
 * returns:
 *  0
 *  TF_ERR	    - unable to get lock
 *
 * insert callback returns:
 *   0
 *   TF_ERR_EM_DUP  - key is already in table
 */
static int
tf_insert_eem_entry(struct tf_dev_info *dev,
		    struct tf_tbl_scope_cb *tbl_scope_cb,
		    struct tf_insert_em_entry_parms *parms)
{
	uint32_t mask;
	uint32_t key0_hash;
	uint32_t key1_hash;
	uint32_t key0_index;
	uint32_t key1_index;
	struct cfa_p4_eem_64b_entry key_entry;
	uint32_t index;
	enum hcapi_cfa_em_table_type table_type;
	uint32_t gfid;
	struct hcapi_cfa_hwop op;
	struct hcapi_cfa_key_tbl key_tbl;
	struct hcapi_cfa_key_data key_obj;
	struct hcapi_cfa_key_loc key_loc;
	uint64_t big_hash;
	int rc;

	/* Get mask to use on hash; a zero mask means the KEY0 table size
	 * is not a supported power of two.
	 */
	mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);

	if (!mask)
		return -EINVAL;

	if (dev->ops->tf_dev_cfa_key_hash == NULL)
		return -EINVAL;

	/* One 64-bit hash yields both table indexes: upper half for KEY0,
	 * lower half for KEY1.
	 */
	big_hash = dev->ops->tf_dev_cfa_key_hash((uint64_t *)parms->key,
					(TF_P4_HW_EM_KEY_MAX_SIZE + 4) * 8);
	key0_hash = (uint32_t)(big_hash >> 32);
	key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);

	key0_index = key0_hash & mask;
	key1_index = key1_hash & mask;

	/*
	 * Use the "result" arg to populate all of the key entry then
	 * store the byte swapped "raw" entry in a local copy ready
	 * for insertion in to the table.
	 */
	tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
			       ((uint8_t *)parms->key),
			       &key_entry);

	/*
	 * Try to add to Key0 table, if that does not work then
	 * try the key1 table.
	 */
	index = key0_index;
	op.opcode = HCAPI_CFA_HWOPS_ADD;
	key_tbl.base0 =
		(uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
	key_tbl.page_size = TF_EM_PAGE_SIZE;
	key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
	key_obj.data = (uint8_t *)&key_entry;
	key_obj.size = TF_P4_EM_KEY_RECORD_SIZE;

	rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
					       &key_tbl,
					       &key_obj,
					       &key_loc);

	if (rc == 0) {
		table_type = TF_KEY0_TABLE;
	} else {
		/* KEY0 slot taken (or failed); retry at the KEY1 index */
		index = key1_index;

		key_tbl.base0 =
			(uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
		key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;

		rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
						       &key_tbl,
						       &key_obj,
						       &key_loc);
		if (rc != 0)
			return rc;

		table_type = TF_KEY1_TABLE;
	}

	/* Encode which table and index the entry landed in so delete can
	 * find it again via the flow handle.
	 */
	TF_SET_GFID(gfid,
		    index,
		    table_type);
	TF_SET_FLOW_ID(parms->flow_id,
		       gfid,
		       TF_GFID_TABLE_EXTERNAL,
		       parms->dir);
	TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
				     0,
				     0,
				     0,
				     index,
				     0,
				     table_type);

	return 0;
}
804
805 /** delete EEM hash entry API
806 *
807 * returns:
808 * 0
809 * -EINVAL - parameter error
810 * TF_NO_SESSION - bad session ID
811 * TF_ERR_TBL_SCOPE - invalid table scope
812 * TF_ERR_TBL_IF - invalid table interface
813 *
814 * insert callback returns
815 * 0
816 * TF_NO_EM_MATCH - entry not found
817 */
818 static int
tf_delete_eem_entry(struct tf_tbl_scope_cb * tbl_scope_cb,struct tf_delete_em_entry_parms * parms)819 tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
820 struct tf_delete_em_entry_parms *parms)
821 {
822 enum hcapi_cfa_em_table_type hash_type;
823 uint32_t index;
824 struct hcapi_cfa_hwop op;
825 struct hcapi_cfa_key_tbl key_tbl;
826 struct hcapi_cfa_key_data key_obj;
827 struct hcapi_cfa_key_loc key_loc;
828 int rc;
829
830 TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
831 TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
832
833 op.opcode = HCAPI_CFA_HWOPS_DEL;
834 key_tbl.base0 =
835 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables
836 [(hash_type == 0 ? TF_KEY0_TABLE : TF_KEY1_TABLE)];
837 key_tbl.page_size = TF_EM_PAGE_SIZE;
838 key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
839 key_obj.data = NULL;
840 key_obj.size = TF_P4_EM_KEY_RECORD_SIZE;
841
842 rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
843 &key_tbl,
844 &key_obj,
845 &key_loc);
846
847 if (!rc)
848 return rc;
849
850 return 0;
851 }
852
853 /** insert EM hash entry API
854 *
855 * returns:
856 * 0 - Success
857 * -EINVAL - Error
858 */
859 int
tf_em_insert_ext_entry(struct tf * tfp,struct tf_insert_em_entry_parms * parms)860 tf_em_insert_ext_entry(struct tf *tfp,
861 struct tf_insert_em_entry_parms *parms)
862 {
863 int rc;
864 struct tf_tbl_scope_cb *tbl_scope_cb;
865 struct tf_session *tfs;
866 struct tf_dev_info *dev;
867
868 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
869 if (tbl_scope_cb == NULL) {
870 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
871 return -EINVAL;
872 }
873
874 /* Retrieve the session information */
875 rc = tf_session_get_session_internal(tfp, &tfs);
876 if (rc)
877 return rc;
878
879 /* Retrieve the device information */
880 rc = tf_session_get_device(tfs, &dev);
881 if (rc)
882 return rc;
883
884 return tf_insert_eem_entry
885 (dev,
886 tbl_scope_cb,
887 parms);
888 }
889
890 /** Delete EM hash entry API
891 *
892 * returns:
893 * 0 - Success
894 * -EINVAL - Error
895 */
896 int
tf_em_delete_ext_entry(struct tf * tfp,struct tf_delete_em_entry_parms * parms)897 tf_em_delete_ext_entry(struct tf *tfp,
898 struct tf_delete_em_entry_parms *parms)
899 {
900 struct tf_tbl_scope_cb *tbl_scope_cb;
901
902 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
903 if (tbl_scope_cb == NULL) {
904 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
905 return -EINVAL;
906 }
907
908 return tf_delete_eem_entry(tbl_scope_cb, parms);
909 }
910
911
/* Bind the EM extension support: allocate the em_ext_db holding the
 * table scope list and, per direction that requested table scopes,
 * create an RM database for EEM resources.
 */
int
tf_em_ext_common_bind(struct tf *tfp,
		      struct tf_em_cfg_parms *parms)
{
	int rc;
	int i;
	struct tf_rm_create_db_parms db_cfg = { 0 };
	struct em_ext_db *ext_db;
	struct tfp_calloc_parms cparms;

	TF_CHECK_PARMS2(tfp, parms);

	/* Allocate the extension DB anchoring the scope list and the
	 * per-direction RM databases.
	 */
	cparms.nitems = 1;
	cparms.size = sizeof(struct em_ext_db);
	cparms.alignment = 0;
	if (tfp_calloc(&cparms) != 0) {
		TFP_DRV_LOG(ERR, "em_ext_db alloc error %s\n",
			    strerror(ENOMEM));
		return -ENOMEM;
	}

	ext_db = cparms.mem_va;
	ll_init(&ext_db->tbl_scope_ll);
	for (i = 0; i < TF_DIR_MAX; i++)
		ext_db->eem_db[i] = NULL;
	tf_session_set_em_ext_db(tfp, ext_db);

	db_cfg.module = TF_MODULE_TYPE_EM;
	db_cfg.num_elements = parms->num_elements;
	db_cfg.cfg = parms->cfg;

	for (i = 0; i < TF_DIR_MAX; i++) {
		db_cfg.dir = i;
		db_cfg.alloc_cnt = parms->resources->em_cnt[i].cnt;

		/* Check if we got any request to support EEM, if so
		 * we build an EM Ext DB holding Table Scopes.
		 */
		if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_TBL_SCOPE] == 0)
			continue;

		db_cfg.rm_db = (void *)&ext_db->eem_db[i];
		rc = tf_rm_create_db(tfp, &db_cfg);
		if (rc) {
			TFP_DRV_LOG(ERR,
				    "%s: EM Ext DB creation failed\n",
				    tf_dir_2_str(i));

			/* NOTE(review): ext_db (already attached to the
			 * session) and any earlier-created direction DB are
			 * not released here — confirm the caller unbinds on
			 * failure.
			 */
			return rc;
		}
	}

	/* Remember host vs system memory selection for later allocations */
	mem_type = parms->mem_type;

	return 0;
}
968
/* Unbind the EM extension support: free all remaining table scopes
 * (via the device op when available), free the per-direction RM
 * databases, release the em_ext_db and detach it from the session.
 */
int
tf_em_ext_common_unbind(struct tf *tfp)
{
	int rc;
	int i;
	struct tf_rm_free_db_parms fparms = { 0 };
	struct em_ext_db *ext_db = NULL;
	struct tf_session *tfs = NULL;
	struct tf_dev_info *dev;
	struct ll_entry *entry;
	struct tf_tbl_scope_cb *tbl_scope_cb = NULL;
	void *ext_ptr = NULL;
	struct tf_free_tbl_scope_parms tparms = { 0 };

	TF_CHECK_PARMS1(tfp);

	rc = tf_session_get_session_internal(tfp, &tfs);
	if (rc) {
		TFP_DRV_LOG(ERR, "Failed to get tf_session, rc:%s\n",
			    strerror(-rc));
		return rc;
	}

	/* Retrieve the device information */
	rc = tf_session_get_device(tfs, &dev);
	if (rc) {
		TFP_DRV_LOG(ERR,
			    "Failed to lookup device, rc:%s\n",
			    strerror(-rc));
		return rc;
	}

	rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
	if (rc) {
		TFP_DRV_LOG(ERR,
			    "Failed to get em_ext_db from session, rc:%s\n",
			    strerror(-rc));
		return rc;
	}
	ext_db = (struct em_ext_db *)ext_ptr;

	if (ext_db != NULL) {
		/* Free every table scope still on the list. Advance the
		 * iterator BEFORE freeing, since the node is embedded in
		 * the cb being released.
		 */
		entry = ext_db->tbl_scope_ll.head;
		while (entry != NULL) {
			tbl_scope_cb = (struct tf_tbl_scope_cb *)entry;
			entry = entry->next;
			tparms.tbl_scope_id =
				tbl_scope_cb->tbl_scope_id;

			if (dev->ops->tf_dev_free_tbl_scope) {
				/* Device op also unlinks and frees the cb */
				dev->ops->tf_dev_free_tbl_scope(tfp,
								&tparms);
			} else {
				/* should not reach here */
				ll_delete(&ext_db->tbl_scope_ll,
					  &tbl_scope_cb->ll_entry);
				tfp_free(tbl_scope_cb);
			}
		}

		/* Free the per-direction EEM RM databases */
		for (i = 0; i < TF_DIR_MAX; i++) {
			if (ext_db->eem_db[i] == NULL)
				continue;

			fparms.dir = i;
			fparms.rm_db = ext_db->eem_db[i];
			rc = tf_rm_free_db(tfp, &fparms);
			if (rc)
				return rc;

			ext_db->eem_db[i] = NULL;
		}

		tfp_free(ext_db);
	}

	/* Detach the (now freed) DB from the session */
	tf_session_set_em_ext_db(tfp, NULL);

	return 0;
}
1049
1050 /**
1051 * Sets the specified external table type element.
1052 *
1053 * This API sets the specified element data
1054 *
1055 * [in] tfp
1056 * Pointer to TF handle
1057 *
1058 * [in] parms
1059 * Pointer to table set parameters
1060 *
1061 * Returns
1062 * - (0) if successful.
1063 * - (-EINVAL) on failure.
1064 */
tf_tbl_ext_common_set(struct tf * tfp,struct tf_tbl_set_parms * parms)1065 int tf_tbl_ext_common_set(struct tf *tfp,
1066 struct tf_tbl_set_parms *parms)
1067 {
1068 int rc = 0;
1069 struct tf_tbl_scope_cb *tbl_scope_cb;
1070 uint32_t tbl_scope_id;
1071 struct hcapi_cfa_hwop op;
1072 struct hcapi_cfa_key_tbl key_tbl;
1073 struct hcapi_cfa_key_data key_obj;
1074 struct hcapi_cfa_key_loc key_loc;
1075
1076 TF_CHECK_PARMS2(tfp, parms);
1077
1078 if (parms->data == NULL) {
1079 TFP_DRV_LOG(ERR,
1080 "%s, invalid parms->data\n",
1081 tf_dir_2_str(parms->dir));
1082 return -EINVAL;
1083 }
1084
1085 tbl_scope_id = parms->tbl_scope_id;
1086
1087 if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
1088 TFP_DRV_LOG(ERR,
1089 "%s, Table scope not allocated\n",
1090 tf_dir_2_str(parms->dir));
1091 return -EINVAL;
1092 }
1093
1094 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, tbl_scope_id);
1095 if (tbl_scope_cb == NULL) {
1096 TFP_DRV_LOG(ERR,
1097 "%s, table scope error\n",
1098 tf_dir_2_str(parms->dir));
1099 return -EINVAL;
1100 }
1101
1102 op.opcode = HCAPI_CFA_HWOPS_PUT;
1103 key_tbl.base0 =
1104 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_RECORD_TABLE];
1105 key_tbl.page_size = TF_EM_PAGE_SIZE;
1106 key_obj.offset = parms->idx;
1107 key_obj.data = parms->data;
1108 key_obj.size = parms->data_sz_in_bytes;
1109
1110 rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
1111 &key_tbl,
1112 &key_obj,
1113 &key_loc);
1114
1115 return rc;
1116 }
1117
/* Allocate a table scope; thin wrapper over the device-layer
 * tf_em_ext_alloc() implementation.
 */
int
tf_em_ext_common_alloc(struct tf *tfp,
		       struct tf_alloc_tbl_scope_parms *parms)
{
	return tf_em_ext_alloc(tfp, parms);
}
1124
/* Free a table scope; thin wrapper over the device-layer
 * tf_em_ext_free() implementation.
 */
int
tf_em_ext_common_free(struct tf *tfp,
		      struct tf_free_tbl_scope_parms *parms)
{
	return tf_em_ext_free(tfp, parms);
}
1131
tf_em_ext_map_tbl_scope(struct tf * tfp,struct tf_map_tbl_scope_parms * parms)1132 int tf_em_ext_map_tbl_scope(struct tf *tfp,
1133 struct tf_map_tbl_scope_parms *parms)
1134 {
1135 int rc = 0;
1136 struct tf_session *tfs;
1137 struct tf_tbl_scope_cb *tbl_scope_cb;
1138 struct tf_global_cfg_parms gcfg_parms = { 0 };
1139 struct tfp_calloc_parms aparms;
1140 uint32_t *data, *mask;
1141 uint32_t sz_in_bytes = 8;
1142 struct tf_dev_info *dev;
1143
1144 /* Retrieve the session information */
1145 rc = tf_session_get_session_internal(tfp, &tfs);
1146 if (rc)
1147 return rc;
1148
1149 /* Retrieve the device information */
1150 rc = tf_session_get_device(tfs, &dev);
1151 if (rc)
1152 return rc;
1153
1154 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
1155 if (tbl_scope_cb == NULL) {
1156 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb tbl_scope_id(%d)\n",
1157 parms->tbl_scope_id);
1158 return -EINVAL;
1159 }
1160
1161 if (dev->ops->tf_dev_map_tbl_scope == NULL) {
1162 rc = -EOPNOTSUPP;
1163 TFP_DRV_LOG(ERR,
1164 "Map table scope operation not supported, rc:%s\n",
1165 strerror(-rc));
1166 return rc;
1167 }
1168
1169 aparms.nitems = 2;
1170 aparms.size = sizeof(uint32_t);
1171 aparms.alignment = 0;
1172
1173 if (tfp_calloc(&aparms) != 0) {
1174 TFP_DRV_LOG(ERR, "Map tbl scope alloc data error %s\n",
1175 strerror(ENOMEM));
1176 return -ENOMEM;
1177 }
1178 data = aparms.mem_va;
1179
1180 if (tfp_calloc(&aparms) != 0) {
1181 TFP_DRV_LOG(ERR, "Map tbl scope alloc mask error %s\n",
1182 strerror(ENOMEM));
1183 rc = -ENOMEM;
1184 goto clean;
1185 }
1186 mask = aparms.mem_va;
1187
1188 rc = dev->ops->tf_dev_map_parif(tfp, parms->parif_bitmask,
1189 tbl_scope_cb->pf,
1190 (uint8_t *)data, (uint8_t *)mask,
1191 sz_in_bytes);
1192
1193 if (rc) {
1194 TFP_DRV_LOG(ERR,
1195 "Map table scope config failure, rc:%s\n",
1196 strerror(-rc));
1197 goto cleaner;
1198 }
1199
1200 /* Note that TF_GLOBAL_CFG_INTERNAL_PARIF_2_PF is same as below enum */
1201 gcfg_parms.type = TF_GLOBAL_CFG_TYPE_MAX;
1202 gcfg_parms.offset = 0;
1203 gcfg_parms.config = (uint8_t *)data;
1204 gcfg_parms.config_mask = (uint8_t *)mask;
1205 gcfg_parms.config_sz_in_bytes = sizeof(uint64_t);
1206
1207
1208 rc = tf_msg_set_global_cfg(tfp, &gcfg_parms);
1209 if (rc) {
1210 TFP_DRV_LOG(ERR,
1211 "Map tbl scope, set failed, rc:%s\n",
1212 strerror(-rc));
1213 }
1214 cleaner:
1215 tfp_free(mask);
1216 clean:
1217 tfp_free(data);
1218
1219 return rc;
1220 }
1221