/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Broadcom
 * All rights reserved.
 */

#include "tf_common.h"
#include "tf_util.h"
#include "tfp.h"
#include "tf_shadow_tcam.h"
#include "tf_hash.h"

/**
 * The implementation includes 3 tables per tcam table type.
 * - hash table
 *   - sized so that a minimum of 4 slots per shadow entry are available to
 *   minimize the likelihood of collisions.
 * - shadow key table
 *   - sized to the number of entries requested and is directly indexed
 *   - the index is zero based and is the tcam index - the base address
 *   - the key and mask are stored in the key table.
 *   - The stored key is the AND of the key/mask in order to eliminate the need
 *   to compare both the key and mask.
 * - shadow result table
 *   - the result table is stored separately since it only needs to be accessed
 *   when the key matches.
 *   - the result has a back pointer to the hash table via the hb handle.  The
 *   hb handle is a 32 bit representation of the hash with a valid bit, bucket
 *   element index, and the hash index.  It is necessary to store the hb handle
 *   with the result since subsequent removes only provide the tcam index.
 *
 * - Max entries is limited in the current implementation since bit 15 is the
 *   valid bit in the hash table.
 * - A 16bit hash is calculated and masked based on the number of entries
 * - 64b wide bucket is used and broken into 4x16bit elements.
 *   This decision is based on quicker bucket scanning to determine if any
 *   elements are in use.
 * - bit 15 of each bucket element is the valid bit; this is done to prevent
 *   having to read the larger key/result data to determine validity.  It also
 *   aids in more efficiently scanning the bucket for slot usage.
 */

/*
 * The maximum number of shadow entries supported.  The value also doubles as
 * the maximum number of hash buckets.  There are only 15 bits of data per
 * bucket element to point to the shadow tables.
 */
#define TF_SHADOW_TCAM_ENTRIES_MAX (1 << 15)

/* The number of elements (BE) per hash bucket (HB) */
#define TF_SHADOW_TCAM_HB_NUM_ELEM (4)
#define TF_SHADOW_TCAM_BE_VALID (1 << 15)
#define TF_SHADOW_TCAM_BE_IS_VALID(be) (((be) & TF_SHADOW_TCAM_BE_VALID) != 0)
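
/*
 * Illustrative example (not part of the original comments): a bucket element
 * holding shadow index 5 is stored as (5 | TF_SHADOW_TCAM_BE_VALID), i.e.
 * 0x8005, so TF_SHADOW_TCAM_BE_IS_VALID(0x8005) is true and masking off
 * TF_SHADOW_TCAM_BE_VALID recovers shadow index 5.
 */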

/**
 * The hash bucket handle is 32b
 * - bit 31, the valid bit
 * - bits 29-30, the bucket element index
 * - bits 0-15, the hash idx (masked based on the allocated size)
 */
#define TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hndl) (((hndl) & (1 << 31)) != 0)
#define TF_SHADOW_TCAM_HB_HANDLE_CREATE(idx, be) ((1 << 31) | \
						  ((be) << 29) | (idx))

#define TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hdl) (((hdl) >> 29) & \
					      (TF_SHADOW_TCAM_HB_NUM_ELEM - 1))

#define TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hdl)((hdl) & \
						     (ctxt)->hash_ctxt.hid_mask)
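
/*
 * Worked example (illustrative only, assuming a hash table sized for 1024
 * buckets so hid_mask == 0x03ff):
 * TF_SHADOW_TCAM_HB_HANDLE_CREATE(0x1a3, 2) == 0x80000000 | (2 << 29) | 0x1a3
 * == 0xc00001a3.  Decoding it, TF_SHADOW_TCAM_HB_HANDLE_BE_GET() returns 2
 * and TF_SHADOW_TCAM_HB_HANDLE_HASH_GET() returns 0x1a3.
 */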

/**
 * The idx provided by the caller is within a region, so currently the base is
 * either added to or subtracted from the idx to ensure it can be used as a
 * compressed index.
 */

/* Convert the tcam index to a shadow index */
#define TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, idx) ((idx) - \
						(ctxt)->shadow_ctxt.base_addr)

/* Convert the shadow index to a tcam index */
#define TF_SHADOW_TCAM_SHIDX_TO_IDX(ctxt, idx) ((idx) + \
						(ctxt)->shadow_ctxt.base_addr)

/* Simple helper masks for clearing an element from the bucket */
#define TF_SHADOW_TCAM_BE0_MASK_CLEAR(hb) ((hb) & 0xffffffffffff0000ull)
#define TF_SHADOW_TCAM_BE1_MASK_CLEAR(hb) ((hb) & 0xffffffff0000ffffull)
#define TF_SHADOW_TCAM_BE2_MASK_CLEAR(hb) ((hb) & 0xffff0000ffffffffull)
#define TF_SHADOW_TCAM_BE3_MASK_CLEAR(hb) ((hb) & 0x0000ffffffffffffull)
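
/*
 * For illustration (not from the original source): if a bucket holds
 * 0x8003800280018000 (shadow indices 0-3, all valid), then
 * TF_SHADOW_TCAM_BE2_MASK_CLEAR() returns 0x8003000080018000, i.e. only
 * element 2 (bits 32-47) is invalidated.
 */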

/**
 * This should be coming from external, but for now it is assumed that no key
 * is greater than 1K bits and no result is bigger than 128 bits.  This makes
 * allocation of the hash table easier without having to allocate on the fly.
 */
#define TF_SHADOW_TCAM_MAX_KEY_SZ 128
#define TF_SHADOW_TCAM_MAX_RESULT_SZ 16

/*
 * Local only defines for the internal data.
 */

/**
 * tf_shadow_tcam_shadow_key_entry is the key/mask entry of the key table.
 * The key stored in the table is the masked version of the key.  This is done
 * to eliminate the need of comparing both the key and mask.
 */
struct tf_shadow_tcam_shadow_key_entry {
	uint8_t key[TF_SHADOW_TCAM_MAX_KEY_SZ];
	uint8_t mask[TF_SHADOW_TCAM_MAX_KEY_SZ];
};

/**
 * tf_shadow_tcam_shadow_result_entry is the result table entry.
 * The result table writes are broken into two phases:
 * - The search phase, which stores the hb_handle and key size and
 * - The set phase, which writes the result, refcnt, and result size
 */
struct tf_shadow_tcam_shadow_result_entry {
	uint8_t result[TF_SHADOW_TCAM_MAX_RESULT_SZ];
	uint16_t result_size;
	uint16_t key_size;
	uint32_t refcnt;
	uint32_t hb_handle;
};

/**
 * tf_shadow_tcam_shadow_ctxt holds all information for accessing the key and
 * result tables.
 */
struct tf_shadow_tcam_shadow_ctxt {
	struct tf_shadow_tcam_shadow_key_entry *sh_key_tbl;
	struct tf_shadow_tcam_shadow_result_entry *sh_res_tbl;
	uint32_t base_addr;
	uint16_t num_entries;
	uint16_t alloc_idx;
};

/**
 * tf_shadow_tcam_hash_ctxt holds all information related to accessing the hash
 * table.
 */
struct tf_shadow_tcam_hash_ctxt {
	uint64_t *hashtbl;
	uint16_t hid_mask;
	uint16_t hash_entries;
};

/**
 * tf_shadow_tcam_ctxt holds the hash and shadow tables for the current shadow
 * tcam db.  This structure is per tcam table type as each tcam table has its
 * own shadow and hash table.
 */
struct tf_shadow_tcam_ctxt {
	struct tf_shadow_tcam_shadow_ctxt shadow_ctxt;
	struct tf_shadow_tcam_hash_ctxt hash_ctxt;
};

/**
 * tf_shadow_tcam_db is the allocated db structure returned as an opaque
 * void * pointer to the caller during create db.  It holds the pointers for
 * each tcam associated with the db.
 */
struct tf_shadow_tcam_db {
	/* Each context holds the shadow and hash table information */
	struct tf_shadow_tcam_ctxt *ctxt[TF_TCAM_TBL_TYPE_MAX];
};

/**
 * Returns the number of entries in the context's shadow table.
 */
static inline uint16_t
tf_shadow_tcam_sh_num_entries_get(struct tf_shadow_tcam_ctxt *ctxt)
{
	return ctxt->shadow_ctxt.num_entries;
}

/**
 * Compare the given key with the key in the shadow table.
 *
 * Returns 0 if the keys match
 */
static int
tf_shadow_tcam_key_cmp(struct tf_shadow_tcam_ctxt *ctxt,
		       uint8_t *key,
		       uint8_t *mask,
		       uint16_t sh_idx,
		       uint16_t size)
{
	if (size != ctxt->shadow_ctxt.sh_res_tbl[sh_idx].key_size ||
	    sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) || !key || !mask)
		return -1;

	return memcmp(key, ctxt->shadow_ctxt.sh_key_tbl[sh_idx].key, size);
}

/**
 * Copies the shadow result to the result.
 *
 * Returns NULL on failure
 */
static void *
tf_shadow_tcam_res_cpy(struct tf_shadow_tcam_ctxt *ctxt,
		       uint8_t *result,
		       uint16_t sh_idx,
		       uint16_t size)
{
	if (sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) || !result)
		return 0;

	if (ctxt->shadow_ctxt.sh_res_tbl[sh_idx].result_size != size)
		return 0;

	return memcpy(result,
		      ctxt->shadow_ctxt.sh_res_tbl[sh_idx].result,
		      size);
}

/**
 * Using a software based CRC function for now, but will look into using a
 * hardware-assisted version in the future.
 */
static uint32_t
tf_shadow_tcam_crc32_calc(uint8_t *key, uint32_t len)
{
	return tf_hash_calc_crc32(key, len);
}

/**
 * Free the memory associated with the context.
 */
static void
tf_shadow_tcam_ctxt_delete(struct tf_shadow_tcam_ctxt *ctxt)
{
	if (!ctxt)
		return;

	tfp_free(ctxt->hash_ctxt.hashtbl);
	tfp_free(ctxt->shadow_ctxt.sh_key_tbl);
	tfp_free(ctxt->shadow_ctxt.sh_res_tbl);
}

/**
 * The TF Shadow TCAM context is per TCAM and holds all information relating to
 * managing the shadow and search capability.  This routine allocates data that
 * must be deallocated by tf_shadow_tcam_ctxt_delete when deleting the
 * shadow db.
 */
static int
tf_shadow_tcam_ctxt_create(struct tf_shadow_tcam_ctxt *ctxt,
			   uint16_t num_entries,
			   uint16_t base_addr)
{
	struct tfp_calloc_parms cparms;
	uint16_t hash_size = 1;
	uint16_t hash_mask;
	int rc;

	/* Hash table is a power of two that holds the number of entries */
	if (num_entries > TF_SHADOW_TCAM_ENTRIES_MAX) {
		TFP_DRV_LOG(ERR, "Too many entries for shadow %d > %d\n",
			    num_entries,
			    TF_SHADOW_TCAM_ENTRIES_MAX);
		return -ENOMEM;
	}

	while (hash_size < num_entries)
		hash_size = hash_size << 1;

	hash_mask = hash_size - 1;

	/* Allocate the hash table */
	cparms.nitems = hash_size;
	cparms.size = sizeof(uint64_t);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	if (rc)
		goto error;
	ctxt->hash_ctxt.hashtbl = cparms.mem_va;
	ctxt->hash_ctxt.hid_mask = hash_mask;
	ctxt->hash_ctxt.hash_entries = hash_size;

	/* allocate the shadow tables */
	/* allocate the shadow key table */
	cparms.nitems = num_entries;
	cparms.size = sizeof(struct tf_shadow_tcam_shadow_key_entry);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	if (rc)
		goto error;
	ctxt->shadow_ctxt.sh_key_tbl = cparms.mem_va;

	/* allocate the shadow result table */
	cparms.nitems = num_entries;
	cparms.size = sizeof(struct tf_shadow_tcam_shadow_result_entry);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	if (rc)
		goto error;
	ctxt->shadow_ctxt.sh_res_tbl = cparms.mem_va;

	ctxt->shadow_ctxt.num_entries = num_entries;
	ctxt->shadow_ctxt.base_addr = base_addr;

	return 0;
error:
	tf_shadow_tcam_ctxt_delete(ctxt);

	return -ENOMEM;
}
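
/*
 * Sizing example (illustrative): a request for 1000 entries rounds the hash
 * table up to the next power of two, so hash_entries == 1024 and
 * hid_mask == 0x03ff, while the shadow key and result tables are allocated
 * with exactly 1000 entries.
 */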

/**
 * Get a shadow TCAM context given the db and the TCAM type
 */
static struct tf_shadow_tcam_ctxt *
tf_shadow_tcam_ctxt_get(struct tf_shadow_tcam_db *shadow_db,
			enum tf_tcam_tbl_type type)
{
	if (type >= TF_TCAM_TBL_TYPE_MAX ||
	    !shadow_db ||
	    !shadow_db->ctxt[type])
		return NULL;

	return shadow_db->ctxt[type];
}

/**
 * Sets the hash entry into the table given the TCAM context, hash bucket
 * handle, and shadow index.
 */
static inline int
tf_shadow_tcam_set_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
			      uint32_t hb_handle,
			      uint16_t sh_idx)
{
	uint16_t hid = TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hb_handle);
	uint16_t be = TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hb_handle);
	uint64_t entry = sh_idx | TF_SHADOW_TCAM_BE_VALID;

	if (hid >= ctxt->hash_ctxt.hash_entries)
		return -EINVAL;

	ctxt->hash_ctxt.hashtbl[hid] |= entry << (be * 16);
	return 0;
}
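
/*
 * For example (illustrative): binding shadow index 7 to bucket element 1
 * ORs (7 | TF_SHADOW_TCAM_BE_VALID) << 16, i.e. 0x80070000, into the bucket.
 */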

/**
 * Clears the hash entry given the TCAM context and hash bucket handle.
 */
static inline void
tf_shadow_tcam_clear_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
				uint32_t hb_handle)
{
	uint16_t hid, be;
	uint64_t *bucket;

	if (!TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hb_handle))
		return;

	hid = TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hb_handle);
	be = TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hb_handle);
	bucket = &ctxt->hash_ctxt.hashtbl[hid];

	switch (be) {
	case 0:
		*bucket = TF_SHADOW_TCAM_BE0_MASK_CLEAR(*bucket);
		break;
	case 1:
		*bucket = TF_SHADOW_TCAM_BE1_MASK_CLEAR(*bucket);
		break;
	case 2:
		*bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket);
		break;
	case 3:
		*bucket = TF_SHADOW_TCAM_BE3_MASK_CLEAR(*bucket);
		break;
	default:
		/*
		 * Since the BE_GET masks non-inclusive bits, this will not
		 * happen.
		 */
		break;
	}
}

/**
 * Clears the shadow key and result entries given the TCAM context and
 * shadow index.
 */
static void
tf_shadow_tcam_clear_sh_entry(struct tf_shadow_tcam_ctxt *ctxt,
			      uint16_t sh_idx)
{
	struct tf_shadow_tcam_shadow_key_entry *sk_entry;
	struct tf_shadow_tcam_shadow_result_entry *sr_entry;

	if (sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt))
		return;

	sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[sh_idx];
	sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[sh_idx];

	/*
	 * memset key/result to zero for now, possibly leave the data alone
	 * in the future and rely on the valid bit in the hash table.
	 */
	memset(sk_entry, 0, sizeof(struct tf_shadow_tcam_shadow_key_entry));
	memset(sr_entry, 0, sizeof(struct tf_shadow_tcam_shadow_result_entry));
}

/**
 * Binds the allocated tcam index with the hash and shadow tables.
 * The entry will be incomplete until the set has happened with the result
 * data.
 */
int
tf_shadow_tcam_bind_index(struct tf_shadow_tcam_bind_index_parms *parms)
{
	int rc;
	int i;
	uint16_t idx, klen;
	struct tf_shadow_tcam_ctxt *ctxt;
	struct tf_shadow_tcam_db *shadow_db;
	struct tf_shadow_tcam_shadow_key_entry *sk_entry;
	struct tf_shadow_tcam_shadow_result_entry *sr_entry;
	uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ];

	if (!parms || !TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(parms->hb_handle) ||
	    !parms->key || !parms->mask) {
		TFP_DRV_LOG(ERR, "Invalid parms\n");
		return -EINVAL;
	}

	shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
	ctxt = tf_shadow_tcam_ctxt_get(shadow_db, parms->type);
	if (!ctxt) {
		TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
			    tf_tcam_tbl_2_str(parms->type));
		return -EINVAL;
	}

	memset(tkey, 0, sizeof(tkey));
	idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, parms->idx);
	klen = parms->key_size;
	if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) ||
	    klen > TF_SHADOW_TCAM_MAX_KEY_SZ) {
		TFP_DRV_LOG(ERR, "%s:%s Invalid len (%d) > %d || oob idx %d\n",
			    tf_dir_2_str(parms->dir),
			    tf_tcam_tbl_2_str(parms->type),
			    klen,
			    TF_SHADOW_TCAM_MAX_KEY_SZ, idx);

		return -EINVAL;
	}

	rc = tf_shadow_tcam_set_hash_entry(ctxt, parms->hb_handle, idx);
	if (rc)
		return -EINVAL;

	sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[idx];
	sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];

	/*
	 * Write the masked key to the table for more efficient comparisons
	 * later.
	 */
	for (i = 0; i < klen; i++)
		tkey[i] = parms->key[i] & parms->mask[i];

	memcpy(sk_entry->key, tkey, klen);
	memcpy(sk_entry->mask, parms->mask, klen);

	/* Write the result table */
	sr_entry->key_size = parms->key_size;
	sr_entry->hb_handle = parms->hb_handle;
	sr_entry->refcnt = 1;

	return 0;
}
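
/*
 * Typical usage sketch (illustrative, not a definitive API contract): the
 * caller first runs tf_shadow_tcam_search().  On a MISS with a valid
 * hb_handle it allocates a real tcam index, calls
 * tf_shadow_tcam_bind_index() with that index plus the key/mask, and then
 * tf_shadow_tcam_insert() to store the result data.  On a HIT the existing
 * index is returned and, if allocation was requested, the refcnt is bumped.
 */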

/**
 * Deletes hash/shadow information if no more references.
 *
 * Returns 0 - The caller should delete the tcam entry in hardware.
 * Returns non-zero - The number of references to the entry
 */
int
tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms)
{
	uint16_t idx;
	uint32_t hb_handle;
	struct tf_shadow_tcam_ctxt *ctxt;
	struct tf_shadow_tcam_db *shadow_db;
	struct tf_tcam_free_parms *fparms;
	struct tf_shadow_tcam_shadow_result_entry *sr_entry;

	if (!parms || !parms->fparms) {
		TFP_DRV_LOG(ERR, "Invalid parms\n");
		return -EINVAL;
	}

	fparms = parms->fparms;

	/*
	 * Initialize the reference count to zero.  It will only be changed if
	 * non-zero.
	 */
	fparms->ref_cnt = 0;

	shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
	ctxt = tf_shadow_tcam_ctxt_get(shadow_db, fparms->type);
	if (!ctxt) {
		TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
			    tf_tcam_tbl_2_str(fparms->type));
		return 0;
	}

	idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, fparms->idx);
	if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt)) {
		TFP_DRV_LOG(DEBUG, "%s %d >= %d\n",
			    tf_tcam_tbl_2_str(fparms->type),
			    fparms->idx,
			    tf_shadow_tcam_sh_num_entries_get(ctxt));
		return 0;
	}

	sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
	if (sr_entry->refcnt <= 1) {
		hb_handle = sr_entry->hb_handle;
		tf_shadow_tcam_clear_hash_entry(ctxt, hb_handle);
		tf_shadow_tcam_clear_sh_entry(ctxt, idx);
	} else {
		sr_entry->refcnt--;
		fparms->ref_cnt = sr_entry->refcnt;
	}

	return 0;
}

int
tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms)
{
	uint16_t len;
	uint8_t rcopy;
	uint64_t bucket;
	uint32_t i, hid32;
	struct tf_shadow_tcam_ctxt *ctxt;
	struct tf_shadow_tcam_db *shadow_db;
	uint16_t hid16, hb_idx, hid_mask, shtbl_idx, shtbl_key, be_valid;
	struct tf_tcam_alloc_search_parms *sparms;
	uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ];
	uint32_t be_avail = TF_SHADOW_TCAM_HB_NUM_ELEM;

	if (!parms || !parms->sparms) {
		TFP_DRV_LOG(ERR, "tcam search with invalid parms\n");
		return -EINVAL;
	}

	memset(tkey, 0, sizeof(tkey));
	sparms = parms->sparms;

	/* Initialize return values to invalid */
	sparms->hit = 0;
	sparms->search_status = REJECT;
	parms->hb_handle = 0;
	sparms->ref_cnt = 0;
	/* see if caller wanted the result */
	rcopy = sparms->result && sparms->result_size;

	shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
	ctxt = tf_shadow_tcam_ctxt_get(shadow_db, sparms->type);
	if (!ctxt) {
		TFP_DRV_LOG(ERR, "%s Unable to get tcam mgr context\n",
			    tf_tcam_tbl_2_str(sparms->type));
		return -EINVAL;
	}

	hid_mask = ctxt->hash_ctxt.hid_mask;

	len = sparms->key_size;

	if (len > TF_SHADOW_TCAM_MAX_KEY_SZ ||
	    !sparms->key || !sparms->mask || !len) {
		TFP_DRV_LOG(ERR, "%s:%s Invalid parms %d : %p : %p\n",
			    tf_dir_2_str(sparms->dir),
			    tf_tcam_tbl_2_str(sparms->type),
			    len,
			    sparms->key,
			    sparms->mask);
		return -EINVAL;
	}

	/* Combine the key and mask */
	for (i = 0; i < len; i++)
		tkey[i] = sparms->key[i] & sparms->mask[i];

	/*
	 * Calculate the crc32
	 * Fold it to create a 16b value
	 * Reduce it to fit the table
	 */
	hid32 = tf_shadow_tcam_crc32_calc(tkey, len);
	hid16 = (uint16_t)(((hid32 >> 16) & 0xffff) ^ (hid32 & 0xffff));
	hb_idx = hid16 & hid_mask;
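
	/*
	 * Example (illustrative): for hid32 == 0x9abc1234 the fold gives
	 * hid16 == 0x9abc ^ 0x1234 == 0x8888; with 256 hash entries
	 * (hid_mask == 0x00ff) the bucket index becomes 0x88.
	 */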

	bucket = ctxt->hash_ctxt.hashtbl[hb_idx];

	if (!bucket) {
		/* empty bucket means a miss and available entry */
		sparms->search_status = MISS;
		parms->hb_handle = TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, 0);
		sparms->idx = 0;
		return 0;
	}

	/* Set the avail to max so we can detect when there is an avail entry */
	be_avail = TF_SHADOW_TCAM_HB_NUM_ELEM;
	for (i = 0; i < TF_SHADOW_TCAM_HB_NUM_ELEM; i++) {
		shtbl_idx = (uint16_t)((bucket >> (i * 16)) & 0xffff);
		be_valid = TF_SHADOW_TCAM_BE_IS_VALID(shtbl_idx);
		if (!be_valid) {
			/* The element is avail, keep going */
			be_avail = i;
			continue;
		}
		/* There is a valid entry, compare it */
		shtbl_key = shtbl_idx & ~TF_SHADOW_TCAM_BE_VALID;
		if (!tf_shadow_tcam_key_cmp(ctxt,
					    sparms->key,
					    sparms->mask,
					    shtbl_key,
					    sparms->key_size)) {
			/*
			 * It matches, increment the ref count if the caller
			 * requested allocation and return the info
			 */
			if (sparms->alloc)
				ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt =
			ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt + 1;

			sparms->hit = 1;
			sparms->search_status = HIT;
			parms->hb_handle =
				TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, i);
			sparms->idx = TF_SHADOW_TCAM_SHIDX_TO_IDX(ctxt,
								  shtbl_key);
			sparms->ref_cnt =
				ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt;

			/* copy the result, if caller wanted it. */
			if (rcopy &&
			    !tf_shadow_tcam_res_cpy(ctxt,
						    sparms->result,
						    shtbl_key,
						    sparms->result_size)) {
				/*
				 * Should never get here, possible memory
				 * corruption or something unexpected.
				 */
				TFP_DRV_LOG(ERR, "Error copying result\n");
				return -EINVAL;
			}

			return 0;
		}
	}

	/* No hits, return avail entry if exists */
	if (be_avail < TF_SHADOW_TCAM_HB_NUM_ELEM) {
		parms->hb_handle =
			TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, be_avail);
		sparms->search_status = MISS;
		sparms->hit = 0;
		sparms->idx = 0;
	} else {
		sparms->search_status = REJECT;
	}

	return 0;
}

int
tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms)
{
	uint16_t idx;
	struct tf_shadow_tcam_ctxt *ctxt;
	struct tf_tcam_set_parms *sparms;
	struct tf_shadow_tcam_db *shadow_db;
	struct tf_shadow_tcam_shadow_result_entry *sr_entry;

	if (!parms || !parms->sparms) {
		TFP_DRV_LOG(ERR, "Null parms\n");
		return -EINVAL;
	}

	sparms = parms->sparms;
	if (!sparms->result || !sparms->result_size) {
		TFP_DRV_LOG(ERR, "%s:%s No result to set.\n",
			    tf_dir_2_str(sparms->dir),
			    tf_tcam_tbl_2_str(sparms->type));
		return -EINVAL;
	}

	shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
	ctxt = tf_shadow_tcam_ctxt_get(shadow_db, sparms->type);
	if (!ctxt) {
		/* We aren't tracking this table, so return success */
		TFP_DRV_LOG(DEBUG, "%s Unable to get tcam mgr context\n",
			    tf_tcam_tbl_2_str(sparms->type));
		return 0;
	}

	idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, sparms->idx);
	if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt)) {
		TFP_DRV_LOG(ERR, "%s:%s Invalid idx(0x%x)\n",
			    tf_dir_2_str(sparms->dir),
			    tf_tcam_tbl_2_str(sparms->type),
			    sparms->idx);
		return -EINVAL;
	}

	/* Write the result table, the key/hash has been written already */
	sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];

	/*
	 * If the handle is not valid, the bind was never called.  We aren't
	 * tracking this entry.
	 */
	if (!TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
		return 0;

	if (sparms->result_size > TF_SHADOW_TCAM_MAX_RESULT_SZ) {
		TFP_DRV_LOG(ERR, "%s:%s Result length %d > %d\n",
			    tf_dir_2_str(sparms->dir),
			    tf_tcam_tbl_2_str(sparms->type),
			    sparms->result_size,
			    TF_SHADOW_TCAM_MAX_RESULT_SZ);
		return -EINVAL;
	}

	memcpy(sr_entry->result, sparms->result, sparms->result_size);
	sr_entry->result_size = sparms->result_size;

	return 0;
}

int
tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms)
{
	struct tf_shadow_tcam_db *shadow_db;
	int i;

	TF_CHECK_PARMS1(parms);

	shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
	if (!shadow_db) {
		TFP_DRV_LOG(DEBUG, "Shadow db is NULL cannot be freed\n");
		return -EINVAL;
	}

	for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
		if (shadow_db->ctxt[i]) {
			tf_shadow_tcam_ctxt_delete(shadow_db->ctxt[i]);
			tfp_free(shadow_db->ctxt[i]);
		}
	}

	tfp_free(shadow_db);

	return 0;
}

/**
 * Allocate the TCAM shadow resources for the search and allocate capability.
 */
int tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms)
{
	int rc;
	int i;
	uint16_t base;
	struct tfp_calloc_parms cparms;
	struct tf_shadow_tcam_db *shadow_db = NULL;

	TF_CHECK_PARMS1(parms);

	/* Build the shadow DB per the request */
	cparms.nitems = 1;
	cparms.size = sizeof(struct tf_shadow_tcam_db);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	shadow_db = (void *)cparms.mem_va;

	for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
		/* If the element didn't request an allocation no need
		 * to create a pool nor verify if we got a reservation.
		 */
		if (!parms->cfg->alloc_cnt[i]) {
			shadow_db->ctxt[i] = NULL;
			continue;
		}

		cparms.nitems = 1;
		cparms.size = sizeof(struct tf_shadow_tcam_ctxt);
		cparms.alignment = 0;
		rc = tfp_calloc(&cparms);
		if (rc)
			goto error;

		shadow_db->ctxt[i] = cparms.mem_va;
		base = parms->cfg->base_addr[i];
		rc = tf_shadow_tcam_ctxt_create(shadow_db->ctxt[i],
						parms->cfg->alloc_cnt[i],
						base);
		if (rc)
			goto error;
	}

	*parms->shadow_db = (void *)shadow_db;

	TFP_DRV_LOG(INFO,
		    "TF SHADOW TCAM - initialized\n");

	return 0;
error:
	for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
		if (shadow_db->ctxt[i]) {
			tf_shadow_tcam_ctxt_delete(shadow_db->ctxt[i]);
			tfp_free(shadow_db->ctxt[i]);
		}
	}

	tfp_free(shadow_db);

	return -ENOMEM;
}