/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_prefetch.h>
#include <rte_random.h>
#include <rte_log.h>
#include <rte_vect.h>

#include "rte_member.h"
#include "rte_member_ht.h"

#if defined(RTE_ARCH_X86)
#include "rte_member_x86.h"
#endif

/* Search bucket for entry with tmp_sig and update set_id */
static inline int
update_entry_search(uint32_t bucket_id, member_sig_t tmp_sig,
		struct member_ht_bucket *buckets,
		member_set_t set_id)
{
	uint32_t i;

	for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
		if (buckets[bucket_id].sigs[i] == tmp_sig) {
			buckets[bucket_id].sets[i] = set_id;
			return 1;
		}
	}
	return 0;
}

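/*
 * Search a bucket for an entry whose signature equals tmp_sig and whose
 * set is valid; on a hit, report the entry's set id through *set_id.
 */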
static inline int
search_bucket_single(uint32_t bucket_id, member_sig_t tmp_sig,
		struct member_ht_bucket *buckets,
		member_set_t *set_id)
{
	uint32_t iter;

	for (iter = 0; iter < RTE_MEMBER_BUCKET_ENTRIES; iter++) {
		if (tmp_sig == buckets[bucket_id].sigs[iter] &&
				buckets[bucket_id].sets[iter] !=
				RTE_MEMBER_NO_MATCH) {
			*set_id = buckets[bucket_id].sets[iter];
			return 1;
		}
	}
	return 0;
}

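/*
 * Collect the set ids of all valid entries in a bucket whose signature
 * equals tmp_sig, stopping once matches_per_key results are gathered.
 */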
static inline void
search_bucket_multi(uint32_t bucket_id, member_sig_t tmp_sig,
		struct member_ht_bucket *buckets,
		uint32_t *counter,
		uint32_t matches_per_key,
		member_set_t *set_id)
{
	uint32_t iter;

	for (iter = 0; iter < RTE_MEMBER_BUCKET_ENTRIES; iter++) {
		if (tmp_sig == buckets[bucket_id].sigs[iter] &&
				buckets[bucket_id].sets[iter] !=
				RTE_MEMBER_NO_MATCH) {
			set_id[*counter] = buckets[bucket_id].sets[iter];
			(*counter)++;
			if (*counter >= matches_per_key)
				return;
		}
	}
}

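/*
 * Create the hash-table based setsummary: size the bucket array from the
 * requested number of keys, invalidate every slot, and select the
 * signature compare function (AVX2 when usable, scalar otherwise).
 */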
int
rte_member_create_ht(struct rte_member_setsum *ss,
		const struct rte_member_parameters *params)
{
	uint32_t i, j;
	uint32_t size_bucket_t;
	uint32_t num_entries = rte_align32pow2(params->num_keys);

	if ((num_entries > RTE_MEMBER_ENTRIES_MAX) ||
			!rte_is_power_of_2(RTE_MEMBER_BUCKET_ENTRIES) ||
			num_entries < RTE_MEMBER_BUCKET_ENTRIES) {
		rte_errno = EINVAL;
		RTE_MEMBER_LOG(ERR,
			"Membership HT create with invalid parameters\n");
		return -EINVAL;
	}

	uint32_t num_buckets = num_entries / RTE_MEMBER_BUCKET_ENTRIES;

	size_bucket_t = sizeof(struct member_ht_bucket);

	struct member_ht_bucket *buckets = rte_zmalloc_socket(NULL,
			num_buckets * size_bucket_t,
			RTE_CACHE_LINE_SIZE, ss->socket_id);

	if (buckets == NULL) {
		RTE_MEMBER_LOG(ERR, "memory allocation failed for HT "
						"setsummary\n");
		return -ENOMEM;
	}

	ss->table = buckets;
	ss->bucket_cnt = num_buckets;
	ss->bucket_mask = num_buckets - 1;
	ss->cache = params->is_cache;

	for (i = 0; i < num_buckets; i++) {
		for (j = 0; j < RTE_MEMBER_BUCKET_ENTRIES; j++)
			buckets[i].sets[j] = RTE_MEMBER_NO_MATCH;
	}
#if defined(RTE_ARCH_X86)
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) &&
			RTE_MEMBER_BUCKET_ENTRIES == 16 &&
			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
		ss->sig_cmp_fn = RTE_MEMBER_COMPARE_AVX2;
	else
#endif
		ss->sig_cmp_fn = RTE_MEMBER_COMPARE_SCALAR;

	RTE_MEMBER_LOG(DEBUG, "Hash table based filter created, "
			"the table has %u entries, %u buckets\n",
			num_entries, num_buckets);
	return 0;
}

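/*
 * Derive a key's signature and its primary/secondary bucket indices; see
 * the comment below for how cache and non-cache modes differ.
 */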
static inline void
get_buckets_index(const struct rte_member_setsum *ss, const void *key,
		uint32_t *prim_bkt, uint32_t *sec_bkt, member_sig_t *sig)
{
	uint32_t first_hash = MEMBER_HASH_FUNC(key, ss->key_len,
						ss->prim_hash_seed);
	uint32_t sec_hash = MEMBER_HASH_FUNC(&first_hash, sizeof(uint32_t),
						ss->sec_hash_seed);
	/*
	 * We use the first hash value for the signature, and the second hash
	 * value to derive the primary and secondary bucket locations.
	 *
	 * For non-cache mode, we use the lower bits for the primary bucket
	 * location, then xor the primary bucket location and the signature
	 * to get the secondary bucket location. This is the "partial-key
	 * cuckoo hashing" proposed in B. Fan et al.'s paper
	 * "Cuckoo Filter: Practically Better Than Bloom". The benefit of
	 * using xor is that one can derive the alternative bucket location
	 * from only the current bucket location and the signature, which
	 * non-cache mode's eviction and deletion process requires, without
	 * storing the alternative hash value or the full key.
	 *
	 * For cache mode, we use the lower bits for the primary bucket
	 * location and the higher bits for the secondary bucket location. In
	 * cache mode, keys are simply overwritten if a bucket is full. We do
	 * not use xor since the lower/higher bits are more independent hash
	 * values and thus should provide a slightly better table load.
	 */
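	/*
	 * For example (illustrative values), with bucket_mask = 0xFF and
	 * sig = 0x1234, a primary bucket of 0x21 gives a secondary bucket
	 * of (0x21 ^ 0x1234) & 0xFF = 0x15, and (0x15 ^ 0x1234) & 0xFF =
	 * 0x21 recovers the primary: either bucket index plus the
	 * signature yields the other.
	 */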
	*sig = first_hash;
	if (ss->cache) {
		*prim_bkt = sec_hash & ss->bucket_mask;
		*sec_bkt = (sec_hash >> 16) & ss->bucket_mask;
	} else {
		*prim_bkt = sec_hash & ss->bucket_mask;
		*sec_bkt = (*prim_bkt ^ *sig) & ss->bucket_mask;
	}
}

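/*
 * Single-key lookup: probe the primary, then the secondary bucket for a
 * matching signature. Returns 1 on a match (set id written to *set_id),
 * 0 otherwise.
 */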
int
rte_member_lookup_ht(const struct rte_member_setsum *ss,
		const void *key, member_set_t *set_id)
{
	uint32_t prim_bucket, sec_bucket;
	member_sig_t tmp_sig;
	struct member_ht_bucket *buckets = ss->table;

	*set_id = RTE_MEMBER_NO_MATCH;
	get_buckets_index(ss, key, &prim_bucket, &sec_bucket, &tmp_sig);

	switch (ss->sig_cmp_fn) {
#if defined(RTE_ARCH_X86) && defined(__AVX2__)
	case RTE_MEMBER_COMPARE_AVX2:
		if (search_bucket_single_avx(prim_bucket, tmp_sig, buckets,
				set_id) ||
				search_bucket_single_avx(sec_bucket, tmp_sig,
					buckets, set_id))
			return 1;
		break;
#endif
	default:
		if (search_bucket_single(prim_bucket, tmp_sig, buckets,
				set_id) ||
				search_bucket_single(sec_bucket, tmp_sig,
					buckets, set_id))
			return 1;
	}

	return 0;
}

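/*
 * Bulk lookup: compute all bucket indices and prefetch the buckets first,
 * then probe each key's two buckets. Returns the number of keys matched;
 * set_id[i] is RTE_MEMBER_NO_MATCH for keys that were not found.
 */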
uint32_t
rte_member_lookup_bulk_ht(const struct rte_member_setsum *ss,
		const void **keys, uint32_t num_keys, member_set_t *set_id)
{
	uint32_t i;
	uint32_t num_matches = 0;
	struct member_ht_bucket *buckets = ss->table;
	member_sig_t tmp_sig[RTE_MEMBER_LOOKUP_BULK_MAX];
	uint32_t prim_buckets[RTE_MEMBER_LOOKUP_BULK_MAX];
	uint32_t sec_buckets[RTE_MEMBER_LOOKUP_BULK_MAX];

	for (i = 0; i < num_keys; i++) {
		get_buckets_index(ss, keys[i], &prim_buckets[i],
				&sec_buckets[i], &tmp_sig[i]);
		rte_prefetch0(&buckets[prim_buckets[i]]);
		rte_prefetch0(&buckets[sec_buckets[i]]);
	}

	for (i = 0; i < num_keys; i++) {
		switch (ss->sig_cmp_fn) {
#if defined(RTE_ARCH_X86) && defined(__AVX2__)
		case RTE_MEMBER_COMPARE_AVX2:
			if (search_bucket_single_avx(prim_buckets[i],
					tmp_sig[i], buckets, &set_id[i]) ||
				search_bucket_single_avx(sec_buckets[i],
					tmp_sig[i], buckets, &set_id[i]))
				num_matches++;
			else
				set_id[i] = RTE_MEMBER_NO_MATCH;
			break;
#endif
		default:
			if (search_bucket_single(prim_buckets[i], tmp_sig[i],
					buckets, &set_id[i]) ||
					search_bucket_single(sec_buckets[i],
					tmp_sig[i], buckets, &set_id[i]))
				num_matches++;
			else
				set_id[i] = RTE_MEMBER_NO_MATCH;
		}
	}
	return num_matches;
}

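/*
 * Multi-match lookup for one key: gather up to match_per_key set ids from
 * the key's two buckets. Returns the number of matches found.
 */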
uint32_t
rte_member_lookup_multi_ht(const struct rte_member_setsum *ss,
		const void *key, uint32_t match_per_key,
		member_set_t *set_id)
{
	uint32_t num_matches = 0;
	uint32_t prim_bucket, sec_bucket;
	member_sig_t tmp_sig;
	struct member_ht_bucket *buckets = ss->table;

	get_buckets_index(ss, key, &prim_bucket, &sec_bucket, &tmp_sig);

	switch (ss->sig_cmp_fn) {
#if defined(RTE_ARCH_X86) && defined(__AVX2__)
	case RTE_MEMBER_COMPARE_AVX2:
		search_bucket_multi_avx(prim_bucket, tmp_sig, buckets,
			&num_matches, match_per_key, set_id);
		if (num_matches < match_per_key)
			search_bucket_multi_avx(sec_bucket, tmp_sig,
				buckets, &num_matches, match_per_key, set_id);
		return num_matches;
#endif
	default:
		search_bucket_multi(prim_bucket, tmp_sig, buckets, &num_matches,
				match_per_key, set_id);
		if (num_matches < match_per_key)
			search_bucket_multi(sec_bucket, tmp_sig,
				buckets, &num_matches, match_per_key, set_id);
		return num_matches;
	}
}

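/*
 * Bulk multi-match lookup: for each key i, write up to match_per_key set
 * ids starting at &set_ids[i * match_per_key] and the per-key count into
 * match_count[i]. Returns the number of keys with at least one match.
 */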
uint32_t
rte_member_lookup_multi_bulk_ht(const struct rte_member_setsum *ss,
		const void **keys, uint32_t num_keys, uint32_t match_per_key,
		uint32_t *match_count,
		member_set_t *set_ids)
{
	uint32_t i;
	uint32_t num_matches = 0;
	struct member_ht_bucket *buckets = ss->table;
	uint32_t match_cnt_tmp;
	member_sig_t tmp_sig[RTE_MEMBER_LOOKUP_BULK_MAX];
	uint32_t prim_buckets[RTE_MEMBER_LOOKUP_BULK_MAX];
	uint32_t sec_buckets[RTE_MEMBER_LOOKUP_BULK_MAX];

	for (i = 0; i < num_keys; i++) {
		get_buckets_index(ss, keys[i], &prim_buckets[i],
				&sec_buckets[i], &tmp_sig[i]);
		rte_prefetch0(&buckets[prim_buckets[i]]);
		rte_prefetch0(&buckets[sec_buckets[i]]);
	}
	for (i = 0; i < num_keys; i++) {
		match_cnt_tmp = 0;

		switch (ss->sig_cmp_fn) {
#if defined(RTE_ARCH_X86) && defined(__AVX2__)
		case RTE_MEMBER_COMPARE_AVX2:
			search_bucket_multi_avx(prim_buckets[i], tmp_sig[i],
				buckets, &match_cnt_tmp, match_per_key,
				&set_ids[i*match_per_key]);
			if (match_cnt_tmp < match_per_key)
				search_bucket_multi_avx(sec_buckets[i],
					tmp_sig[i], buckets, &match_cnt_tmp,
					match_per_key,
					&set_ids[i*match_per_key]);
			match_count[i] = match_cnt_tmp;
			if (match_cnt_tmp != 0)
				num_matches++;
			break;
#endif
		default:
			search_bucket_multi(prim_buckets[i], tmp_sig[i],
				buckets, &match_cnt_tmp, match_per_key,
				&set_ids[i*match_per_key]);
			if (match_cnt_tmp < match_per_key)
				search_bucket_multi(sec_buckets[i], tmp_sig[i],
					buckets, &match_cnt_tmp, match_per_key,
					&set_ids[i*match_per_key]);
			match_count[i] = match_cnt_tmp;
			if (match_cnt_tmp != 0)
				num_matches++;
		}
	}
	return num_matches;
}

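/*
 * Place a signature/set pair in the first free slot of the primary
 * bucket, falling back to the secondary bucket. Returns 0 on success,
 * -1 if both buckets are full.
 */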
static inline int
try_insert(struct member_ht_bucket *buckets, uint32_t prim, uint32_t sec,
		member_sig_t sig, member_set_t set_id)
{
	int i;
	/* If the primary bucket is not full, insert into one free slot */
	for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
		if (buckets[prim].sets[i] == RTE_MEMBER_NO_MATCH) {
			buckets[prim].sigs[i] = sig;
			buckets[prim].sets[i] = set_id;
			return 0;
		}
	}
	/* If the primary bucket is full, try the secondary bucket */
	for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
		if (buckets[sec].sets[i] == RTE_MEMBER_NO_MATCH) {
			buckets[sec].sigs[i] = sig;
			buckets[sec].sets[i] = set_id;
			return 0;
		}
	}
	return -1;
}

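/*
 * Used in cache mode: if either bucket already holds an entry with the
 * same signature, overwrite its set id. Returns 0 on update, -1 if no
 * matching entry exists.
 */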
static inline int
try_update(struct member_ht_bucket *buckets, uint32_t prim, uint32_t sec,
		member_sig_t sig, member_set_t set_id,
		enum rte_member_sig_compare_function cmp_fn)
{
	switch (cmp_fn) {
#if defined(RTE_ARCH_X86) && defined(__AVX2__)
	case RTE_MEMBER_COMPARE_AVX2:
		if (update_entry_search_avx(prim, sig, buckets, set_id) ||
				update_entry_search_avx(sec, sig, buckets,
					set_id))
			return 0;
		break;
#endif
	default:
		if (update_entry_search(prim, sig, buckets, set_id) ||
				update_entry_search(sec, sig, buckets,
					set_id))
			return 0;
	}
	return -1;
}

static inline int
evict_from_bucket(void)
{
	/* For now, we randomly pick one entry to evict */
	return rte_rand() & (RTE_MEMBER_BUCKET_ENTRIES - 1);
}

/*
 * This function is similar to the cuckoo hash make_space function in the
 * hash library.
 */
static inline int
make_space_bucket(const struct rte_member_setsum *ss, uint32_t bkt_idx,
			unsigned int *nr_pushes)
{
	unsigned int i, j;
	int ret;
	struct member_ht_bucket *buckets = ss->table;
	uint32_t next_bucket_idx;
	struct member_ht_bucket *next_bkt[RTE_MEMBER_BUCKET_ENTRIES];
	struct member_ht_bucket *bkt = &buckets[bkt_idx];
	/* MSB is set to indicate if an entry has already been pushed */
	member_set_t flag_mask = 1U << (sizeof(member_set_t) * 8 - 1);

	/*
	 * Push an existing item (search for an entry whose alternative
	 * location has space) to its alternative location
	 */
	for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
		/* Search for space in alternative locations */
		next_bucket_idx = (bkt->sigs[i] ^ bkt_idx) & ss->bucket_mask;
		next_bkt[i] = &buckets[next_bucket_idx];
		for (j = 0; j < RTE_MEMBER_BUCKET_ENTRIES; j++) {
			if (next_bkt[i]->sets[j] == RTE_MEMBER_NO_MATCH)
				break;
		}

		if (j != RTE_MEMBER_BUCKET_ENTRIES)
			break;
	}

	/* An alternative location has spare room (end of recursion) */
	if (i != RTE_MEMBER_BUCKET_ENTRIES) {
		next_bkt[i]->sigs[j] = bkt->sigs[i];
		next_bkt[i]->sets[j] = bkt->sets[i];
		return i;
	}

	/* Pick an entry that has not been pushed yet */
	for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++)
		if ((bkt->sets[i] & flag_mask) == 0)
			break;

	/* All entries have been pushed, so the entry cannot be added */
	if (i == RTE_MEMBER_BUCKET_ENTRIES ||
			++(*nr_pushes) > RTE_MEMBER_MAX_PUSHES)
		return -ENOSPC;

	next_bucket_idx = (bkt->sigs[i] ^ bkt_idx) & ss->bucket_mask;
	/* Set the flag to indicate that this entry is going to be pushed */
	bkt->sets[i] |= flag_mask;

	/* Need room in the alternative bucket to insert the pushed entry */
	ret = make_space_bucket(ss, next_bucket_idx, nr_pushes);
	/*
	 * After the recursive call: clear the flag, then insert the pushed
	 * entry in its alternative location if successful, or return the
	 * error.
	 */
	bkt->sets[i] &= ~flag_mask;
	if (ret >= 0) {
		next_bkt[i]->sigs[ret] = bkt->sigs[i];
		next_bkt[i]->sets[ret] = bkt->sets[i];
		return i;
	} else
		return ret;
}

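/*
 * Add a key's signature and target set id to the setsummary. A minimal
 * usage sketch (illustrative only; handle_full() is a hypothetical
 * application-defined handler, not part of this library):
 *
 *	member_set_t set = 1;
 *	if (rte_member_add_ht(ss, &key, set) < 0)
 *		handle_full();
 *
 * Returns 0 when the entry lands in a free slot (or updates a cache-mode
 * entry in place), 1 when insertion required eviction (cache mode) or
 * cuckoo displacement (non-cache mode), or a negative errno-style value
 * on failure.
 */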
int
rte_member_add_ht(const struct rte_member_setsum *ss,
		const void *key, member_set_t set_id)
{
	int ret;
	unsigned int nr_pushes = 0;
	uint32_t prim_bucket, sec_bucket;
	member_sig_t tmp_sig;
	struct member_ht_bucket *buckets = ss->table;
	member_set_t flag_mask = 1U << (sizeof(member_set_t) * 8 - 1);

	if (set_id == RTE_MEMBER_NO_MATCH || (set_id & flag_mask) != 0)
		return -EINVAL;

	get_buckets_index(ss, key, &prim_bucket, &sec_bucket, &tmp_sig);

	/*
	 * If it is a cache based setsummary, we try overwriting (updating)
	 * an existing entry with the same signature first. In cache mode, we
	 * allow false negatives and only cache the most recent keys.
	 *
	 * For non-cache mode, we do not update an existing entry with the
	 * same signature. This is because if two keys with the same
	 * signature update each other, a false negative may happen, which
	 * is not the expected behavior for a non-cache setsummary.
	 */
	if (ss->cache) {
		ret = try_update(buckets, prim_bucket, sec_bucket, tmp_sig,
					set_id, ss->sig_cmp_fn);
		if (ret != -1)
			return ret;
	}
	/* If not full then insert into one slot */
	ret = try_insert(buckets, prim_bucket, sec_bucket, tmp_sig, set_id);
	if (ret != -1)
		return ret;

	/* Pseudo-randomly pick prim or sec (low bit of the signature) for
	 * recursive displacement
	 */
	uint32_t select_bucket = (tmp_sig & 1U) ? prim_bucket : sec_bucket;
	if (ss->cache) {
		ret = evict_from_bucket();
		buckets[select_bucket].sigs[ret] = tmp_sig;
		buckets[select_bucket].sets[ret] = set_id;
		return 1;
	}

	ret = make_space_bucket(ss, select_bucket, &nr_pushes);
	if (ret >= 0) {
		buckets[select_bucket].sigs[ret] = tmp_sig;
		buckets[select_bucket].sets[ret] = set_id;
		ret = 1;
	}

	return ret;
}

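/* Free the bucket array backing the setsummary */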
void
rte_member_free_ht(struct rte_member_setsum *ss)
{
	rte_free(ss->table);
}

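/*
 * Delete a key: invalidate the first entry in either bucket whose
 * signature and set id both match. Returns 0 on success, -ENOENT if no
 * such entry exists.
 */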
int
rte_member_delete_ht(const struct rte_member_setsum *ss, const void *key,
		member_set_t set_id)
{
	int i;
	uint32_t prim_bucket, sec_bucket;
	member_sig_t tmp_sig;
	struct member_ht_bucket *buckets = ss->table;

	get_buckets_index(ss, key, &prim_bucket, &sec_bucket, &tmp_sig);

	for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
		if (tmp_sig == buckets[prim_bucket].sigs[i] &&
				set_id == buckets[prim_bucket].sets[i]) {
			buckets[prim_bucket].sets[i] = RTE_MEMBER_NO_MATCH;
			return 0;
		}
	}

	for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
		if (tmp_sig == buckets[sec_bucket].sigs[i] &&
				set_id == buckets[sec_bucket].sets[i]) {
			buckets[sec_bucket].sets[i] = RTE_MEMBER_NO_MATCH;
			return 0;
		}
	}
	return -ENOENT;
}

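/* Reset the setsummary by invalidating every slot of every bucket */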
void
rte_member_reset_ht(const struct rte_member_setsum *ss)
{
	uint32_t i, j;
	struct member_ht_bucket *buckets = ss->table;

	for (i = 0; i < ss->bucket_cnt; i++) {
		for (j = 0; j < RTE_MEMBER_BUCKET_ENTRIES; j++)
			buckets[i].sets[j] = RTE_MEMBER_NO_MATCH;
	}
}
560