/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <string.h>
#include <stdio.h>

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"
#include "rte_lru.h"

#define KEY_SIZE 16

#define KEYS_PER_BUCKET 4

#define RTE_BUCKET_ENTRY_VALID 0x1LLU

#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(table, val) \
        table->stats.n_pkts_in += val
#define RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(table, val) \
        table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(table, val)

#endif
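
/*
 * When RTE_TABLE_STATS_COLLECT is not defined, the two macros above expand
 * to nothing, so statistics collection adds zero cost to the fast path.
 */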

#ifdef RTE_ARCH_64
struct rte_bucket_4_16 {
        /* Cache line 0 */
        uint64_t signature[4 + 1];
        uint64_t lru_list;
        struct rte_bucket_4_16 *next;
        uint64_t next_valid;

        /* Cache line 1 */
        uint64_t key[4][2];

        /* Cache line 2 */
        uint8_t data[0];
};
#else
struct rte_bucket_4_16 {
        /* Cache line 0 */
        uint64_t signature[4 + 1];
        uint64_t lru_list;
        struct rte_bucket_4_16 *next;
        uint32_t pad;
        uint64_t next_valid;

        /* Cache line 1 */
        uint64_t key[4][2];

        /* Cache line 2 */
        uint8_t data[0];
};
#endif
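
/*
 * Note: data[0] is a zero-length array (GCC-style flexible array member);
 * the entry data for the four keys is stored immediately after the fixed
 * part of the bucket, KEYS_PER_BUCKET * entry_size bytes in total.
 */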

struct rte_table_hash {
        struct rte_table_stats stats;

        /* Input parameters */
        uint32_t n_buckets;
        uint32_t key_size;
        uint32_t entry_size;
        uint32_t bucket_size;
        uint32_t key_offset;
        uint64_t key_mask[2];
        rte_table_hash_op_hash f_hash;
        uint64_t seed;

        /* Extendible buckets */
        uint32_t n_buckets_ext;
        uint32_t stack_pos;
        uint32_t *stack;

        /* Lookup table */
        uint8_t memory[0] __rte_cache_aligned;
};

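/*
 * The 16-byte key is handled as two 64-bit words. Keys are stored already
 * masked (see keycpy()), so comparisons only need to mask the candidate
 * key against the table's key_mask.
 */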
static int
keycmp(void *a, void *b, void *b_mask)
{
        uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;

        return (a64[0] != (b64[0] & b_mask64[0])) ||
                (a64[1] != (b64[1] & b_mask64[1]));
}

static void
keycpy(void *dst, void *src, void *src_mask)
{
        uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;

        dst64[0] = src64[0] & src_mask64[0];
        dst64[1] = src64[1] & src_mask64[1];
}

static int
check_params_create(struct rte_table_hash_params *params)
{
        /* name */
        if (params->name == NULL) {
                RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
                return -EINVAL;
        }

        /* key_size */
        if (params->key_size != KEY_SIZE) {
                RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
                return -EINVAL;
        }

        /* n_keys */
        if (params->n_keys == 0) {
                RTE_LOG(ERR, TABLE, "%s: n_keys is zero\n", __func__);
                return -EINVAL;
        }

        /* n_buckets */
        if ((params->n_buckets == 0) ||
                (!rte_is_power_of_2(params->n_buckets))) {
                RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
                return -EINVAL;
        }

        /* f_hash */
        if (params->f_hash == NULL) {
                RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
                        __func__);
                return -EINVAL;
        }

        return 0;
}

static void *
rte_table_hash_create_key16_lru(void *params,
        int socket_id,
        uint32_t entry_size)
{
        struct rte_table_hash_params *p = params;
        struct rte_table_hash *f;
        uint64_t bucket_size, total_size;
        uint32_t n_buckets, i;

        /* Check input parameters */
        if ((check_params_create(p) != 0) ||
                ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
                ((sizeof(struct rte_bucket_4_16) % 64) != 0))
                return NULL;

        /*
         * Table dimensioning
         *
         * Objective: Pick the number of buckets (n_buckets) so that there is a
         * chance to store n_keys keys in the table.
         *
         * Note: Since the buckets do not get extended, it is not possible to
         * guarantee that n_keys keys can be stored in the table at any time. In
         * the worst case scenario, when all the n_keys fall into the same
         * bucket, only a maximum of KEYS_PER_BUCKET keys will be stored in the
         * table. This case defeats the purpose of the hash table. It indicates
         * an unsuitable f_hash or n_keys to n_buckets ratio.
         *
         * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
         */
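        /*
         * Example: for n_keys = 1000 and KEYS_PER_BUCKET = 4, MIN(n_buckets) =
         * (1000 + 3) / 4 = 250, which rte_align32pow2() below rounds up to the
         * next power of two, 256.
         */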
        n_buckets = rte_align32pow2(
                (p->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
        n_buckets = RTE_MAX(n_buckets, p->n_buckets);

        /* Memory allocation */
        bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_16) +
                KEYS_PER_BUCKET * entry_size);
        total_size = sizeof(struct rte_table_hash) + n_buckets * bucket_size;

        if (total_size > SIZE_MAX) {
                RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
                        "for hash table %s\n",
                        __func__, total_size, p->name);
                return NULL;
        }

        f = rte_zmalloc_socket(p->name,
                (size_t)total_size,
                RTE_CACHE_LINE_SIZE,
                socket_id);
        if (f == NULL) {
                RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
                        "for hash table %s\n",
                        __func__, total_size, p->name);
                return NULL;
        }
        RTE_LOG(INFO, TABLE, "%s: Hash table %s memory footprint "
                "is %" PRIu64 " bytes\n",
                __func__, p->name, total_size);

        /* Memory initialization */
        f->n_buckets = n_buckets;
        f->key_size = KEY_SIZE;
        f->entry_size = entry_size;
        f->bucket_size = bucket_size;
        f->key_offset = p->key_offset;
        f->f_hash = p->f_hash;
        f->seed = p->seed;

        if (p->key_mask != NULL) {
                f->key_mask[0] = ((uint64_t *)p->key_mask)[0];
                f->key_mask[1] = ((uint64_t *)p->key_mask)[1];
        } else {
                f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
                f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
        }

        for (i = 0; i < n_buckets; i++) {
                struct rte_bucket_4_16 *bucket;

                bucket = (struct rte_bucket_4_16 *) &f->memory[i *
                        f->bucket_size];
                lru_init(bucket);
        }

        return f;
}

static int
rte_table_hash_free_key16_lru(void *table)
{
        struct rte_table_hash *f = table;

        /* Check input parameters */
        if (f == NULL) {
                RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
                return -EINVAL;
        }

        rte_free(f);
        return 0;
}

static int
rte_table_hash_entry_add_key16_lru(
        void *table,
        void *key,
        void *entry,
        int *key_found,
        void **entry_ptr)
{
        struct rte_table_hash *f = table;
        struct rte_bucket_4_16 *bucket;
        uint64_t signature, pos;
        uint32_t bucket_index, i;

        signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
        bucket_index = signature & (f->n_buckets - 1);
        bucket = (struct rte_bucket_4_16 *)
                &f->memory[bucket_index * f->bucket_size];
        signature |= RTE_BUCKET_ENTRY_VALID;

        /* Key is present in the bucket */
        for (i = 0; i < 4; i++) {
                uint64_t bucket_signature = bucket->signature[i];
                uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

                if ((bucket_signature == signature) &&
                        (keycmp(bucket_key, key, f->key_mask) == 0)) {
                        uint8_t *bucket_data = &bucket->data[i * f->entry_size];

                        memcpy(bucket_data, entry, f->entry_size);
                        lru_update(bucket, i);
                        *key_found = 1;
                        *entry_ptr = (void *) bucket_data;
                        return 0;
                }
        }

        /* Key is not present in the bucket */
        for (i = 0; i < 4; i++) {
                uint64_t bucket_signature = bucket->signature[i];
                uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

                if (bucket_signature == 0) {
                        uint8_t *bucket_data = &bucket->data[i * f->entry_size];

                        bucket->signature[i] = signature;
                        keycpy(bucket_key, key, f->key_mask);
                        memcpy(bucket_data, entry, f->entry_size);
                        lru_update(bucket, i);
                        *key_found = 0;
                        *entry_ptr = (void *) bucket_data;

                        return 0;
                }
        }

        /* Bucket full: replace LRU entry */
        pos = lru_pos(bucket);
        bucket->signature[pos] = signature;
        keycpy(&bucket->key[pos], key, f->key_mask);
        memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
        lru_update(bucket, pos);
        *key_found = 0;
        *entry_ptr = (void *) &bucket->data[pos * f->entry_size];

        return 0;
}

static int
rte_table_hash_entry_delete_key16_lru(
        void *table,
        void *key,
        int *key_found,
        void *entry)
{
        struct rte_table_hash *f = table;
        struct rte_bucket_4_16 *bucket;
        uint64_t signature;
        uint32_t bucket_index, i;

        signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
        bucket_index = signature & (f->n_buckets - 1);
        bucket = (struct rte_bucket_4_16 *)
                &f->memory[bucket_index * f->bucket_size];
        signature |= RTE_BUCKET_ENTRY_VALID;

        /* Key is present in the bucket */
        for (i = 0; i < 4; i++) {
                uint64_t bucket_signature = bucket->signature[i];
                uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

                if ((bucket_signature == signature) &&
                        (keycmp(bucket_key, key, f->key_mask) == 0)) {
                        uint8_t *bucket_data = &bucket->data[i * f->entry_size];

                        bucket->signature[i] = 0;
                        *key_found = 1;
                        if (entry)
                                memcpy(entry, bucket_data, f->entry_size);
                        return 0;
                }
        }

        /* Key is not present in the bucket */
        *key_found = 0;
        return 0;
}

static void *
rte_table_hash_create_key16_ext(void *params,
        int socket_id,
        uint32_t entry_size)
{
        struct rte_table_hash_params *p = params;
        struct rte_table_hash *f;
        uint64_t bucket_size, stack_size, total_size;
        uint32_t n_buckets_ext, i;

        /* Check input parameters */
        if ((check_params_create(p) != 0) ||
                ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
                ((sizeof(struct rte_bucket_4_16) % 64) != 0))
                return NULL;

        /*
         * Table dimensioning
         *
         * Objective: Pick the number of bucket extensions (n_buckets_ext) so
         * that it is guaranteed that n_keys keys can be stored in the table at
         * any time.
         *
         * The worst case scenario takes place when all the n_keys keys fall
         * into the same bucket. Actually, due to the KEYS_PER_BUCKET scheme,
         * the worst case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys
         * fall into the same bucket, while the remaining (KEYS_PER_BUCKET - 1)
         * keys each fall into a different bucket. This case defeats the purpose
         * of the hash table. It indicates an unsuitable f_hash or n_keys to
         * n_buckets ratio.
         *
         * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
         */
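        /*
         * Example: for n_keys = 1000 and KEYS_PER_BUCKET = 4,
         * n_buckets_ext = 1000 / 4 + 4 - 1 = 253 extension buckets.
         */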
        n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;

        /* Memory allocation */
        bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_16) +
                KEYS_PER_BUCKET * entry_size);
        stack_size = RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
        total_size = sizeof(struct rte_table_hash) +
                (p->n_buckets + n_buckets_ext) * bucket_size + stack_size;
        if (total_size > SIZE_MAX) {
                RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
                        "for hash table %s\n",
                        __func__, total_size, p->name);
                return NULL;
        }

        f = rte_zmalloc_socket(p->name,
                (size_t)total_size,
                RTE_CACHE_LINE_SIZE,
                socket_id);
        if (f == NULL) {
                RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
                        "for hash table %s\n",
                        __func__, total_size, p->name);
                return NULL;
        }
        RTE_LOG(INFO, TABLE, "%s: Hash table %s memory footprint "
                "is %" PRIu64 " bytes\n",
                __func__, p->name, total_size);

        /* Memory initialization */
        f->n_buckets = p->n_buckets;
        f->key_size = KEY_SIZE;
        f->entry_size = entry_size;
        f->bucket_size = bucket_size;
        f->key_offset = p->key_offset;
        f->f_hash = p->f_hash;
        f->seed = p->seed;

        f->n_buckets_ext = n_buckets_ext;
        f->stack_pos = n_buckets_ext;
        f->stack = (uint32_t *)
                &f->memory[(p->n_buckets + n_buckets_ext) * f->bucket_size];

        if (p->key_mask != NULL) {
                f->key_mask[0] = (((uint64_t *)p->key_mask)[0]);
                f->key_mask[1] = (((uint64_t *)p->key_mask)[1]);
        } else {
                f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
                f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
        }

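        /* LIFO free list: initially, every extension bucket is available. */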
        for (i = 0; i < n_buckets_ext; i++)
                f->stack[i] = i;

        return f;
}

static int
rte_table_hash_free_key16_ext(void *table)
{
        struct rte_table_hash *f = table;

        /* Check input parameters */
        if (f == NULL) {
                RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
                return -EINVAL;
        }

        rte_free(f);
        return 0;
}

static int
rte_table_hash_entry_add_key16_ext(
        void *table,
        void *key,
        void *entry,
        int *key_found,
        void **entry_ptr)
{
        struct rte_table_hash *f = table;
        struct rte_bucket_4_16 *bucket0, *bucket, *bucket_prev;
        uint64_t signature;
        uint32_t bucket_index, i;

        signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
        bucket_index = signature & (f->n_buckets - 1);
        bucket0 = (struct rte_bucket_4_16 *)
                &f->memory[bucket_index * f->bucket_size];
        signature |= RTE_BUCKET_ENTRY_VALID;

        /* Key is present in the bucket */
        for (bucket = bucket0; bucket != NULL; bucket = bucket->next)
                for (i = 0; i < 4; i++) {
                        uint64_t bucket_signature = bucket->signature[i];
                        uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

                        if ((bucket_signature == signature) &&
                                (keycmp(bucket_key, key, f->key_mask) == 0)) {
                                uint8_t *bucket_data = &bucket->data[i *
                                        f->entry_size];

                                memcpy(bucket_data, entry, f->entry_size);
                                *key_found = 1;
                                *entry_ptr = (void *) bucket_data;
                                return 0;
                        }
                }

        /* Key is not present in the bucket */
        for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
                bucket_prev = bucket, bucket = bucket->next)
                for (i = 0; i < 4; i++) {
                        uint64_t bucket_signature = bucket->signature[i];
                        uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

                        if (bucket_signature == 0) {
                                uint8_t *bucket_data = &bucket->data[i *
                                        f->entry_size];

                                bucket->signature[i] = signature;
                                keycpy(bucket_key, key, f->key_mask);
                                memcpy(bucket_data, entry, f->entry_size);
                                *key_found = 0;
                                *entry_ptr = (void *) bucket_data;

                                return 0;
                        }
                }

        /* Bucket full: extend bucket */
        if (f->stack_pos > 0) {
                bucket_index = f->stack[--f->stack_pos];

                bucket = (struct rte_bucket_4_16 *) &f->memory[(f->n_buckets +
                        bucket_index) * f->bucket_size];
                bucket_prev->next = bucket;
                bucket_prev->next_valid = 1;

                bucket->signature[0] = signature;
                keycpy(&bucket->key[0], key, f->key_mask);
                memcpy(&bucket->data[0], entry, f->entry_size);
                *key_found = 0;
                *entry_ptr = (void *) &bucket->data[0];
                return 0;
        }

        return -ENOSPC;
}

static int
rte_table_hash_entry_delete_key16_ext(
        void *table,
        void *key,
        int *key_found,
        void *entry)
{
        struct rte_table_hash *f = table;
        struct rte_bucket_4_16 *bucket0, *bucket, *bucket_prev;
        uint64_t signature;
        uint32_t bucket_index, i;

        signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
        bucket_index = signature & (f->n_buckets - 1);
        bucket0 = (struct rte_bucket_4_16 *)
                &f->memory[bucket_index * f->bucket_size];
        signature |= RTE_BUCKET_ENTRY_VALID;

        /* Key is present in the bucket */
        for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
                bucket_prev = bucket, bucket = bucket->next)
                for (i = 0; i < 4; i++) {
                        uint64_t bucket_signature = bucket->signature[i];
                        uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

                        if ((bucket_signature == signature) &&
                                (keycmp(bucket_key, key, f->key_mask) == 0)) {
                                uint8_t *bucket_data = &bucket->data[i *
                                        f->entry_size];

                                bucket->signature[i] = 0;
                                *key_found = 1;
                                if (entry)
                                        memcpy(entry, bucket_data,
                                                f->entry_size);

                                if ((bucket->signature[0] == 0) &&
                                        (bucket->signature[1] == 0) &&
                                        (bucket->signature[2] == 0) &&
                                        (bucket->signature[3] == 0) &&
                                        (bucket_prev != NULL)) {
                                        bucket_prev->next = bucket->next;
                                        bucket_prev->next_valid =
                                                bucket->next_valid;

                                        memset(bucket, 0,
                                                sizeof(struct rte_bucket_4_16));
                                        bucket_index = (((uint8_t *)bucket -
                                                (uint8_t *)f->memory) /
                                                f->bucket_size) - f->n_buckets;
                                        f->stack[f->stack_pos++] = bucket_index;
                                }

                                return 0;
                        }
                }

        /* Key is not present in the bucket */
        *key_found = 0;
        return 0;
}

#define lookup_key16_cmp(key_in, bucket, pos, f) \
{ \
        uint64_t xor[4][2], or[4], signature[4], k[2]; \
\
        k[0] = key_in[0] & f->key_mask[0]; \
        k[1] = key_in[1] & f->key_mask[1]; \
        signature[0] = (~bucket->signature[0]) & 1; \
        signature[1] = (~bucket->signature[1]) & 1; \
        signature[2] = (~bucket->signature[2]) & 1; \
        signature[3] = (~bucket->signature[3]) & 1; \
\
        xor[0][0] = k[0] ^ bucket->key[0][0]; \
        xor[0][1] = k[1] ^ bucket->key[0][1]; \
\
        xor[1][0] = k[0] ^ bucket->key[1][0]; \
        xor[1][1] = k[1] ^ bucket->key[1][1]; \
\
        xor[2][0] = k[0] ^ bucket->key[2][0]; \
        xor[2][1] = k[1] ^ bucket->key[2][1]; \
\
        xor[3][0] = k[0] ^ bucket->key[3][0]; \
        xor[3][1] = k[1] ^ bucket->key[3][1]; \
\
        or[0] = xor[0][0] | xor[0][1] | signature[0]; \
        or[1] = xor[1][0] | xor[1][1] | signature[1]; \
        or[2] = xor[2][0] | xor[2][1] | signature[2]; \
        or[3] = xor[3][0] | xor[3][1] | signature[3]; \
\
        pos = 4; \
        if (or[0] == 0) \
                pos = 0; \
        if (or[1] == 0) \
                pos = 1; \
        if (or[2] == 0) \
                pos = 2; \
        if (or[3] == 0) \
                pos = 3; \
}
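
/*
 * Notes on the compare above: each of the four stored keys is XORed
 * word-by-word against the masked input key and the two words are ORed
 * together, so or[i] == 0 only on an exact 16-byte match of a valid slot
 * (the inverted valid bit is ORed in to disqualify empty slots). On a miss,
 * pos stays 4 and indexes the fifth, guard element of bucket->signature[],
 * which is never written and thus has a zero valid bit; entries[] may then
 * transiently point past the bucket's data, but the cleared hit bit means
 * it is never dereferenced.
 */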

#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask, f) \
{ \
        uint64_t pkt_mask; \
        uint32_t key_offset = f->key_offset; \
\
        pkt0_index = __builtin_ctzll(pkts_mask); \
        pkt_mask = 1LLU << pkt0_index; \
        pkts_mask &= ~pkt_mask; \
\
        mbuf0 = pkts[pkt0_index]; \
        rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, key_offset)); \
}

#define lookup1_stage1(mbuf1, bucket1, f) \
{ \
        uint64_t *key; \
        uint64_t signature = 0; \
        uint32_t bucket_index; \
\
        key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset); \
        signature = f->f_hash(key, f->key_mask, KEY_SIZE, f->seed); \
\
        bucket_index = signature & (f->n_buckets - 1); \
        bucket1 = (struct rte_bucket_4_16 *) \
                &f->memory[bucket_index * f->bucket_size]; \
        rte_prefetch0(bucket1); \
        rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE)); \
}

#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
        pkts_mask_out, entries, f) \
{ \
        void *a; \
        uint64_t pkt_mask; \
        uint64_t *key; \
        uint32_t pos; \
\
        key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset); \
        lookup_key16_cmp(key, bucket2, pos, f); \
\
        pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index; \
        pkts_mask_out |= pkt_mask; \
\
        a = (void *) &bucket2->data[pos * f->entry_size]; \
        rte_prefetch0(a); \
        entries[pkt2_index] = a; \
        lru_update(bucket2, pos); \
}

#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out, entries, \
        buckets_mask, buckets, keys, f) \
{ \
        struct rte_bucket_4_16 *bucket_next; \
        void *a; \
        uint64_t pkt_mask, bucket_mask; \
        uint64_t *key; \
        uint32_t pos; \
\
        key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset); \
        lookup_key16_cmp(key, bucket2, pos, f); \
\
        pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index; \
        pkts_mask_out |= pkt_mask; \
\
        a = (void *) &bucket2->data[pos * f->entry_size]; \
        rte_prefetch0(a); \
        entries[pkt2_index] = a; \
\
        bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index); \
        buckets_mask |= bucket_mask; \
        bucket_next = bucket2->next; \
        buckets[pkt2_index] = bucket_next; \
        keys[pkt2_index] = key; \
}

#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, entries, \
        buckets_mask, f) \
{ \
        struct rte_bucket_4_16 *bucket, *bucket_next; \
        void *a; \
        uint64_t pkt_mask, bucket_mask; \
        uint64_t *key; \
        uint32_t pos; \
\
        bucket = buckets[pkt_index]; \
        key = keys[pkt_index]; \
        lookup_key16_cmp(key, bucket, pos, f); \
\
        pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index; \
        pkts_mask_out |= pkt_mask; \
\
        a = (void *) &bucket->data[pos * f->entry_size]; \
        rte_prefetch0(a); \
        entries[pkt_index] = a; \
\
        bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index); \
        buckets_mask |= bucket_mask; \
        bucket_next = bucket->next; \
        rte_prefetch0(bucket_next); \
        rte_prefetch0((void *)(((uintptr_t) bucket_next) + \
                RTE_CACHE_LINE_SIZE)); \
        buckets[pkt_index] = bucket_next; \
        keys[pkt_index] = key; \
}
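
/*
 * Extendible-bucket bookkeeping: a packet whose key missed in the current
 * bucket, but whose bucket has a valid next pointer, gets its bit set in
 * buckets_mask. Such packets are re-examined on the next bucket in the
 * chain by lookup_grinder(), one chain hop per grinding round.
 */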

#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, \
        pkts, pkts_mask, f) \
{ \
        uint64_t pkt00_mask, pkt01_mask; \
        uint32_t key_offset = f->key_offset; \
\
        pkt00_index = __builtin_ctzll(pkts_mask); \
        pkt00_mask = 1LLU << pkt00_index; \
        pkts_mask &= ~pkt00_mask; \
\
        mbuf00 = pkts[pkt00_index]; \
        rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \
\
        pkt01_index = __builtin_ctzll(pkts_mask); \
        pkt01_mask = 1LLU << pkt01_index; \
        pkts_mask &= ~pkt01_mask; \
\
        mbuf01 = pkts[pkt01_index]; \
        rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset)); \
}

#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index, \
        mbuf00, mbuf01, pkts, pkts_mask, f) \
{ \
        uint64_t pkt00_mask, pkt01_mask; \
        uint32_t key_offset = f->key_offset; \
\
        pkt00_index = __builtin_ctzll(pkts_mask); \
        pkt00_mask = 1LLU << pkt00_index; \
        pkts_mask &= ~pkt00_mask; \
\
        mbuf00 = pkts[pkt00_index]; \
        rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \
\
        pkt01_index = __builtin_ctzll(pkts_mask); \
        if (pkts_mask == 0) \
                pkt01_index = pkt00_index; \
        pkt01_mask = 1LLU << pkt01_index; \
        pkts_mask &= ~pkt01_mask; \
\
        mbuf01 = pkts[pkt01_index]; \
        rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset)); \
}
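
/*
 * Note: when an odd number of packets remains, the macro above loads the
 * last packet into both pipeline lanes (pkt01_index falls back to
 * pkt00_index). The duplicated lookup is harmless: both lanes compute the
 * same result and write it to the same entries[] slot.
 */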

#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
{ \
        uint64_t *key10, *key11; \
        uint64_t signature10, signature11; \
        uint32_t bucket10_index, bucket11_index; \
\
        key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, f->key_offset); \
        signature10 = f->f_hash(key10, f->key_mask, KEY_SIZE, f->seed); \
        bucket10_index = signature10 & (f->n_buckets - 1); \
        bucket10 = (struct rte_bucket_4_16 *) \
                &f->memory[bucket10_index * f->bucket_size]; \
        rte_prefetch0(bucket10); \
        rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE)); \
\
        key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, f->key_offset); \
        signature11 = f->f_hash(key11, f->key_mask, KEY_SIZE, f->seed); \
        bucket11_index = signature11 & (f->n_buckets - 1); \
        bucket11 = (struct rte_bucket_4_16 *) \
                &f->memory[bucket11_index * f->bucket_size]; \
        rte_prefetch0(bucket11); \
        rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE)); \
}

#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21, \
        bucket20, bucket21, pkts_mask_out, entries, f) \
{ \
        void *a20, *a21; \
        uint64_t pkt20_mask, pkt21_mask; \
        uint64_t *key20, *key21; \
        uint32_t pos20, pos21; \
\
        key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset); \
        key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset); \
\
        lookup_key16_cmp(key20, bucket20, pos20, f); \
        lookup_key16_cmp(key21, bucket21, pos21, f); \
\
        pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index; \
        pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index; \
        pkts_mask_out |= pkt20_mask | pkt21_mask; \
\
        a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
        a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
        rte_prefetch0(a20); \
        rte_prefetch0(a21); \
        entries[pkt20_index] = a20; \
        entries[pkt21_index] = a21; \
        lru_update(bucket20, pos20); \
        lru_update(bucket21, pos21); \
}

#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
        bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f) \
{ \
        struct rte_bucket_4_16 *bucket20_next, *bucket21_next; \
        void *a20, *a21; \
        uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask; \
        uint64_t *key20, *key21; \
        uint32_t pos20, pos21; \
\
        key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset); \
        key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset); \
\
        lookup_key16_cmp(key20, bucket20, pos20, f); \
        lookup_key16_cmp(key21, bucket21, pos21, f); \
\
        pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index; \
        pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index; \
        pkts_mask_out |= pkt20_mask | pkt21_mask; \
\
        a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
        a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
        rte_prefetch0(a20); \
        rte_prefetch0(a21); \
        entries[pkt20_index] = a20; \
        entries[pkt21_index] = a21; \
\
        bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index); \
        bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index); \
        buckets_mask |= bucket20_mask | bucket21_mask; \
        bucket20_next = bucket20->next; \
        bucket21_next = bucket21->next; \
        buckets[pkt20_index] = bucket20_next; \
        buckets[pkt21_index] = bucket21_next; \
        keys[pkt20_index] = key20; \
        keys[pkt21_index] = key21; \
}

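/*
 * The two lookup functions below run packets through a software pipeline,
 * two packets per stage: stage 0 selects the next two packets and prefetches
 * their keys, stage 1 hashes the keys and prefetches the matching buckets,
 * stage 2 compares the keys and emits the results. The pipeline is filled,
 * run, then flushed; bursts of fewer than five packets skip it and use the
 * single-packet macros instead.
 */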
static int
rte_table_hash_lookup_key16_lru(
        void *table,
        struct rte_mbuf **pkts,
        uint64_t pkts_mask,
        uint64_t *lookup_hit_mask,
        void **entries)
{
        struct rte_table_hash *f = (struct rte_table_hash *) table;
        struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
        struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
        uint32_t pkt00_index, pkt01_index, pkt10_index;
        uint32_t pkt11_index, pkt20_index, pkt21_index;
        uint64_t pkts_mask_out = 0;

        __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);

        RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in);
        /* Cannot run the pipeline with fewer than 5 packets */
        if (__builtin_popcountll(pkts_mask) < 5) {
                for ( ; pkts_mask; ) {
                        struct rte_bucket_4_16 *bucket;
                        struct rte_mbuf *mbuf;
                        uint32_t pkt_index;

                        lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
                        lookup1_stage1(mbuf, bucket, f);
                        lookup1_stage2_lru(pkt_index, mbuf, bucket,
                                pkts_mask_out, entries, f);
                }

                *lookup_hit_mask = pkts_mask_out;
                RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
                        __builtin_popcountll(pkts_mask_out));
                return 0;
        }

        /*
         * Pipeline fill
         */
        /* Pipeline stage 0 */
        lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
                pkts_mask, f);

        /* Pipeline feed */
        mbuf10 = mbuf00;
        mbuf11 = mbuf01;
        pkt10_index = pkt00_index;
        pkt11_index = pkt01_index;

        /* Pipeline stage 0 */
        lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
                pkts_mask, f);

        /* Pipeline stage 1 */
        lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

        /*
         * Pipeline run
         */
        for ( ; pkts_mask; ) {
                /* Pipeline feed */
                bucket20 = bucket10;
                bucket21 = bucket11;
                mbuf20 = mbuf10;
                mbuf21 = mbuf11;
                mbuf10 = mbuf00;
                mbuf11 = mbuf01;
                pkt20_index = pkt10_index;
                pkt21_index = pkt11_index;
                pkt10_index = pkt00_index;
                pkt11_index = pkt01_index;

                /* Pipeline stage 0 */
                lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
                        mbuf00, mbuf01, pkts, pkts_mask, f);

                /* Pipeline stage 1 */
                lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

                /* Pipeline stage 2 */
                lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
                        bucket20, bucket21, pkts_mask_out, entries, f);
        }

        /*
         * Pipeline flush
         */
        /* Pipeline feed */
        bucket20 = bucket10;
        bucket21 = bucket11;
        mbuf20 = mbuf10;
        mbuf21 = mbuf11;
        mbuf10 = mbuf00;
        mbuf11 = mbuf01;
        pkt20_index = pkt10_index;
        pkt21_index = pkt11_index;
        pkt10_index = pkt00_index;
        pkt11_index = pkt01_index;

        /* Pipeline stage 1 */
        lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

        /* Pipeline stage 2 */
        lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
                bucket20, bucket21, pkts_mask_out, entries, f);

        /* Pipeline feed */
        bucket20 = bucket10;
        bucket21 = bucket11;
        mbuf20 = mbuf10;
        mbuf21 = mbuf11;
        pkt20_index = pkt10_index;
        pkt21_index = pkt11_index;

        /* Pipeline stage 2 */
        lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
                bucket20, bucket21, pkts_mask_out, entries, f);

        *lookup_hit_mask = pkts_mask_out;
        RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
                __builtin_popcountll(pkts_mask_out));
        return 0;
} /* lookup LRU */

static int
rte_table_hash_lookup_key16_ext(
        void *table,
        struct rte_mbuf **pkts,
        uint64_t pkts_mask,
        uint64_t *lookup_hit_mask,
        void **entries)
{
        struct rte_table_hash *f = (struct rte_table_hash *) table;
        struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
        struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
        uint32_t pkt00_index, pkt01_index, pkt10_index;
        uint32_t pkt11_index, pkt20_index, pkt21_index;
        uint64_t pkts_mask_out = 0, buckets_mask = 0;
        struct rte_bucket_4_16 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
        uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];

        __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);

        RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in);
        /* Cannot run the pipeline with fewer than 5 packets */
        if (__builtin_popcountll(pkts_mask) < 5) {
                for ( ; pkts_mask; ) {
                        struct rte_bucket_4_16 *bucket;
                        struct rte_mbuf *mbuf;
                        uint32_t pkt_index;

                        lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
                        lookup1_stage1(mbuf, bucket, f);
                        lookup1_stage2_ext(pkt_index, mbuf, bucket,
                                pkts_mask_out, entries, buckets_mask,
                                buckets, keys, f);
                }

                goto grind_next_buckets;
        }

        /*
         * Pipeline fill
         */
        /* Pipeline stage 0 */
        lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
                pkts_mask, f);

        /* Pipeline feed */
        mbuf10 = mbuf00;
        mbuf11 = mbuf01;
        pkt10_index = pkt00_index;
        pkt11_index = pkt01_index;

        /* Pipeline stage 0 */
        lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
                pkts_mask, f);

        /* Pipeline stage 1 */
        lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

        /*
         * Pipeline run
         */
        for ( ; pkts_mask; ) {
                /* Pipeline feed */
                bucket20 = bucket10;
                bucket21 = bucket11;
                mbuf20 = mbuf10;
                mbuf21 = mbuf11;
                mbuf10 = mbuf00;
                mbuf11 = mbuf01;
                pkt20_index = pkt10_index;
                pkt21_index = pkt11_index;
                pkt10_index = pkt00_index;
                pkt11_index = pkt01_index;

                /* Pipeline stage 0 */
                lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
                        mbuf00, mbuf01, pkts, pkts_mask, f);

                /* Pipeline stage 1 */
                lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

                /* Pipeline stage 2 */
                lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
                        bucket20, bucket21, pkts_mask_out, entries,
                        buckets_mask, buckets, keys, f);
        }

        /*
         * Pipeline flush
         */
        /* Pipeline feed */
        bucket20 = bucket10;
        bucket21 = bucket11;
        mbuf20 = mbuf10;
        mbuf21 = mbuf11;
        mbuf10 = mbuf00;
        mbuf11 = mbuf01;
        pkt20_index = pkt10_index;
        pkt21_index = pkt11_index;
        pkt10_index = pkt00_index;
        pkt11_index = pkt01_index;

        /* Pipeline stage 1 */
        lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

        /* Pipeline stage 2 */
        lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
                bucket20, bucket21, pkts_mask_out, entries,
                buckets_mask, buckets, keys, f);

        /* Pipeline feed */
        bucket20 = bucket10;
        bucket21 = bucket11;
        mbuf20 = mbuf10;
        mbuf21 = mbuf11;
        pkt20_index = pkt10_index;
        pkt21_index = pkt11_index;

        /* Pipeline stage 2 */
        lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
                bucket20, bucket21, pkts_mask_out, entries,
                buckets_mask, buckets, keys, f);

grind_next_buckets:
        /* Grind next buckets */
        for ( ; buckets_mask; ) {
                uint64_t buckets_mask_next = 0;

                for ( ; buckets_mask; ) {
                        uint64_t pkt_mask;
                        uint32_t pkt_index;

                        pkt_index = __builtin_ctzll(buckets_mask);
                        pkt_mask = 1LLU << pkt_index;
                        buckets_mask &= ~pkt_mask;

                        lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
                                entries, buckets_mask_next, f);
                }

                buckets_mask = buckets_mask_next;
        }

        *lookup_hit_mask = pkts_mask_out;
        RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
                __builtin_popcountll(pkts_mask_out));
        return 0;
} /* lookup EXT */

static int
rte_table_hash_key16_stats_read(void *table, struct rte_table_stats *stats,
        int clear)
{
        struct rte_table_hash *t = table;

        if (stats != NULL)
                memcpy(stats, &t->stats, sizeof(t->stats));

        if (clear)
                memset(&t->stats, 0, sizeof(t->stats));

        return 0;
}

struct rte_table_ops rte_table_hash_key16_lru_ops = {
        .f_create = rte_table_hash_create_key16_lru,
        .f_free = rte_table_hash_free_key16_lru,
        .f_add = rte_table_hash_entry_add_key16_lru,
        .f_delete = rte_table_hash_entry_delete_key16_lru,
        .f_add_bulk = NULL,
        .f_delete_bulk = NULL,
        .f_lookup = rte_table_hash_lookup_key16_lru,
        .f_stats = rte_table_hash_key16_stats_read,
};

struct rte_table_ops rte_table_hash_key16_ext_ops = {
        .f_create = rte_table_hash_create_key16_ext,
        .f_free = rte_table_hash_free_key16_ext,
        .f_add = rte_table_hash_entry_add_key16_ext,
        .f_delete = rte_table_hash_entry_delete_key16_ext,
        .f_add_bulk = NULL,
        .f_delete_bulk = NULL,
        .f_lookup = rte_table_hash_lookup_key16_ext,
        .f_stats = rte_table_hash_key16_stats_read,
};

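/*
 * Usage sketch (illustrative, not part of the original file): creating an
 * LRU table through the ops above. The parameter fields are the ones this
 * file reads from struct rte_table_hash_params; "app_hash" and struct
 * flow_entry are hypothetical application-side names.
 *
 *	static uint64_t
 *	app_hash(void *key, void *key_mask, uint32_t key_size, uint64_t seed);
 *
 *	struct rte_table_hash_params params = {
 *		.name = "flow_table",
 *		.key_size = 16,
 *		.key_offset = 0,     // key location within the mbuf metadata
 *		.key_mask = NULL,    // NULL: match all 16 key bytes
 *		.n_keys = 1 << 16,
 *		.n_buckets = 1 << 14,
 *		.f_hash = app_hash,
 *		.seed = 0,
 *	};
 *	void *table = rte_table_hash_key16_lru_ops.f_create(&params,
 *		rte_socket_id(), sizeof(struct flow_entry));
 */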