| /f-stack/dpdk/lib/librte_table/ |
| H A D | rte_lru.h | 24 #define lru_init(bucket) \ argument 26 bucket = bucket; \ 29 #define lru_pos(bucket) (bucket->lru_list & 0xFFFFLLU) argument 31 #define lru_update(bucket, mru_val) \ argument 33 bucket = bucket; \ 39 #define lru_init(bucket) \ 41 bucket->lru_list = 0x0000000100020003LLU; \ 44 #define lru_pos(bucket) (bucket->lru_list & 0xFFFFLLU) 46 #define lru_update(bucket, mru_val) \ 50 x = bucket->lru_list; \ [all …]
|
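The rte_lru.h and rte_lru_x86.h hits above describe DPDK's per-bucket LRU bookkeeping: the positions of a 4-entry bucket are packed into one 64-bit `lru_list` word (initialized to `0x0000000100020003`), `lru_pos()` reads the least-recently-used slot from the low 16 bits, and `lru_update()` promotes the touched slot to the front. The sketch below is a portable, hedged reimplementation of that idea only; the struct name `bucket4` and the scalar update loop are illustrative, not the DPDK code (the real x86/arm64 headers use SSE/NEON shuffles).

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative bucket: four slot indices tracked by one packed 64-bit
 * LRU list, most-recently-used in the top 16 bits, least-recently-used
 * in the low 16 bits (as lru_pos() suggests). */
struct bucket4 {
	uint64_t lru_list;
};

static void lru_init(struct bucket4 *b)
{
	b->lru_list = 0x0000000100020003ULL;    /* MRU=0 ... LRU=3 */
}

static uint32_t lru_pos(const struct bucket4 *b)
{
	return (uint32_t)(b->lru_list & 0xFFFFULL);   /* LRU slot */
}

/* Move slot mru_val (0..3, assumed present in the list) to the MRU
 * position and shift the remaining slots down. */
static void lru_update(struct bucket4 *b, uint32_t mru_val)
{
	uint64_t out = (uint64_t)mru_val << 48;
	int shift = 32;

	for (int i = 3; i >= 0; i--) {
		uint32_t v = (uint32_t)((b->lru_list >> (i * 16)) & 0xFFFF);
		if (v != mru_val) {
			out |= (uint64_t)v << shift;
			shift -= 16;
		}
	}
	b->lru_list = out;
}

int main(void)
{
	struct bucket4 b;

	lru_init(&b);
	lru_update(&b, 3);                       /* touch slot 3 */
	printf("LRU slot is now %u\n", lru_pos(&b));  /* prints 2 */
	return 0;
}
```

The test in test_table_tables.c further down exercises exactly this behaviour: it touches slots in turn and checks that `lru_pos()` reports the slot least recently passed to `lru_update()`.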
| H A D | rte_lru_x86.h | 30 #define lru_init(bucket) \ argument 31 { bucket->lru_list = 0x0000000100020003LLU; } 33 #define lru_pos(bucket) (bucket->lru_list & 0xFFFFLLU) argument 35 #define lru_update(bucket, mru_val) \ argument 48 uint64_t lru = bucket->lru_list; \ 60 bucket->lru_list = _mm_extract_epi64(k, 0); \ 74 #define lru_init(bucket) \ 75 { bucket->lru_list = ~0LLU; } 84 #define lru_pos(bucket) f_lru_pos(bucket->lru_list) 86 #define lru_update(bucket, mru_val) \ [all …]
|
| H A D | rte_table_hash_key16.c | 228 lru_init(bucket); in rte_table_hash_create_key16_lru() 278 lru_update(bucket, i); in rte_table_hash_entry_add_key16_lru() 296 lru_update(bucket, i); in rte_table_hash_entry_add_key16_lru() 305 pos = lru_pos(bucket); in rte_table_hash_entry_add_key16_lru() 309 lru_update(bucket, pos); in rte_table_hash_entry_add_key16_lru() 479 for (bucket = bucket0; bucket != NULL; bucket = bucket->next) in rte_table_hash_entry_add_key16_ext() 497 for (bucket_prev = NULL, bucket = bucket0; bucket != NULL; in rte_table_hash_entry_add_key16_ext() 498 bucket_prev = bucket, bucket = bucket->next) in rte_table_hash_entry_add_key16_ext() 556 for (bucket_prev = NULL, bucket = bucket0; bucket != NULL; in rte_table_hash_entry_delete_key16_ext() 557 bucket_prev = bucket, bucket = bucket->next) in rte_table_hash_entry_delete_key16_ext() [all …]
|
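The key16 LRU add path above follows a three-step policy: update the key if it is already in the 4-entry bucket, otherwise claim a free slot, otherwise overwrite the slot named by `lru_pos()` and mark it most recently used. A minimal stand-alone sketch of that policy, assuming the packed `lru_list` from the previous sketch (initialized to `0x0000000100020003`) and an invented `KEY_EMPTY` sentinel for free slots:

```c
#include <stdint.h>

#define BUCKET_ENTRIES 4
#define KEY_EMPTY      0ULL       /* assumed sentinel for a free slot */

/* Toy 4-way bucket; the real key8/16/32 tables also keep signatures. */
struct lru_bucket {
	uint64_t lru_list;        /* packed LRU list, as sketched earlier */
	uint64_t key[BUCKET_ENTRIES];
	uint64_t data[BUCKET_ENTRIES];
};

#define lru_pos(b) ((uint32_t)((b)->lru_list & 0xFFFFULL))

static void lru_update(struct lru_bucket *b, uint32_t mru)
{
	uint64_t out = (uint64_t)mru << 48;
	int shift = 32;

	for (int i = 3; i >= 0; i--) {
		uint32_t v = (uint32_t)((b->lru_list >> (i * 16)) & 0xFFFF);
		if (v != mru) {
			out |= (uint64_t)v << shift;
			shift -= 16;
		}
	}
	b->lru_list = out;
}

/* Insert or update a key; evict the LRU slot when the bucket is full.
 * Returns 0 on plain add/update, 1 when an existing entry was evicted. */
static int bucket_add_lru(struct lru_bucket *b, uint64_t key, uint64_t data)
{
	uint32_t i;

	for (i = 0; i < BUCKET_ENTRIES; i++)      /* key already present */
		if (b->key[i] == key) {
			b->data[i] = data;
			lru_update(b, i);
			return 0;
		}

	for (i = 0; i < BUCKET_ENTRIES; i++)      /* free slot available */
		if (b->key[i] == KEY_EMPTY) {
			b->key[i] = key;
			b->data[i] = data;
			lru_update(b, i);
			return 0;
		}

	i = lru_pos(b);                           /* bucket full: evict LRU */
	b->key[i] = key;
	b->data[i] = data;
	lru_update(b, i);
	return 1;
}
```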
| H A D | rte_table_hash_key32.c | 286 lru_update(bucket, i); in rte_table_hash_entry_add_key32_lru() 304 lru_update(bucket, i); in rte_table_hash_entry_add_key32_lru() 313 pos = lru_pos(bucket); in rte_table_hash_entry_add_key32_lru() 317 lru_update(bucket, pos); in rte_table_hash_entry_add_key32_lru() 493 for (bucket = bucket0; bucket != NULL; bucket = bucket->next) { in rte_table_hash_entry_add_key32_ext() 513 for (bucket_prev = NULL, bucket = bucket0; bucket != NULL; in rte_table_hash_entry_add_key32_ext() 514 bucket_prev = bucket, bucket = bucket->next) in rte_table_hash_entry_add_key32_ext() 573 for (bucket_prev = NULL, bucket = bucket0; bucket != NULL; in rte_table_hash_entry_delete_key32_ext() 574 bucket_prev = bucket, bucket = bucket->next) in rte_table_hash_entry_delete_key32_ext() 596 bucket->next_valid; in rte_table_hash_entry_delete_key32_ext() [all …]
|
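The `_ext` (extendable bucket) add/delete paths above walk a singly linked chain of buckets via `bucket->next` instead of evicting: the key is searched across the whole chain, the first free slot anywhere in the chain is reused, and only when every bucket is full is a fresh bucket linked at the tail. A hedged sketch of that insert, with invented types and `calloc` standing in for DPDK's preallocated bucket stack:

```c
#include <stdint.h>
#include <stdlib.h>

#define BUCKET_ENTRIES 4
#define KEY_EMPTY      0ULL          /* assumed free-slot sentinel */

struct ext_bucket {
	uint64_t key[BUCKET_ENTRIES];
	uint64_t data[BUCKET_ENTRIES];
	struct ext_bucket *next;     /* overflow chain */
};

/* Insert into the chain rooted at bkt0 (assumed non-NULL: it lives in
 * the table's static bucket array).  Extend the chain when every bucket
 * is full.  Returns 0 on success, -1 when allocation fails. */
static int chain_add(struct ext_bucket *bkt0, uint64_t key, uint64_t data)
{
	struct ext_bucket *bkt, *prev = NULL;
	int i;

	/* Pass 1: key already present somewhere in the chain? */
	for (bkt = bkt0; bkt != NULL; bkt = bkt->next)
		for (i = 0; i < BUCKET_ENTRIES; i++)
			if (bkt->key[i] == key) {
				bkt->data[i] = data;
				return 0;
			}

	/* Pass 2: first free slot in any bucket of the chain. */
	for (bkt = bkt0; bkt != NULL; prev = bkt, bkt = bkt->next)
		for (i = 0; i < BUCKET_ENTRIES; i++)
			if (bkt->key[i] == KEY_EMPTY) {
				bkt->key[i] = key;
				bkt->data[i] = data;
				return 0;
			}

	/* Pass 3: chain exhausted, append an overflow bucket. */
	bkt = calloc(1, sizeof(*bkt));
	if (bkt == NULL)
		return -1;
	bkt->key[0] = key;
	bkt->data[0] = data;
	prev->next = bkt;
	return 0;
}
```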
| H A D | rte_table_hash_key8.c | 267 lru_update(bucket, i); in rte_table_hash_entry_add_key8_lru() 284 lru_update(bucket, i); in rte_table_hash_entry_add_key8_lru() 293 pos = lru_pos(bucket); in rte_table_hash_entry_add_key8_lru() 296 lru_update(bucket, pos); in rte_table_hash_entry_add_key8_lru() 462 for (bucket = bucket0; bucket != NULL; bucket = bucket->next) { in rte_table_hash_entry_add_key8_ext() 484 bucket != NULL; bucket_prev = bucket, bucket = bucket->next) { in rte_table_hash_entry_add_key8_ext() 514 bucket->signature = 1; in rte_table_hash_entry_add_key8_ext() 543 for (bucket_prev = NULL, bucket = bucket0; bucket != NULL; in rte_table_hash_entry_delete_key8_ext() 544 bucket_prev = bucket, bucket = bucket->next) { in rte_table_hash_entry_delete_key8_ext() 566 bucket->next_valid; in rte_table_hash_entry_delete_key8_ext() [all …]
|
| H A D | rte_lru_arm64.h | 25 #define lru_init(bucket) \ argument 26 { bucket->lru_list = ~0LLU; } 38 #define lru_pos(bucket) f_lru_pos(bucket->lru_list) argument 40 #define lru_update(bucket, mru_val) \ argument 45 uint64x1_t lru = vdup_n_u64(bucket->lru_list); \ 47 bucket->lru_list = vget_lane_u64(vreinterpret_u64_u16( \ 51 bucket->lru_list |= orvals[mru_val]; \
|
| H A D | rte_table_hash_ext.c | 18 struct bucket { struct 27 #define BUCKET_NEXT(bucket) \ argument 28 ((void *) ((bucket)->next & (~1LU))) 30 #define BUCKET_NEXT_VALID(bucket) \ argument 31 ((bucket)->next & 1LU) 40 (bucket)->next = 0; \ 45 (bucket)->next = (bucket2)->next; \ 63 struct bucket *bkt; 94 struct bucket *buckets; 95 struct bucket *buckets_ext; [all …]
|
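rte_table_hash_ext.c stores the bucket chain link and its validity flag in one word: bit 0 of `next` says whether the link is in use, and `BUCKET_NEXT()` masks it off before following the pointer. A small, hedged illustration of that pointer-tagging trick (it assumes buckets are at least 2-byte aligned, which any allocator guarantees for these structs):

```c
#include <stdint.h>
#include <stdbool.h>

struct tbucket {
	uintptr_t next;               /* pointer with a "valid" tag in bit 0 */
	/* ... signatures, keys, data ... */
};

/* Strip the tag bit to recover the real pointer. */
static struct tbucket *bucket_next(const struct tbucket *b)
{
	return (struct tbucket *)(b->next & ~(uintptr_t)1);
}

static bool bucket_next_valid(const struct tbucket *b)
{
	return (b->next & 1u) != 0;
}

/* Link nxt after b and mark the link valid in a single store. */
static void bucket_set_next(struct tbucket *b, struct tbucket *nxt)
{
	b->next = (uintptr_t)nxt | 1u;
}

/* Copy another bucket's (pointer, tag) pair verbatim, as the
 * copy macro in the listing appears to do. */
static void bucket_copy_next(struct tbucket *dst, const struct tbucket *src)
{
	dst->next = src->next;
}
```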
| H A D | rte_table_hash_lru.c | 33 struct bucket { struct 35 struct bucket *next; argument 43 struct bucket *bkt; argument 73 struct bucket *buckets; 254 struct bucket *bkt = &t->buckets[i]; in rte_table_hash_lru_create() 280 struct bucket *bkt; in rte_table_hash_lru_entry_add() 364 struct bucket *bkt; in rte_table_hash_lru_entry_delete() 413 struct bucket *bkt; in rte_table_hash_lru_lookup_unoptimized() 505 bucket_sig[0] = bucket->sig[0]; \ 692 struct bucket *bkt20, *bkt21; \ [all …]
|
| /f-stack/freebsd/netinet6/ |
| H A D | frag6.c | 294 uint32_t bucket; in frag6_cleanup() local 310 for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) { in frag6_cleanup() 895 uint32_t bucket; in frag6_slowtimo() local 900 for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) { in frag6_slowtimo() 933 bucket = 0; in frag6_slowtimo() 945 bucket = (bucket + 1) % IP6REASS_NHASH; in frag6_slowtimo() 977 uint32_t bucket; in frag6_init() local 981 for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) { in frag6_init() 1008 for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) { in frag6_drain_one() 1044 for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) { in frag6_destroy() [all …]
|
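frag6_slowtimo() above sweeps all IP6REASS_NHASH reassembly buckets, and when it must shed load it advances a saved bucket index with `(bucket + 1) % IP6REASS_NHASH` so successive passes do not always penalize the same bucket. The sketch below only illustrates that rotating-victim idea; the queue type, the expiry test, and the limit handling are invented and much simpler than the real frag6 code:

```c
#include <stdint.h>

#define NHASH 64                        /* stand-in for IP6REASS_NHASH */

struct frag_queue {                     /* invented per-bucket queue head */
	int nfrags;
	int64_t expire_at;
};

static struct frag_queue buckets[NHASH];
static uint32_t next_victim;            /* persists across timer ticks */

/* Expire stale buckets; when over a global limit, drop one extra bucket
 * per tick, rotating the victim index so the cost is spread out. */
static void slowtimo_sweep(int64_t now, int over_limit)
{
	uint32_t bucket;

	for (bucket = 0; bucket < NHASH; bucket++)
		if (buckets[bucket].nfrags > 0 &&
		    buckets[bucket].expire_at <= now)
			buckets[bucket].nfrags = 0;   /* "free" the queue */

	if (over_limit) {
		buckets[next_victim].nfrags = 0;
		next_victim = (next_victim + 1) % NHASH;
	}
}
```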
| /f-stack/dpdk/lib/librte_hash/ |
| H A D | rte_fbk_hash.h | 135 if (! ht->t[bucket + i].entry.is_entry) { in rte_fbk_hash_add_key_with_bucket() 136 ht->t[bucket + i].whole_entry = new_entry; in rte_fbk_hash_add_key_with_bucket() 141 if (ht->t[bucket + i].entry.key == key) { in rte_fbk_hash_add_key_with_bucket() 142 ht->t[bucket + i].entry.value = value; in rte_fbk_hash_add_key_with_bucket() 187 uint32_t key, uint32_t bucket) in rte_fbk_hash_delete_key_with_bucket() argument 193 if (ht->t[bucket + i].entry.key == key) { in rte_fbk_hash_delete_key_with_bucket() 196 if (! ht->t[bucket + j].entry.is_entry) { in rte_fbk_hash_delete_key_with_bucket() 205 ht->t[bucket + i].whole_entry = in rte_fbk_hash_delete_key_with_bucket() 206 ht->t[bucket + last_entry].whole_entry; in rte_fbk_hash_delete_key_with_bucket() 207 ht->t[bucket + last_entry].whole_entry = 0; in rte_fbk_hash_delete_key_with_bucket() [all …]
|
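rte_fbk_hash keeps each bucket's four entries packed: rte_fbk_hash_delete_key_with_bucket() overwrites the matching entry with the bucket's last occupied entry and clears that last slot, so deletion never leaves holes. A hedged sketch of that compact-on-delete step, with a simplified entry layout instead of the packed `whole_entry` union:

```c
#include <stdint.h>
#include <stdbool.h>

#define ENTRIES_PER_BUCKET 4

struct fbk_entry {                 /* simplified: the real entries pack   */
	uint32_t key;              /* key/value/is_entry into one uint64  */
	uint32_t value;
	bool     is_entry;
};

/* Delete 'key' from the 4-entry bucket starting at t[bucket]. */
static int fbk_delete_with_bucket(struct fbk_entry *t, uint32_t bucket,
				  uint32_t key)
{
	for (uint32_t i = 0; i < ENTRIES_PER_BUCKET; i++) {
		if (!t[bucket + i].is_entry || t[bucket + i].key != key)
			continue;

		/* Find the last occupied slot in this bucket... */
		uint32_t last = i;
		for (uint32_t j = i + 1; j < ENTRIES_PER_BUCKET; j++)
			if (t[bucket + j].is_entry)
				last = j;

		/* ...move it into the hole and clear it, keeping the
		 * occupied entries packed at the front of the bucket. */
		t[bucket + i] = t[bucket + last];
		t[bucket + last] = (struct fbk_entry){0};
		return 0;
	}
	return -1;                 /* key not found in this bucket */
}
```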
| /f-stack/freebsd/netpfil/ipfw/ |
| H A D | ip_fw_dynamic.c | 2321 for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) { in dyn_expire_states() 2457 for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) { in dyn_send_keepalive_ipv4() 2564 for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) { in dyn_send_keepalive_ipv6() 2629 for (bucket = 0; bucket < new; bucket++) { in dyn_grow_hashtable() 2656 for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) { in dyn_grow_hashtable() 2813 for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) { in ipfw_dyn_reset_eaction() 2861 for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) { in ipfw_dyn_get_count() 3076 for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) { in ipfw_dump_states() 3123 for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) { in ipfw_get_dynamic() 3209 int bucket; in ipfw_dyn_uninit() local [all …]
|
| /f-stack/freebsd/vm/ |
| H A D | uma_core.c | 494 if (bucket) { in bucket_alloc() 750 item = bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt]; in cache_bucket_pop() 752 bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = NULL; in cache_bucket_pop() 769 bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = item; in cache_bucket_push() 787 bucket->ucb_entries = bucket->ucb_cnt = 0; in cache_bucket_unload() 3611 item = bucket->ub_bucket[bucket->ub_cnt - 1]; in uma_zalloc_domain() 3613 bucket->ub_bucket[bucket->ub_cnt - 1] = NULL; in uma_zalloc_domain() 4257 if (bucket->ucb_cnt == bucket->ucb_entries && in uma_zfree_arg() 4312 for (; bucket->ub_cnt > 0; bucket->ub_cnt--) { in zone_free_cross() 4313 item = bucket->ub_bucket[bucket->ub_cnt - 1]; in zone_free_cross() [all …]
|
| /f-stack/freebsd/contrib/octeon-sdk/ |
| H A D | cvmx-tim.c | 165 cvmx_tim.bucket = cvmx_bootmem_alloc(CVMX_TIM_NUM_TIMERS * cvmx_tim.num_buckets in cvmx_tim_setup() 167 if (cvmx_tim.bucket == NULL) in cvmx_tim_setup() 172 …memset(cvmx_tim.bucket, 0, CVMX_TIM_NUM_TIMERS * cvmx_tim.num_buckets * sizeof(cvmx_tim_bucket_ent… in cvmx_tim_setup() 180 cvmx_tim_bucket_entry_t *bucket = cvmx_tim.bucket + timer_id * cvmx_tim.num_buckets; in cvmx_tim_setup() local 191 ring_ctl2.s.base = cvmx_ptr_to_phys(bucket) >> 5; in cvmx_tim_setup() 216 config_ring0.s.first_bucket = cvmx_ptr_to_phys(bucket) >> 5; in cvmx_tim_setup() 273 uint32_t bucket; in cvmx_tim_shutdown() local 285 for (bucket=0; bucket<cvmx_tim.num_buckets; bucket++) in cvmx_tim_shutdown() 289 … cvmx_tim_bucket_entry_t *bucket_ptr = cvmx_tim.bucket + timer_id * cvmx_tim.num_buckets + bucket; in cvmx_tim_shutdown()
|
| /f-stack/app/redis-5.0.5/deps/jemalloc/src/ |
| H A D | ckh.c | 68 cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; in ckh_bucket_search() 70 return (bucket << LG_CKH_BUCKET_CELLS) + i; in ckh_bucket_search() 82 size_t hashes[2], bucket, cell; in ckh_isearch() local 90 cell = ckh_bucket_search(ckh, bucket, key); in ckh_isearch() 97 cell = ckh_bucket_search(ckh, bucket, key); in ckh_isearch() 114 cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + in ckh_try_bucket_insert() 138 size_t hashes[2], bucket, tbucket; in ckh_evict_reloc_insert() local 141 bucket = argbucket; in ckh_evict_reloc_insert() 170 if (tbucket == bucket) { in ckh_evict_reloc_insert() 197 bucket = tbucket; in ckh_evict_reloc_insert() [all …]
|
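jemalloc's ckh.c above is a cuckoo hash: every key has two candidate buckets (two hashes), and ckh_evict_reloc_insert() makes room by evicting a resident item and re-inserting it into its other bucket, repeating until something fits or the attempt is abandoned. A miniature, hedged version of that relocate loop; it uses one cell per bucket and toy hash functions, whereas ckh uses multi-cell buckets and rebuilds the table when relocation cycles:

```c
#include <stdint.h>
#include <stdbool.h>

#define NBUCKETS  8                 /* toy table size (power of two)     */
#define MAX_KICKS 16                /* give up after this many evictions */

struct cell { uint64_t key; bool used; };
static struct cell tab[NBUCKETS];

/* Two independent toy hash functions standing in for ckh's hashes[2]. */
static uint32_t h1(uint64_t k) { return (uint32_t)(k * 0x9E3779B97F4A7C15ULL >> 61); }
static uint32_t h2(uint64_t k) { return (uint32_t)(k * 0xC2B2AE3D27D4EB4FULL >> 61); }

/* Insert a key not already present; returns false when relocation fails
 * (the point where the real code would grow the table). */
static bool cuckoo_insert(uint64_t key)
{
	uint32_t b = h1(key);

	for (int kick = 0; kick < MAX_KICKS; kick++) {
		if (!tab[b].used) {            /* free cell: done */
			tab[b].key = key;
			tab[b].used = true;
			return true;
		}
		/* Evict the resident key and retry in its alternate bucket. */
		uint64_t evicted = tab[b].key;
		tab[b].key = key;
		key = evicted;
		b = (b == h1(key)) ? h2(key) : h1(key);
	}
	return false;
}

static bool cuckoo_lookup(uint64_t key)
{
	return (tab[h1(key)].used && tab[h1(key)].key == key) ||
	       (tab[h2(key)].used && tab[h2(key)].key == key);
}
```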
| /f-stack/freebsd/contrib/openzfs/cmd/zpool_influxdb/ |
| H A D | zpool_influxdb.c | 433 for (int bucket = 0; bucket <= end; bucket++) { in print_vdev_latency_stats() local 434 if (bucket < MIN_LAT_INDEX) { in print_vdev_latency_stats() 437 lat_type[i].sum += lat_type[i].array[bucket]; in print_vdev_latency_stats() 441 if (bucket < end) { in print_vdev_latency_stats() 444 (float)(1ULL << bucket) * 1e-9, in print_vdev_latency_stats() 453 lat_type[i].sum += lat_type[i].array[bucket]; in print_vdev_latency_stats() 455 lat_type[i].sum = lat_type[i].array[bucket]; in print_vdev_latency_stats() 528 for (int bucket = 0; bucket <= end; bucket++) { in print_vdev_size_stats() local 529 if (bucket < MIN_SIZE_INDEX) { in print_vdev_size_stats() 537 if (bucket < end) { in print_vdev_size_stats() [all …]
|
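zpool_influxdb prints latency histograms whose buckets are powers of two in nanoseconds, which is why the upper boundary is recovered as `(float)(1ULL << bucket) * 1e-9` seconds. A hedged helper showing how a raw latency maps into such a bucket and how the boundary is printed; the bucket count and the MIN_LAT_INDEX handling of the real tool are not reproduced:

```c
#include <stdint.h>
#include <stdio.h>

#define NBUCKETS 37                 /* covers up to ~2^36 ns (~68 s) */

/* Index of the power-of-two bucket for a latency in nanoseconds:
 * the smallest i with latency_ns <= (1 << i). */
static unsigned lat_to_bucket(uint64_t latency_ns)
{
	unsigned i = 0;

	while (i < NBUCKETS - 1 && (1ULL << i) < latency_ns)
		i++;
	return i;
}

int main(void)
{
	uint64_t hist[NBUCKETS] = {0};
	uint64_t samples[] = {800, 15000, 2500000, 40000000};

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		hist[lat_to_bucket(samples[i])]++;

	for (unsigned b = 0; b < NBUCKETS; b++)
		if (hist[b])
			printf("<= %.6f s : %llu\n",
			       (double)(1ULL << b) * 1e-9,
			       (unsigned long long)hist[b]);
	return 0;
}
```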
| /f-stack/freebsd/netgraph/ |
| H A D | ng_bridge.c | 428 int i, bucket; in ng_bridge_rcvmsg() local 439 for (bucket = 0; bucket < priv->numBuckets; bucket++) { in ng_bridge_rcvmsg() 544 int i = 0, bucket; in ng_bridge_rcvmsg() local 554 for (bucket = 0; bucket < priv->numBuckets; bucket++) { in ng_bridge_rcvmsg() 882 const int bucket = HASH(addr, priv->hashMask); in ng_bridge_get() local 885 SLIST_FOREACH(hent, &priv->tab[bucket], next) { in ng_bridge_get() 900 const int bucket = HASH(addr, priv->hashMask); in ng_bridge_put() local 999 int bucket; in ng_bridge_remove_hosts() local 1001 for (bucket = 0; bucket < priv->numBuckets; bucket++) { in ng_bridge_remove_hosts() 1047 int bucket; in ng_bridge_timeout() local [all …]
|
| /f-stack/dpdk/lib/librte_ipsec/ |
| H A D | ipsec_sqn.h | 96 uint32_t bit, bucket; in esn_inb_check_sqn() local 112 bucket = (sqn >> WINDOW_BUCKET_BITS) & sa->replay.bucket_index_mask; in esn_inb_check_sqn() 115 if (rsn->window[bucket] & ((uint64_t)1 << bit)) in esn_inb_check_sqn() 153 uint32_t bit, bucket, last_bucket, new_bucket, diff, i; in esn_inb_update_sqn() local 164 bucket = (sqn >> WINDOW_BUCKET_BITS); in esn_inb_update_sqn() 169 diff = bucket - last_bucket; in esn_inb_update_sqn() 182 bucket &= sa->replay.bucket_index_mask; in esn_inb_update_sqn() 186 if (rsn->window[bucket] & bit) in esn_inb_update_sqn() 189 rsn->window[bucket] |= bit; in esn_inb_update_sqn()
|
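ipsec_sqn.h implements the inbound anti-replay window as an array of 64-bit buckets: the low bits of the sequence number pick a bit, the next WINDOW_BUCKET_BITS pick a bucket (masked by `bucket_index_mask`), and a set bit means the sequence number was already seen. The sketch below covers only the check-and-mark half with invented sizing constants; advancing the window when a newer sequence number arrives (the `diff = bucket - last_bucket` logic above) is deliberately left out:

```c
#include <stdint.h>
#include <stdbool.h>

#define BUCKET_BITS     6                        /* 64 bits per bucket  */
#define BUCKET_MASK     ((1u << BUCKET_BITS) - 1)
#define NB_BUCKETS      4                        /* window of 256 SQNs  */
#define BUCKET_IDX_MASK (NB_BUCKETS - 1)

struct replay_window {
	uint64_t last_sqn;                       /* highest SQN accepted */
	uint64_t window[NB_BUCKETS];
};

/* Return true if sqn is fresh (inside the window, not yet seen) and mark
 * it as seen.  SQNs ahead of last_sqn would first advance the window,
 * which this sketch omits. */
static bool replay_check_and_mark(struct replay_window *w, uint64_t sqn)
{
	uint64_t win_sz = (uint64_t)NB_BUCKETS << BUCKET_BITS;

	if (w->last_sqn >= win_sz && sqn <= w->last_sqn - win_sz)
		return false;                    /* too old, outside window */

	uint32_t bit = (uint32_t)(sqn & BUCKET_MASK);
	uint32_t bucket = (uint32_t)(sqn >> BUCKET_BITS) & BUCKET_IDX_MASK;

	if (w->window[bucket] & ((uint64_t)1 << bit))
		return false;                    /* duplicate: replay */

	w->window[bucket] |= (uint64_t)1 << bit;
	return true;
}
```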
| /f-stack/freebsd/netinet/ |
| H A D | ip_reass.c | 112 ipq_free(bucket, fp); in ipq_timeout() 120 ipq_free(bucket, fp); in ipq_drop() 788 int bucket, i; in ipq_reuse() local 793 bucket = (start + i) % IPREASS_NHASH; in ipq_reuse() 794 if (bucket != start && IPQ_TRYLOCK(bucket) == 0) in ipq_reuse() 808 V_ipq[bucket].count--; in ipq_reuse() 809 if (bucket != start) in ipq_reuse() 810 IPQ_UNLOCK(bucket); in ipq_reuse() 813 if (bucket != start) in ipq_reuse() 814 IPQ_UNLOCK(bucket); in ipq_reuse() [all …]
|
| /f-stack/dpdk/drivers/net/bnxt/tf_core/ |
| H A D | tf_shadow_tbl.c | 345 uint64_t *bucket; in tf_shadow_tbl_clear_hash_entry() local 352 bucket = &ctxt->hash_ctxt.hashtbl[hid]; in tf_shadow_tbl_clear_hash_entry() 356 *bucket = TF_SHADOW_BE0_MASK_CLEAR(*bucket); in tf_shadow_tbl_clear_hash_entry() 359 *bucket = TF_SHADOW_BE1_MASK_CLEAR(*bucket); in tf_shadow_tbl_clear_hash_entry() 362 *bucket = TF_SHADOW_BE2_MASK_CLEAR(*bucket); in tf_shadow_tbl_clear_hash_entry() 365 *bucket = TF_SHADOW_BE2_MASK_CLEAR(*bucket); in tf_shadow_tbl_clear_hash_entry() 525 uint64_t bucket; in tf_shadow_tbl_search() local 577 bucket = ctxt->hash_ctxt.hashtbl[hb_idx]; in tf_shadow_tbl_search() 578 if (!bucket) { in tf_shadow_tbl_search() 589 shtbl_idx = (uint16_t)((bucket >> (i * 16)) & 0xffff); in tf_shadow_tbl_search()
|
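tf_shadow_tbl.c (and its TCAM twin below) packs up to four 16-bit entry indices into a single 64-bit hash bucket: the search loop extracts them with `(bucket >> (i * 16)) & 0xffff`, and the clear path masks out one 16-bit lane. A hedged sketch of that pack/extract/clear arithmetic:

```c
#include <stdint.h>

#define ENTRIES_PER_BUCKET 4

/* Extract 16-bit entry 'slot' (0..3) from a packed 64-bit bucket. */
static uint16_t bucket_get(uint64_t bucket, unsigned slot)
{
	return (uint16_t)((bucket >> (slot * 16)) & 0xffff);
}

/* Write entry 'slot', preserving the other three lanes. */
static uint64_t bucket_set(uint64_t bucket, unsigned slot, uint16_t val)
{
	uint64_t mask = (uint64_t)0xffff << (slot * 16);

	return (bucket & ~mask) | ((uint64_t)val << (slot * 16));
}

/* Clear entry 'slot', the equivalent of the *_MASK_CLEAR macros. */
static uint64_t bucket_clear(uint64_t bucket, unsigned slot)
{
	return bucket & ~((uint64_t)0xffff << (slot * 16));
}
```

A lookup then just walks the four lanes, e.g. `for (i = 0; i < ENTRIES_PER_BUCKET; i++) idx = bucket_get(bucket, i);`, which is the shift-and-mask visible in tf_shadow_tbl_search().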
| H A D | tf_shadow_tcam.c | 354 uint64_t *bucket; in tf_shadow_tcam_clear_hash_entry() local 361 bucket = &ctxt->hash_ctxt.hashtbl[hid]; in tf_shadow_tcam_clear_hash_entry() 365 *bucket = TF_SHADOW_TCAM_BE0_MASK_CLEAR(*bucket); in tf_shadow_tcam_clear_hash_entry() 368 *bucket = TF_SHADOW_TCAM_BE1_MASK_CLEAR(*bucket); in tf_shadow_tcam_clear_hash_entry() 371 *bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket); in tf_shadow_tcam_clear_hash_entry() 374 *bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket); in tf_shadow_tcam_clear_hash_entry() 544 uint64_t bucket; in tf_shadow_tcam_search() local 605 bucket = ctxt->hash_ctxt.hashtbl[hb_idx]; in tf_shadow_tcam_search() 607 if (!bucket) { in tf_shadow_tcam_search() 618 shtbl_idx = (uint16_t)((bucket >> (i * 16)) & 0xffff); in tf_shadow_tcam_search()
|
| /f-stack/dpdk/doc/guides/prog_guide/ |
| H A D | packet_framework.rst | 312 …rule to map a key to its bucket can simply be to use the key signature (modulo the number of table… 408 …to the table, it can happen that a given bucket already has 4 keys when a new key has to be added … 413 …The number of keys in each bucket never grows bigger than 4. The logic to pick the key to be dropp… 430 …The extendable bucket logic requires maintaining specific data structures per table and per each b… 583 … | | the current bucket). The next pointer is not NULL if the bu… 607 …ure35` and :numref:`table_qos_27` detail the bucket search pipeline stages (either LRU or extendab… 632 …| 1 | Prefetch table bucket | Read the key signature from the packet meta-data (for extendable… 887 and detail the bucket search pipeline used to implement 8-byte and 16-byte key hash tables (either … 912 …| 1 | Prefetch table bucket | #. Read the key signature from the packet meta-data (for extend… 941 #. For extendable bucket hash tables only, [all …]
|
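The bucket search pipeline described in packet_framework.rst overlaps work for a burst of packets: an early stage computes the bucket index from the key signature and prefetches the bucket, so the later key-compare stage finds it already in cache. The sketch below is a heavily simplified, hedged illustration of that prefetch-then-compare pattern using the compiler builtin instead of DPDK's prefetch wrappers, and it assumes a power-of-two bucket count:

```c
#include <stdint.h>

#define BUCKET_ENTRIES 4

struct hbucket {
	uint16_t sig[BUCKET_ENTRIES];
	uint64_t key[BUCKET_ENTRIES];
	uint64_t data[BUCKET_ENTRIES];
};

/* Two-stage burst lookup: prefetch the bucket for packet i+1 while
 * comparing keys for packet i.  'n_buckets' must be a power of two. */
static void burst_lookup(struct hbucket *tab, uint32_t n_buckets,
			 const uint32_t *sigs, const uint64_t *keys,
			 uint64_t *out, int n_pkts)
{
	for (int i = 0; i < n_pkts; i++) {
		if (i + 1 < n_pkts)       /* stage 1 for the next packet */
			__builtin_prefetch(&tab[sigs[i + 1] & (n_buckets - 1)]);

		struct hbucket *b = &tab[sigs[i] & (n_buckets - 1)];

		out[i] = 0;
		for (int j = 0; j < BUCKET_ENTRIES; j++)   /* stage 2 */
			if (b->sig[j] == (uint16_t)sigs[i] &&
			    b->key[j] == keys[i]) {
				out[i] = b->data[j];
				break;
			}
	}
}
```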
| H A D | hash_lib.rst | 142 comparing the 2-byte signature of the input key against the signature of a key from the bucket. 149 First of all, the primary bucket is identified and entry is likely to be stored there. 152 If signature is not in the primary bucket, the secondary bucket is looked up, where same procedure 158 the primary bucket, a signature is stored in that entry, key and data (if any) are added to 160 If there is no space in the primary bucket, one of the entries on that bucket is pushed to its alte… 163 If there is room in the alternative bucket, the evicted entry 169 …s considered not able to be added (unless extendable bucket flag is set, and in that case the buck… 186 reached, the secondary bucket of this key is extended 189 …kup for a certain key, as before, the primary bucket is searched for a match and then the secondar… 201 As mentioned above, Cuckoo hash implementation pushes elements out of their bucket, [all …]
|
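hash_lib.rst describes the rte_hash lookup order: scan the primary bucket comparing the 2-byte signature of each slot, fall back to the secondary bucket, and only run the full key compare on a signature hit. A hedged sketch of that signature-first, two-bucket probe; the slot count matches the 8-entry buckets the library uses, but the secondary-bucket derivation here is an invented stand-in, not the exact rte_hash formula:

```c
#include <stdint.h>
#include <string.h>

#define SLOTS_PER_BUCKET 8

struct cuckoo_bucket {
	uint16_t sig[SLOTS_PER_BUCKET];
	uint32_t key_idx[SLOTS_PER_BUCKET];   /* 0 = empty slot */
};

/* Compare signatures first; only a signature match pays for memcmp(). */
static int bucket_probe(const struct cuckoo_bucket *b, uint16_t sig,
			const void *key, size_t key_len,
			const uint8_t *key_store)
{
	for (int i = 0; i < SLOTS_PER_BUCKET; i++)
		if (b->key_idx[i] != 0 && b->sig[i] == sig &&
		    memcmp(key_store + (size_t)b->key_idx[i] * key_len,
			   key, key_len) == 0)
			return (int)b->key_idx[i];
	return -1;
}

/* Probe the primary bucket, then the secondary; -1 means a miss. */
static int hash_lookup(const struct cuckoo_bucket *buckets, uint32_t n_buckets,
		       uint32_t hash, const void *key, size_t key_len,
		       const uint8_t *key_store)
{
	uint16_t sig = (uint16_t)(hash >> 16);        /* short signature */
	uint32_t prim = hash & (n_buckets - 1);
	uint32_t sec = (prim ^ sig) & (n_buckets - 1);/* invented rule   */

	int idx = bucket_probe(&buckets[prim], sig, key, key_len, key_store);
	if (idx < 0)
		idx = bucket_probe(&buckets[sec], sig, key, key_len, key_store);
	return idx;
}
```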
| /f-stack/freebsd/contrib/ck/src/ |
| H A D | ck_epoch.c | 155 unsigned int i = section->bucket; in CK_STACK_CONTAINER() 157 current = &record->local.bucket[i]; in CK_STACK_CONTAINER() 172 other = &record->local.bucket[(i + 1) & CK_EPOCH_SENSE_MASK]; in CK_STACK_CONTAINER() 195 ref = &record->local.bucket[i]; in _ck_epoch_addref() 211 previous = &record->local.bucket[(i + 1) & in _ck_epoch_addref() 224 section->bucket = i; in _ck_epoch_addref()
|
| /f-stack/dpdk/drivers/event/octeontx2/ |
| H A D | otx2_tim_worker.h | 124 uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc, in tim_get_target_bucket() local 129 bucket = bucket % tim_ring->nb_bkts; in tim_get_target_bucket() 130 mirr_bucket = (bucket + (tim_ring->nb_bkts >> 1)) % in tim_get_target_bucket() 134 bucket = bucket & (tim_ring->nb_bkts - 1); in tim_get_target_bucket() 135 mirr_bucket = (bucket + (tim_ring->nb_bkts >> 1)) & in tim_get_target_bucket() 139 *bkt = &tim_ring->bkt[bucket]; in tim_get_target_bucket()
|
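otx2_tim_worker.h maps a timer's expiry to a wheel bucket by dividing the relative cycle count by the tick interval and wrapping by the number of buckets, with a plain modulo in the general case and a cheaper mask when nb_bkts is a power of two; a mirror bucket half a wheel away is computed alongside. A hedged sketch of that index math, with ordinary division in place of the rte_reciprocal_divide_u64() optimization:

```c
#include <stdint.h>
#include <stdbool.h>

struct tim_ring {
	uint64_t ring_start_cyc;   /* cycle count when the wheel started */
	uint64_t tck_int;          /* cycles per bucket (tick interval)  */
	uint32_t nb_bkts;          /* number of buckets in the wheel     */
	bool     pow2_bkts;        /* nb_bkts is a power of two          */
};

/* Map an absolute expiry (in cycles) to its bucket and mirror bucket. */
static void get_target_bucket(const struct tim_ring *r, uint64_t expiry_cyc,
			      uint32_t *bkt, uint32_t *mirr_bkt)
{
	uint64_t rel = expiry_cyc - r->ring_start_cyc;
	uint32_t bucket = (uint32_t)(rel / r->tck_int);

	if (r->pow2_bkts) {
		*bkt = bucket & (r->nb_bkts - 1);
		*mirr_bkt = (bucket + (r->nb_bkts >> 1)) & (r->nb_bkts - 1);
	} else {
		*bkt = bucket % r->nb_bkts;
		*mirr_bkt = (bucket + (r->nb_bkts >> 1)) % r->nb_bkts;
	}
}
```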
| /f-stack/dpdk/app/test/ |
| H A D | test_table_tables.c | 65 struct rte_bucket_4_8 *bucket; in test_lru_update() local 75 bucket = &b; in test_lru_update() 78 bucket->lru_list = 0xFFFFFFFFFFFFFFFFULL; in test_lru_update() 80 bucket->lru_list = 0x0000000100020003ULL; in test_lru_update() 86 lru_update(bucket, idx); in test_lru_update() 87 pos = lru_pos(bucket); in test_lru_update() 91 __func__, i, bucket->lru_list, i>>1, pos); in test_lru_update() 94 if (bucket->lru_list != shuffles) { in test_lru_update() 97 __func__, i, bucket->lru_list, shuffles); in test_lru_update() 117 lru_update(bucket, i); in test_lru_update() [all …]
|