Searched refs:cache (Results 1 – 25 of 107) sorted by relevance

/dpdk/lib/mempool/
rte_mempool.h:1311 if (cache == NULL || cache->len == 0) in rte_mempool_cache_flush()
1314 rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len); in rte_mempool_cache_flush()
1315 cache->len = 0; in rte_mempool_cache_flush()
1344 cache_objs = &cache->objs[cache->len]; in rte_mempool_do_generic_put()
1358 if (cache->len >= cache->flushthresh) { in rte_mempool_do_generic_put()
1359 rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size], in rte_mempool_do_generic_put()
1360 cache->len - cache->size); in rte_mempool_do_generic_put()
1361 cache->len = cache->size; in rte_mempool_do_generic_put()
1464 if (unlikely(cache == NULL || n >= cache->size)) in rte_mempool_do_generic_get()
1472 uint32_t req = n + (cache->size - cache->len); in rte_mempool_do_generic_get()
[all …]
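
The rte_mempool.h matches above are the cache's put-side fast path: freed pointers are stashed in cache->objs, and once cache->len reaches cache->flushthresh everything above cache->size is handed back to the backend in one bulk enqueue. A minimal standalone model of that threshold logic, assuming the usual 1.5x flush multiplier; toy_cache and backend_enqueue_bulk() are hypothetical stand-ins for the real mempool types, not DPDK API:

    #include <stdio.h>
    #include <string.h>

    #define CACHE_SIZE   32                          /* nominal cache size */
    #define FLUSH_THRESH (CACHE_SIZE * 3 / 2)        /* assumed 1.5x multiplier */
    #define CACHE_CAP    (FLUSH_THRESH + CACHE_SIZE) /* headroom for one burst */

    struct toy_cache {
        unsigned int len;       /* number of cached object pointers */
        void *objs[CACHE_CAP];  /* cached pointers, used as a stack */
    };

    /* Hypothetical stand-in for rte_mempool_ops_enqueue_bulk(). */
    static void backend_enqueue_bulk(void * const *objs, unsigned int n)
    {
        (void)objs;
        printf("flushing %u objects to the backend\n", n);
    }

    /* Same shape as the rte_mempool_do_generic_put() lines above. */
    static void toy_cache_put(struct toy_cache *c, void * const *objs,
                              unsigned int n)
    {
        memcpy(&c->objs[c->len], objs, n * sizeof(*objs));
        c->len += n;
        if (c->len >= FLUSH_THRESH) {
            /* Flush the excess, keeping CACHE_SIZE objects hot. */
            backend_enqueue_bulk(&c->objs[CACHE_SIZE], c->len - CACHE_SIZE);
            c->len = CACHE_SIZE;
        }
    }

Flushing down to cache->size rather than to empty keeps a warm working set in place for the next burst of gets.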
rte_mempool_trace_fp.h:50 uint32_t nb_objs, void *cache),
54 rte_trace_point_emit_ptr(cache);
60 uint32_t nb_objs, void *cache),
64 rte_trace_point_emit_ptr(cache);
70 uint32_t nb_objs, void *cache),
74 rte_trace_point_emit_ptr(cache);
80 uint32_t nb_objs, void *cache),
84 rte_trace_point_emit_ptr(cache);
107 RTE_TRACE_POINT_ARGS(void *cache, void *mempool),
108 rte_trace_point_emit_ptr(cache);
rte_mempool_trace.h:110 struct rte_mempool_cache *cache),
113 rte_trace_point_emit_ptr(cache);
114 rte_trace_point_emit_u32(cache->len);
115 rte_trace_point_emit_u32(cache->flushthresh);
120 RTE_TRACE_POINT_ARGS(void *cache),
121 rte_trace_point_emit_ptr(cache);
mempool_trace_points.c:58 lib.mempool.cache.free)
61 lib.mempool.default.cache)
67 lib.mempool.cache.flush)
rte_mempool.c:749 cache->size = size; in mempool_cache_init()
751 cache->len = 0; in mempool_cache_init()
762 struct rte_mempool_cache *cache; in rte_mempool_cache_create() local
769 cache = rte_zmalloc_socket("MEMPOOL_CACHE", sizeof(*cache), in rte_mempool_cache_create()
771 if (cache == NULL) { in rte_mempool_cache_create()
777 mempool_cache_init(cache, size); in rte_mempool_cache_create()
780 return cache; in rte_mempool_cache_create()
791 rte_mempool_trace_cache_free(cache); in rte_mempool_cache_free()
792 rte_free(cache); in rte_mempool_cache_free()
1196 cache = &mp->local_cache[lcore_id]; in mempool_audit_cache()
[all …]
/dpdk/drivers/net/nfp/nfpcore/
nfp_rtsym.c:121 struct nfp_rtsym_table *cache; in __nfp_rtsym_table_read() local
144 size = sizeof(*cache); in __nfp_rtsym_table_read()
147 cache = malloc(size); in __nfp_rtsym_table_read()
148 if (!cache) in __nfp_rtsym_table_read()
151 cache->cpp = cpp; in __nfp_rtsym_table_read()
153 cache->strtab = (void *)&cache->symtab[cache->num]; in __nfp_rtsym_table_read()
162 cache->strtab[strtab_size] = '\0'; in __nfp_rtsym_table_read()
164 for (n = 0; n < cache->num; n++) in __nfp_rtsym_table_read()
166 &cache->symtab[n], &rtsymtab[n]); in __nfp_rtsym_table_read()
170 return cache; in __nfp_rtsym_table_read()
[all …]
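
__nfp_rtsym_table_read() above sizes a single allocation to hold the table header, the symbol array, and the string table, then points cache->strtab just past the last symtab entry. A reduced sketch of that single-allocation layout; sym, sym_table, and sym_table_alloc() are illustrative stand-ins, not the nfp structures:

    #include <stdlib.h>

    struct sym { unsigned int name_off; /* offset into strtab */ };

    struct sym_table {
        unsigned int num;    /* number of symbols */
        char *strtab;        /* interior pointer, placed after symtab[] */
        struct sym symtab[]; /* flexible array member */
    };

    static struct sym_table *sym_table_alloc(unsigned int num,
                                             size_t strtab_size)
    {
        /* One malloc covers header + symtab[num] + strtab + NUL. */
        size_t size = sizeof(struct sym_table)
                    + num * sizeof(struct sym) + strtab_size + 1;
        struct sym_table *t = malloc(size);

        if (t == NULL)
            return NULL;
        t->num = num;
        t->strtab = (char *)&t->symtab[num]; /* strings start past the array */
        t->strtab[strtab_size] = '\0';       /* as on line 162 above */
        return t;
    }

One allocation means one free() and no dangling interior pointers, at the cost of knowing both sizes up front.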
/dpdk/app/test/
test_mempool_perf.c:138 cache); in test_loop()
150 cache); in test_loop()
165 struct rte_mempool_cache *cache; in per_lcore_mempool_test() local
171 if (cache == NULL) in per_lcore_mempool_test()
175 cache = rte_mempool_default_cache(mp, lcore_id); in per_lcore_mempool_test()
199 ret = test_loop(mp, cache, n_keep, 1, 1); in per_lcore_mempool_test()
201 ret = test_loop(mp, cache, n_keep, 4, 4); in per_lcore_mempool_test()
203 ret = test_loop(mp, cache, n_keep, in per_lcore_mempool_test()
206 ret = test_loop(mp, cache, n_keep, 32, 32); in per_lcore_mempool_test()
220 rte_mempool_cache_flush(cache, mp); in per_lcore_mempool_test()
[all …]
test_mempool.c:86 struct rte_mempool_cache *cache; in test_mempool_basic() local
92 if (cache == NULL) in test_mempool_basic()
103 if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0) in test_mempool_basic()
110 offset = use_external_cache ? 1 * cache->len : 0; in test_mempool_basic()
126 rte_mempool_generic_put(mp, &obj, 1, cache); in test_mempool_basic()
130 if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0) in test_mempool_basic()
133 rte_mempool_generic_put(mp, &obj, 1, cache); in test_mempool_basic()
139 rte_mempool_generic_put(mp, &obj, 1, cache); in test_mempool_basic()
140 rte_mempool_generic_put(mp, &obj2, 1, cache); in test_mempool_basic()
183 rte_mempool_cache_flush(cache, mp); in test_mempool_basic()
[all …]
/dpdk/.github/workflows/
build.yml:81 - name: Generate cache keys
91 uses: actions/cache@v2
98 id: libabigail-cache
99 uses: actions/cache@v2
105 uses: actions/cache@v2
110 - name: Update APT cache
118 if: env.ABI_CHECKS == 'true' && steps.libabigail-cache.outputs.cache-hit != 'true'
178 uses: actions/cache@v2
198 - name: Save image in cache
244 uses: actions/cache@v2
[all …]
/dpdk/examples/ipsec-secgw/
sad.h:78 struct ipsec_sad_cache *cache; in sad_lookup() local
83 cache = &RTE_PER_LCORE(sad_cache); in sad_lookup()
98 cache_idx = SPI2IDX(spi, cache->mask); in sad_lookup()
101 cached_sa = (cache->mask != 0) ? in sad_lookup()
102 cache->v4[cache_idx] : NULL; in sad_lookup()
122 cached_sa = (cache->mask != 0) ? in sad_lookup()
123 cache->v6[cache_idx] : NULL; in sad_lookup()
152 sa_cache_update(cache->v4, (struct ipsec_sa *)v4_res[i], in sad_lookup()
153 cache->mask); in sad_lookup()
163 sa_cache_update(cache->v6, (struct ipsec_sa *)v6_res[i], in sad_lookup()
[all …]
sad.c:84 struct ipsec_sad_cache *cache; in ipsec_sad_lcore_cache_init() local
86 cache = &RTE_PER_LCORE(sad_cache); in ipsec_sad_lcore_cache_init()
92 cache->v4 = rte_zmalloc_socket(NULL, cache_mem_sz, in ipsec_sad_lcore_cache_init()
94 if (cache->v4 == NULL) in ipsec_sad_lcore_cache_init()
97 cache->v6 = rte_zmalloc_socket(NULL, cache_mem_sz, in ipsec_sad_lcore_cache_init()
99 if (cache->v6 == NULL) in ipsec_sad_lcore_cache_init()
102 cache->mask = cache_elem - 1; in ipsec_sad_lcore_cache_init()
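
The sad.h/sad.c matches show ipsec-secgw's per-lcore SA cache: a direct-mapped table whose size is a power of two, so cache->mask = cache_elem - 1 and SPI2IDX() reduces an SPI to a slot with a single AND. A hedged sketch of the same direct-mapped pattern; sa_cache and its helpers are illustrative names, not the ipsec-secgw types:

    #include <stdint.h>
    #include <stdlib.h>

    struct sa_cache {
        uint32_t mask;  /* cache_elem - 1; needs power-of-two cache_elem */
        void **slots;   /* one slot per masked key; new entries evict old */
    };

    static inline uint32_t spi2idx(uint32_t spi, uint32_t mask)
    {
        return spi & mask;  /* same idea as SPI2IDX() above */
    }

    static int sa_cache_init(struct sa_cache *c, uint32_t cache_elem)
    {
        c->slots = calloc(cache_elem, sizeof(*c->slots));
        if (c->slots == NULL)
            return -1;
        c->mask = cache_elem - 1;  /* as in ipsec_sad_lcore_cache_init() */
        return 0;
    }

    static inline void *sa_cache_lookup(const struct sa_cache *c, uint32_t spi)
    {
        /* mask == 0 doubles as "caching disabled", as in sad_lookup(). */
        return (c->mask != 0) ? c->slots[spi2idx(spi, c->mask)] : NULL;
    }

    static inline void sa_cache_update(struct sa_cache *c, uint32_t spi,
                                       void *sa)
    {
        if (c->mask != 0)
            c->slots[spi2idx(spi, c->mask)] = sa;
    }

A miss simply falls through to the full SAD lookup, and the result is written back so the next packet with the same SPI hits.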
/dpdk/drivers/net/mlx5/
mlx5_utils.c:205 lc = pool->cache[cidx]->lc; in mlx5_ipool_update_global_cache()
267 return pool->cache[cidx]->idx[pool->cache[cidx]->len]; in mlx5_ipool_allocate_from_global()
373 if (!pool->cache[cidx]) { in _mlx5_ipool_get_cache()
414 if (!pool->cache[cidx]) { in _mlx5_ipool_malloc_cache()
419 pool->cache[cidx]->len--; in _mlx5_ipool_malloc_cache()
420 *idx = pool->cache[cidx]->idx[pool->cache[cidx]->len]; in _mlx5_ipool_malloc_cache()
471 pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx; in _mlx5_ipool_free_cache()
475 ilc = pool->cache[cidx]; in _mlx5_ipool_free_cache()
493 pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx; in _mlx5_ipool_free_cache()
494 pool->cache[cidx]->len++; in _mlx5_ipool_free_cache()
[all …]
/dpdk/drivers/common/mlx5/
mlx5_common_utils.c:23 l_inconst->cache[MLX5_LIST_GLOBAL] = gc; in mlx5_list_init()
24 LIST_INIT(&l_inconst->cache[MLX5_LIST_GLOBAL]->h); in mlx5_list_init()
78 LIST_FIRST(&l_inconst->cache[lcore_index]->h); in __list_lookup()
112 if (!l_inconst->cache[i]) in _mlx5_list_lookup()
180 if (unlikely(!l_inconst->cache[lcore_index])) { in _mlx5_list_register()
181 l_inconst->cache[lcore_index] = mlx5_malloc(0, in _mlx5_list_register()
184 if (!l_inconst->cache[lcore_index]) { in _mlx5_list_register()
188 l_inconst->cache[lcore_index]->inv_cnt = 0; in _mlx5_list_register()
189 LIST_INIT(&l_inconst->cache[lcore_index]->h); in _mlx5_list_register()
352 if (!l_inconst->cache[i]) in mlx5_list_uninit()
[all …]
mlx5_common_mr.c:412 if (mr_btree_insert(&share_cache->cache, &entry) < 0) { in mlx5_mr_insert_cache()
490 if (!unlikely(share_cache->cache.overflow)) { in mlx5_mr_lookup_cache()
493 *entry = (*share_cache->cache.table)[idx]; in mlx5_mr_lookup_cache()
530 share_cache->cache.len = 1; in mlx5_mr_rebuild_cache()
531 share_cache->cache.overflow = 0; in mlx5_mr_rebuild_cache()
787 mr_btree_insert(&share_cache->cache, entry); in mlx5_mr_create_primary()
950 *entry = (*share_cache->cache.table)[idx]; in mr_lookup_caches()
1041 mlx5_mr_btree_free(&share_cache->cache); in mlx5_mr_release_cache()
1068 return mlx5_mr_btree_init(&share_cache->cache, in mlx5_mr_create_cache()
1085 memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache)); in mlx5_mr_flush_local_cache()
[all …]
mlx5_common_mr.h:73 struct mr_cache_entry cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */ member
86 struct mlx5_mr_btree cache; /* Global MR cache table. */ member
202 lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru, in mlx5_mr_mb2mr()
/dpdk/drivers/net/i40e/
i40e_rxtx_vec_avx512.c:36 if (unlikely(!cache)) in i40e_rxq_rearm()
47 cache->len); in i40e_rxq_rearm()
53 &cache->objs[cache->len], req); in i40e_rxq_rearm()
55 cache->len += req; in i40e_rxq_rearm()
93 (&cache->objs[cache->len - 8]); in i40e_rxq_rearm()
909 if (!cache || cache->len == 0) in i40e_tx_free_bufs_avx512()
912 cache_objs = &cache->objs[cache->len]; in i40e_tx_free_bufs_avx512()
941 if (cache->len >= cache->flushthresh) { in i40e_tx_free_bufs_avx512()
943 (mp, &cache->objs[cache->size], in i40e_tx_free_bufs_avx512()
944 cache->len - cache->size); in i40e_tx_free_bufs_avx512()
[all …]
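
The i40e AVX-512 matches reach into the mempool cache directly instead of going through the generic wrappers: the rearm path tops the cache up from the backend when it runs short, then consumes pointers straight out of cache->objs, while the Tx-free path repeats the flush-threshold dance shown earlier. A get-side counterpart to the toy_cache sketch above (reusing its struct and macros; backend_dequeue_bulk() is a hypothetical stand-in for rte_mempool_ops_dequeue_bulk()):

    /* Hypothetical stand-in: fetch n pointers from the backend, 0 on success. */
    static int backend_dequeue_bulk(void **objs, unsigned int n);

    /* Same shape as rte_mempool_do_generic_get(): refill so the cache holds
     * exactly CACHE_SIZE objects after serving the request, then pop the
     * newest pointers off the top of the stack. */
    static int toy_cache_get(struct toy_cache *c, void **objs, unsigned int n)
    {
        if (c->len < n) {
            unsigned int req = n + (CACHE_SIZE - c->len);
            if (backend_dequeue_bulk(&c->objs[c->len], req) != 0)
                return -1; /* the real code then tries the backend directly */
            c->len += req;
        }
        c->len -= n;
        memcpy(objs, &c->objs[c->len], n * sizeof(*objs));
        return 0;
    }

Note the real fast path also bypasses the cache entirely when n >= cache->size (line 1464 of rte_mempool.h above), since a request that large would only churn it.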
/dpdk/doc/guides/prog_guide/
mempool_lib.rst:12 It provides some other optional services such as a per-core object cache and
80 the memory pool allocator can maintain a per-core cache and do bulk requests to the memory pool's r…
81 via the cache with many fewer locks on the actual memory pool structure.
82 In this way, each core has full access to its own cache (with locks) of free objects and
84 obtain more objects when the cache is empty.
86 While this may mean a number of buffers may sit idle on some core's cache,
89 The cache is composed of a small, per-core table of pointers and its length (used as a stack).
90 This internal cache can be enabled or disabled at creation of the pool.
92 The maximum size of the cache is static and is defined at compilation time (RTE_MEMPOOL_CACHE_MAX_S…
94 :numref:`figure_mempool` shows a cache in operation.
[all …]
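
The test_mempool.c matches earlier show the user-owned cache API this guide is describing. A minimal usage sketch along those lines, assuming EAL is initialized and mp is an existing pool (error paths abbreviated):

    #include <rte_mempool.h>

    static int cache_roundtrip(struct rte_mempool *mp)
    {
        struct rte_mempool_cache *cache;
        void *obj;

        /* An external cache owned by the caller, not tied to an lcore. */
        cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
        if (cache == NULL)
            return -1;

        /* Gets and puts are served from the cache when possible. */
        if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0) {
            rte_mempool_cache_free(cache);
            return -1;
        }
        rte_mempool_generic_put(mp, &obj, 1, cache);

        /* Return anything still cached to the pool before freeing. */
        rte_mempool_cache_flush(cache, mp);
        rte_mempool_cache_free(cache);
        return 0;
    }

On an lcore, rte_mempool_default_cache(mp, rte_lcore_id()) returns the built-in per-core cache instead, as test_mempool_perf.c does above.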
/dpdk/drivers/net/ice/
ice_rxtx_vec_avx512.c:28 if (unlikely(!cache)) in ice_rxq_rearm()
34 cache->len); in ice_rxq_rearm()
37 &cache->objs[cache->len], req); in ice_rxq_rearm()
39 cache->len += req; in ice_rxq_rearm()
75 (&cache->objs[cache->len - 8]); in ice_rxq_rearm()
1004 if (!cache || cache->len == 0) in ice_tx_free_bufs_avx512()
1007 cache_objs = &cache->objs[cache->len]; in ice_tx_free_bufs_avx512()
1036 if (cache->len >= cache->flushthresh) { in ice_tx_free_bufs_avx512()
1038 (mp, &cache->objs[cache->size], in ice_tx_free_bufs_avx512()
1039 cache->len - cache->size); in ice_tx_free_bufs_avx512()
[all …]
/dpdk/drivers/net/sfc/
sfc_sw_stats.c:494 uint64_t *cache = sa->sw_stats.cache; in sfc_sw_stats_clear_cache() local
497 memset(cache, 0xff, cache_count * sizeof(*cache)); in sfc_sw_stats_clear_cache()
746 uint64_t **cache = &sa->sw_stats.cache; in sfc_sw_xstats_configure() local
780 *cache = rte_realloc(*cache, cache_count * sizeof(**cache), 0); in sfc_sw_xstats_configure()
781 if (*cache == NULL) { in sfc_sw_xstats_configure()
786 stat_cache = *cache; in sfc_sw_xstats_configure()
800 rte_free(*cache); in sfc_sw_xstats_configure()
801 *cache = NULL; in sfc_sw_xstats_configure()
860 sa->sw_stats.cache = NULL; in sfc_sw_xstats_init()
872 rte_free(sa->sw_stats.cache); in sfc_sw_xstats_close()
[all …]
/dpdk/drivers/net/iavf/
iavf_rxtx_vec_avx512.c:44 if (unlikely(!cache)) in iavf_rxq_rearm()
56 cache->len); in iavf_rxq_rearm()
60 (rxq->mp, &cache->objs[cache->len], req); in iavf_rxq_rearm()
62 cache->len += req; in iavf_rxq_rearm()
99 (&cache->objs[cache->len - IAVF_DESCS_PER_LOOP_AVX]); in iavf_rxq_rearm()
1730 if (!cache || cache->len == 0) in iavf_tx_free_bufs_avx512()
1733 cache_objs = &cache->objs[cache->len]; in iavf_tx_free_bufs_avx512()
1762 if (cache->len >= cache->flushthresh) { in iavf_tx_free_bufs_avx512()
1764 &cache->objs[cache->size], in iavf_tx_free_bufs_avx512()
1765 cache->len - cache->size); in iavf_tx_free_bufs_avx512()
[all …]
/dpdk/drivers/net/mlx4/
mlx4_mr.c:364 if (mr_btree_insert(&priv->mr.cache, &entry) < 0) { in mr_insert_dev_cache()
444 if (!unlikely(priv->mr.cache.overflow)) { in mr_lookup_dev()
445 lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr); in mr_lookup_dev()
447 *entry = (*priv->mr.cache.table)[idx]; in mr_lookup_dev()
731 mr_btree_insert(&priv->mr.cache, entry); in mlx4_mr_create_primary()
868 priv->mr.cache.len = 1; in mr_rebuild_dev_cache()
869 priv->mr.cache.overflow = 0; in mr_rebuild_dev_cache()
1032 *entry = (*priv->mr.cache.table)[idx]; in mlx4_mr_lookup_dev()
1181 memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache)); in mlx4_mr_flush_local_cache()
1415 mlx4_mr_btree_dump(&priv->mr.cache); in mlx4_mr_dump_dev()
[all …]
mlx4_rxtx.h:208 lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru, in mlx4_rx_addr2mr()
240 lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru, in mlx4_tx_mb2mr()
/dpdk/doc/guides/sample_app_ug/
l2_forward_cat.rst:17 last level cache. CAT introduces classes of service (COS) that are essentially
19 one cache way in last level cache.
22 exclusive, shared, or mixed access to the CPU's last level cache.
29 are programmed to allow fill into all cache ways.
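
Each COS, as the excerpt says, is essentially a bitmask of last-level-cache ways: bit i set means the class may fill into way i, and the default COS allows all ways. A tiny illustration of such masks; the way count and values here are hypothetical, not taken from the sample app:

    #define LLC_WAYS      11u                    /* hypothetical 11-way LLC */
    #define COS_ALL_WAYS  ((1u << LLC_WAYS) - 1) /* 0x7ff: fill anywhere */
    #define COS_EXCLUSIVE 0x003u                 /* ways 0-1, one app only */
    #define COS_SHARED    (COS_ALL_WAYS & ~COS_EXCLUSIVE) /* everyone else */

Giving a latency-critical app COS_EXCLUSIVE while everything else runs under COS_SHARED is the "exclusive" arrangement the text refers to; overlapping masks give shared or mixed access.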
/dpdk/drivers/bus/dpaa/base/qbman/
process.c:185 uint32_t cache; member
246 input.cache = portal->cache; in qman_allocate_raw_portal()
/dpdk/lib/member/
rte_member_ht.c:109 ss->cache = params->is_cache; in rte_member_create_ht()
160 if (ss->cache) { in get_buckets_index()
484 if (ss->cache) { in rte_member_add_ht()
497 if (ss->cache) { in rte_member_add_ht()
