Lines matching refs: share_cache
324 mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache, in mlx5_mr_insert_cache() argument
330 (void *)mr, (void *)share_cache); in mlx5_mr_insert_cache()
339 if (mr_btree_insert(&share_cache->cache, &entry) < 0) { in mlx5_mr_insert_cache()
364 mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache, in mlx5_mr_lookup_list() argument
370 LIST_FOREACH(mr, &share_cache->mr_list, mr) { in mlx5_mr_lookup_list()
404 mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache, in mlx5_mr_lookup_cache() argument
417 if (!unlikely(share_cache->cache.overflow)) { in mlx5_mr_lookup_cache()
418 lkey = mr_btree_lookup(&share_cache->cache, &idx, addr); in mlx5_mr_lookup_cache()
420 *entry = (*share_cache->cache.table)[idx]; in mlx5_mr_lookup_cache()
423 mr = mlx5_mr_lookup_list(share_cache, entry, addr); in mlx5_mr_lookup_cache()
452 mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache) in mlx5_mr_rebuild_cache() argument
456 DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache); in mlx5_mr_rebuild_cache()
458 share_cache->cache.len = 1; in mlx5_mr_rebuild_cache()
459 share_cache->cache.overflow = 0; in mlx5_mr_rebuild_cache()
461 LIST_FOREACH(mr, &share_cache->mr_list, mr) in mlx5_mr_rebuild_cache()
462 if (mlx5_mr_insert_cache(share_cache, mr) < 0) in mlx5_mr_rebuild_cache()
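mlx5_mr_rebuild_cache() (lines 452-462) resets the device cache and repopulates it from the MR list, which is what makes it safe to drop entries that may have gone stale after an MR was freed. A compact sketch of the same idea, again with a flat array and invented names rather than the real B-tree:

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

struct mr {                          /* simplified MR record */
    uintptr_t start, end;
    LIST_ENTRY(mr) next;
};
LIST_HEAD(mr_list, mr);

struct lookup_cache {                /* flat stand-in for the B-tree */
    struct mr *table[256];
    size_t len;
    int overflow;
};

static int
cache_insert(struct lookup_cache *c, struct mr *m)
{
    if (c->len == 256) {
        c->overflow = 1;             /* lookups must fall back to the list */
        return -1;
    }
    c->table[c->len++] = m;          /* the real code keeps this sorted */
    return 0;
}

/* Drop every cached entry and rebuild the table from the MR list. */
static void
cache_rebuild(struct lookup_cache *c, struct mr_list *all)
{
    struct mr *m;

    c->len = 0;
    c->overflow = 0;
    LIST_FOREACH(m, all, next)
        if (cache_insert(c, m) < 0)
            return;                  /* table full: overflow already set */
}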
473 mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache) in mlx5_mr_garbage_collect() argument
484 rte_rwlock_write_lock(&share_cache->rwlock); in mlx5_mr_garbage_collect()
486 free_list = share_cache->mr_free_list; in mlx5_mr_garbage_collect()
487 LIST_INIT(&share_cache->mr_free_list); in mlx5_mr_garbage_collect()
488 rte_rwlock_write_unlock(&share_cache->rwlock); in mlx5_mr_garbage_collect()
495 mlx5_mr_free(mr, share_cache->dereg_mr_cb); in mlx5_mr_garbage_collect()
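The garbage-collection references (lines 473-495) show a common lock-scope trick: the free list is detached while holding the writer lock, but the expensive deregistration happens only after the lock is dropped, so readers on the datapath are blocked as briefly as possible. A self-contained sketch of that pattern, using pthread_rwlock_t and free() as stand-ins for rte_rwlock_t and the dereg_mr_cb callback:

#include <pthread.h>
#include <stdlib.h>
#include <sys/queue.h>

struct mr {
    LIST_ENTRY(mr) next;
    /* the registration handle would live here */
};
LIST_HEAD(mr_list, mr);

struct shared {
    pthread_rwlock_t lock;           /* stand-in for rte_rwlock_t */
    struct mr_list free_list;        /* MRs queued for destruction */
};

static void
garbage_collect(struct shared *s)
{
    struct mr_list doomed;
    struct mr *m, *m_next;

    /* Detach the whole free list while holding the writer lock... */
    pthread_rwlock_wrlock(&s->lock);
    doomed = s->free_list;
    LIST_INIT(&s->free_list);
    pthread_rwlock_unlock(&s->lock);
    /* ...then destroy the entries without blocking readers. */
    for (m = LIST_FIRST(&doomed); m != NULL; m = m_next) {
        m_next = LIST_NEXT(m, next);
        free(m);                     /* the real code calls dereg_mr_cb() */
    }
}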
540 struct mlx5_mr_share_cache *share_cache, in mlx5_mr_create_secondary() argument
554 rte_rwlock_read_lock(&share_cache->rwlock); in mlx5_mr_create_secondary()
556 mlx5_mr_lookup_cache(share_cache, entry, addr); in mlx5_mr_create_secondary()
559 rte_rwlock_read_unlock(&share_cache->rwlock); in mlx5_mr_create_secondary()
587 struct mlx5_mr_share_cache *share_cache, in mlx5_mr_create_primary() argument
611 mlx5_mr_garbage_collect(share_cache); in mlx5_mr_create_primary()
705 mlx5_mr_free(mr, share_cache->dereg_mr_cb); in mlx5_mr_create_primary()
709 rte_rwlock_write_lock(&share_cache->rwlock); in mlx5_mr_create_primary()
714 if (mlx5_mr_lookup_cache(share_cache, entry, addr) != UINT32_MAX) { in mlx5_mr_create_primary()
720 mr_btree_insert(&share_cache->cache, entry); in mlx5_mr_create_primary()
722 rte_rwlock_write_unlock(&share_cache->rwlock); in mlx5_mr_create_primary()
728 mlx5_mr_free(mr, share_cache->dereg_mr_cb); in mlx5_mr_create_primary()
743 if (mlx5_mr_lookup_cache(share_cache, &ret, start) == in mlx5_mr_create_primary()
768 share_cache->reg_mr_cb(pd, (void *)data.start, len, &mr->pmd_mr); in mlx5_mr_create_primary()
777 LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr); in mlx5_mr_create_primary()
785 mlx5_mr_insert_cache(share_cache, mr); in mlx5_mr_create_primary()
787 mlx5_mr_lookup_cache(share_cache, entry, addr); in mlx5_mr_create_primary()
790 rte_rwlock_write_unlock(&share_cache->rwlock); in mlx5_mr_create_primary()
794 rte_rwlock_write_unlock(&share_cache->rwlock); in mlx5_mr_create_primary()
804 mlx5_mr_free(mr, share_cache->dereg_mr_cb); in mlx5_mr_create_primary()
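mlx5_mr_create_primary() (lines 587-804) is the slow path that actually registers memory. Its write-locked section re-checks the cache first, so two datapath threads that missed on the same address do not both call the registration callback; only if the address is still absent does it register the region, link it into the MR list, and insert it into the cache. The sketch below captures just that double-checked shape with a toy registry; none of this is the real reg_mr_cb/dereg_mr_cb machinery.

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

#define LKEY_INVALID UINT32_MAX

struct region { uintptr_t start, end; uint32_t lkey; };

static pthread_rwlock_t rwlock = PTHREAD_RWLOCK_INITIALIZER;
static struct region regions[64];
static size_t nb_regions;
static uint32_t next_lkey = 1;

/* Lookup; the caller must hold the lock (read or write). */
static uint32_t
lookup_locked(uintptr_t addr)
{
    for (size_t i = 0; i < nb_regions; i++)
        if (addr >= regions[i].start && addr < regions[i].end)
            return regions[i].lkey;
    return LKEY_INVALID;
}

/* Slow path, mirroring the shape of mlx5_mr_create_primary(). */
static uint32_t
create_mr(uintptr_t start, uintptr_t end)
{
    uint32_t lkey;

    pthread_rwlock_wrlock(&rwlock);
    /*
     * Double check under the writer lock: another thread may have
     * registered the same range between our cache miss and now.
     */
    lkey = lookup_locked(start);
    if (lkey != LKEY_INVALID) {
        pthread_rwlock_unlock(&rwlock);
        return lkey;                 /* lost the race, reuse that MR */
    }
    if (nb_regions == 64) {
        pthread_rwlock_unlock(&rwlock);
        return LKEY_INVALID;         /* toy registry is full */
    }
    /* The real code invokes reg_mr_cb() (e.g. ibv_reg_mr) right here. */
    lkey = next_lkey++;
    regions[nb_regions++] = (struct region){ start, end, lkey };
    pthread_rwlock_unlock(&rwlock);
    return lkey;
}

The secondary-process variant (lines 540-559 above) never registers memory itself: it asks the primary over the multi-process channel and then re-reads the shared cache under the reader lock, which is why mlx5_mr_create() (lines 827-839 below) only has to dispatch on the process type.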
827 struct mlx5_mr_share_cache *share_cache, in mlx5_mr_create() argument
835 ret = mlx5_mr_create_primary(pd, share_cache, entry, in mlx5_mr_create()
839 ret = mlx5_mr_create_secondary(pd, mp_id, share_cache, entry, in mlx5_mr_create()
869 struct mlx5_mr_share_cache *share_cache, in mr_lookup_caches() argument
882 rte_rwlock_read_lock(&share_cache->rwlock); in mr_lookup_caches()
883 lkey = mr_btree_lookup(&share_cache->cache, &idx, addr); in mr_lookup_caches()
886 *entry = (*share_cache->cache.table)[idx]; in mr_lookup_caches()
887 rte_rwlock_read_unlock(&share_cache->rwlock); in mr_lookup_caches()
896 rte_rwlock_read_unlock(&share_cache->rwlock); in mr_lookup_caches()
898 lkey = mlx5_mr_create(pd, mp_id, share_cache, entry, addr, in mr_lookup_caches()
929 struct mlx5_mr_share_cache *share_cache, in mlx5_mr_addr2mr_bh() argument
949 lkey = mr_lookup_caches(pd, mp_id, share_cache, mr_ctrl, in mlx5_mr_addr2mr_bh()
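mr_lookup_caches() and mlx5_mr_addr2mr_bh() (lines 869-949) form the "bottom half" of the datapath translation: they run only after the per-queue cache missed, try the global cache under the reader lock, and fall back to mlx5_mr_create() when that also misses. A minimal sketch of that control flow; global_lookup() and create_mr() here are placeholders for the real B-tree lookup and creation path, not DPDK APIs.

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

#define LKEY_INVALID UINT32_MAX

struct cache_entry { uintptr_t start, end; uint32_t lkey; };

static pthread_rwlock_t glock = PTHREAD_RWLOCK_INITIALIZER;
static struct cache_entry gtable[64];    /* toy global cache */
static size_t glen;

static uint32_t
global_lookup(struct cache_entry *out, uintptr_t addr)
{
    /* The real code binary-searches the B-tree (see the earlier sketch). */
    for (size_t i = 0; i < glen; i++)
        if (addr >= gtable[i].start && addr < gtable[i].end) {
            *out = gtable[i];
            return gtable[i].lkey;
        }
    return LKEY_INVALID;
}

/* Placeholder for the mlx5_mr_create() slow path. */
static uint32_t
create_mr(struct cache_entry *out, uintptr_t addr)
{
    (void)out; (void)addr;
    return LKEY_INVALID;
}

/* Bottom half: called only when the per-queue cache missed. */
static uint32_t
addr2mr_bh(struct cache_entry *local_slot, uintptr_t addr)
{
    struct cache_entry entry;
    uint32_t lkey;

    pthread_rwlock_rdlock(&glock);
    lkey = global_lookup(&entry, addr);
    pthread_rwlock_unlock(&glock);
    if (lkey == LKEY_INVALID)
        lkey = create_mr(&entry, addr);  /* register new memory */
    if (lkey != LKEY_INVALID)
        *local_slot = entry;             /* refill the per-queue cache */
    return lkey;
}

The per-queue refill is the point of the exercise: the fast path never takes the global rwlock, it only consults its lcore-local cache and calls this bottom half on a miss.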
969 mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache) in mlx5_mr_release_cache() argument
973 rte_rwlock_write_lock(&share_cache->rwlock); in mlx5_mr_release_cache()
975 mr_next = LIST_FIRST(&share_cache->mr_list); in mlx5_mr_release_cache()
981 LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr); in mlx5_mr_release_cache()
983 LIST_INIT(&share_cache->mr_list); in mlx5_mr_release_cache()
985 mlx5_mr_btree_free(&share_cache->cache); in mlx5_mr_release_cache()
986 rte_rwlock_write_unlock(&share_cache->rwlock); in mlx5_mr_release_cache()
988 mlx5_mr_garbage_collect(share_cache); in mlx5_mr_release_cache()
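Finally, mlx5_mr_release_cache() (lines 969-988) tears everything down by moving each live MR onto the free list under the writer lock, freeing the lookup table, and then handing the actual deregistration to the garbage collector shown earlier. A short sketch of that teardown order; btree_free() and garbage_collect() are declared only, as hypothetical stand-ins for mlx5_mr_btree_free() and the collector sketched above.

#include <pthread.h>
#include <sys/queue.h>

struct mr { LIST_ENTRY(mr) next; };
LIST_HEAD(mr_list, mr);

/* Hypothetical helpers, prototypes only. */
void btree_free(void *table);
void garbage_collect(void *share_cache);

static void
release_cache(pthread_rwlock_t *lock, struct mr_list *live,
              struct mr_list *free_list, void *table, void *share_cache)
{
    struct mr *m, *m_next;

    pthread_rwlock_wrlock(lock);
    /* Detach every live MR and queue it for destruction. */
    for (m = LIST_FIRST(live); m != NULL; m = m_next) {
        m_next = LIST_NEXT(m, next);
        LIST_INSERT_HEAD(free_list, m, next);
    }
    LIST_INIT(live);
    btree_free(table);               /* drop the lookup table itself */
    pthread_rwlock_unlock(lock);
    /* Deregistration runs outside the lock, as in the GC sketch. */
    garbage_collect(share_cache);
}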
1070 mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused) in mlx5_mr_dump_cache()
1077 rte_rwlock_read_lock(&share_cache->rwlock); in mlx5_mr_dump_cache()
1079 LIST_FOREACH(mr, &share_cache->mr_list, mr) { in mlx5_mr_dump_cache()
1097 DEBUG("Dumping global cache %p", (void *)share_cache); in mlx5_mr_dump_cache()
1098 mlx5_mr_btree_dump(&share_cache->cache); in mlx5_mr_dump_cache()
1099 rte_rwlock_read_unlock(&share_cache->rwlock); in mlx5_mr_dump_cache()