/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 6WIND S.A.
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_COMMON_MR_H_
#define RTE_PMD_MLX5_COMMON_MR_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_rwlock.h>
#include <rte_bitmap.h>
#include <rte_memory.h>

#include "mlx5_glue.h"
#include "mlx5_common_mp.h"

/* Size of per-queue MR cache array for linear search. */
#define MLX5_MR_CACHE_N 8
/* Default size of MR cache table for binary search. */
#define MLX5_MR_BTREE_CACHE_N 256

/* mlx5 PMD MR struct. */
struct mlx5_pmd_mr {
	uint32_t lkey;
	void *addr;
	size_t len;
	void *obj; /* Verbs MR object or DevX umem object. */
};

/**
 * MR operation typedefs: callbacks to register and deregister memory.
 */
typedef int (*mlx5_reg_mr_t)(void *pd, void *addr, size_t length,
			     struct mlx5_pmd_mr *pmd_mr);
typedef void (*mlx5_dereg_mr_t)(struct mlx5_pmd_mr *pmd_mr);

/* Memory Region object. */
struct mlx5_mr {
	LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
	struct mlx5_pmd_mr pmd_mr; /* PMD memory region. */
	const struct rte_memseg_list *msl; /* Memseg list the MR spans. */
	int ms_base_idx; /* Start index of msl->memseg_arr[]. */
	int ms_n; /* Number of memsegs in use. */
	uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
	struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonging to MR. */
};

/* Cache entry for Memory Region. */
struct mr_cache_entry {
	uintptr_t start; /* Start address of MR. */
	uintptr_t end; /* End address of MR. */
	uint32_t lkey; /* rte_cpu_to_be_32(lkey). */
} __rte_packed;

/* MR cache table for binary search. */
struct mlx5_mr_btree {
	uint16_t len; /* Number of entries in use. */
	uint16_t size; /* Total number of allocated entries. */
	int overflow; /* Mark failure of table expansion. */
	struct mr_cache_entry (*table)[]; /* Cache entries, sorted by start address. */
} __rte_packed;

/* Per-queue MR control descriptor. */
struct mlx5_mr_ctrl {
	uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
	uint32_t cur_gen; /* Generation number saved to flush caches. */
	uint16_t mru; /* Index of last hit entry in top-half cache. */
	uint16_t head; /* Index of the oldest entry in top-half cache. */
	struct mr_cache_entry cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
	struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
} __rte_packed;

LIST_HEAD(mlx5_mr_list, mlx5_mr);

/* Global per-device MR cache. */
struct mlx5_mr_share_cache {
	uint32_t dev_gen; /* Generation number to flush local caches. */
	rte_rwlock_t rwlock; /* MR cache lock. */
	struct mlx5_mr_btree cache; /* Global MR cache table. */
	struct mlx5_mr_list mr_list; /* Registered MR list. */
	struct mlx5_mr_list mr_free_list; /* Freed MR list. */
	mlx5_reg_mr_t reg_mr_cb; /* MR registration callback. */
	mlx5_dereg_mr_t dereg_mr_cb; /* MR deregistration callback. */
} __rte_packed;

/**
 * Look up LKey in the given lookup table by linear search. First check the
 * last-hit entry; on a miss, search the entire array. If found, update the
 * last-hit index and return the LKey.
 *
 * @param lkp_tbl
 *   Pointer to lookup table.
 * @param[in,out] cached_idx
 *   Pointer to last-hit index.
 * @param n
 *   Size of lookup table.
 * @param addr
 *   Search key.
 *
 * @return
 *   Matched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_mr_lookup_lkey(struct mr_cache_entry *lkp_tbl, uint16_t *cached_idx,
		    uint16_t n, uintptr_t addr)
{
	uint16_t idx;

	/* Check the last-hit entry first. */
	if (likely(addr >= lkp_tbl[*cached_idx].start &&
		   addr < lkp_tbl[*cached_idx].end))
		return lkp_tbl[*cached_idx].lkey;
	/* Linear search; valid entries are packed at the front. */
	for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
		if (addr >= lkp_tbl[idx].start &&
		    addr < lkp_tbl[idx].end) {
			/* Found. */
			*cached_idx = idx;
			return lkp_tbl[idx].lkey;
		}
	}
	return UINT32_MAX;
}

__rte_internal
int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket);
__rte_internal
void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
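
/*
 * Usage sketch (illustrative only, not part of the exported API): creating
 * the bottom-half B-tree of a per-queue control structure. The wrapper name
 * and error handling are assumptions for illustration.
 */
static __rte_unused int
example_mr_ctrl_btree_init(struct mlx5_mr_ctrl *mr_ctrl, int socket)
{
	/* Allocate the bottom-half table with the default capacity. */
	if (mlx5_mr_btree_init(&mr_ctrl->cache_bh,
			       MLX5_MR_BTREE_CACHE_N, socket) < 0)
		return -1;
	return 0;
}
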
__rte_internal
void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
__rte_internal
uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
			    struct mlx5_mr_share_cache *share_cache,
			    struct mlx5_mr_ctrl *mr_ctrl,
			    uintptr_t addr, unsigned int mr_ext_memseg_en);
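
/*
 * Usage sketch (illustrative only): the fast/slow path split for address to
 * LKey translation. A datapath caller first tries the linear top-half cache
 * via mlx5_mr_lookup_lkey() and falls back to the bottom-half path on a
 * miss. The wrapper name and the mr_ext_memseg_en value are assumptions.
 */
static __rte_unused uint32_t
example_addr2mr(void *pd, struct mlx5_mp_id *mp_id,
		struct mlx5_mr_share_cache *share_cache,
		struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr)
{
	/* Fast path: linear search in the per-queue top-half cache. */
	uint32_t lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
					    MLX5_MR_CACHE_N, addr);

	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Slow path: bottom-half binary search, then the global cache,
	 * creating a new MR if the address is still unknown. Memseg
	 * extension is enabled here purely for illustration.
	 */
	return mlx5_mr_addr2mr_bh(pd, mp_id, share_cache, mr_ctrl, addr, 1);
}
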
__rte_internal
void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
__rte_internal
void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);
__rte_internal
void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);
__rte_internal
void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
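
/*
 * Usage sketch (illustrative only): keeping a per-queue cache coherent.
 * When the device generation counter has moved on (an MR was freed and the
 * global cache rebuilt), the local top/bottom-half caches are stale and
 * must be flushed before the next lookup. The wrapper name is an
 * assumption.
 */
static __rte_unused void
example_mr_ctrl_sync(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* dev_gen_ptr points at the shared cache's dev_gen counter. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
}
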
__rte_internal
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr);
__rte_internal
uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr);
__rte_internal
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr);
__rte_internal
struct mlx5_mr *
mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
		   mlx5_reg_mr_t reg_mr_cb);
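
/*
 * Usage sketch (illustrative only): registering an externally allocated
 * buffer. A new MR is created with the configured callback, linked into the
 * registered MR list, and inserted into the global cache table under the
 * writer lock. The wrapper name and locking details are assumptions for
 * illustration.
 */
static __rte_unused int
example_register_ext_buf(struct mlx5_mr_share_cache *share_cache, void *pd,
			 void *buf, size_t len, int socket_id)
{
	struct mlx5_mr *mr;

	mr = mlx5_create_mr_ext(pd, (uintptr_t)buf, len, socket_id,
				share_cache->reg_mr_cb);
	if (mr == NULL)
		return -1;
	rte_rwlock_write_lock(&share_cache->rwlock);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	/* A failed insert only means the MR is not cached; lookups can
	 * still find it by walking mr_list.
	 */
	mlx5_mr_insert_cache(share_cache, mr);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	return 0;
}
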
__rte_internal
uint32_t
mlx5_mr_create_primary(void *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en);
__rte_internal
int
mlx5_common_verbs_reg_mr(void *pd, void *addr, size_t length,
			 struct mlx5_pmd_mr *pmd_mr);
__rte_internal
void
mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr);
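
/*
 * Usage sketch (illustrative only): wiring the Verbs-based registration
 * callbacks declared above into a shared cache, as a driver would do at
 * device initialization. The wrapper name is an assumption.
 */
static __rte_unused void
example_set_verbs_mr_cb(struct mlx5_mr_share_cache *share_cache)
{
	share_cache->reg_mr_cb = mlx5_common_verbs_reg_mr;
	share_cache->dereg_mr_cb = mlx5_common_verbs_dereg_mr;
}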

__rte_internal
void
mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb);

#endif /* RTE_PMD_MLX5_COMMON_MR_H_ */