/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_hash_crc.h>
#include <rte_errno.h>

#include <mlx5_malloc.h>

#include "mlx5_common_utils.h"
#include "mlx5_common_log.h"

/********************* mlx5 list ************************/

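/*
 * Initialize the list lock and, when entries are shared between lcores,
 * attach the provided global cache to the MLX5_LIST_GLOBAL slot.
 */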
static int
mlx5_list_init(struct mlx5_list_inconst *l_inconst,
	       struct mlx5_list_const *l_const,
	       struct mlx5_list_cache *gc)
{
	rte_rwlock_init(&l_inconst->lock);
	if (l_const->lcores_share) {
		l_inconst->cache[MLX5_LIST_GLOBAL] = gc;
		LIST_INIT(&l_inconst->cache[MLX5_LIST_GLOBAL]->h);
	}
	return 0;
}

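/*
 * Allocate and initialize an mlx5 list. All five callbacks are mandatory;
 * when lcores_share is true, the global cache is carved out of the same
 * allocation right after the list structure. On failure NULL is returned
 * and rte_errno is set.
 *
 * Illustrative sketch only; the callback names below are hypothetical and
 * not defined in this file:
 *
 *	struct mlx5_list *lst = mlx5_list_create("example", my_ctx, true,
 *						 my_create_cb, my_match_cb,
 *						 my_remove_cb, my_clone_cb,
 *						 my_clone_free_cb);
 *	if (lst == NULL)
 *		return -rte_errno;
 */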
struct mlx5_list *
mlx5_list_create(const char *name, void *ctx, bool lcores_share,
		 mlx5_list_create_cb cb_create,
		 mlx5_list_match_cb cb_match,
		 mlx5_list_remove_cb cb_remove,
		 mlx5_list_clone_cb cb_clone,
		 mlx5_list_clone_free_cb cb_clone_free)
{
	struct mlx5_list *list;
	struct mlx5_list_cache *gc = NULL;

	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
	    !cb_clone_free) {
		rte_errno = EINVAL;
		return NULL;
	}
	list = mlx5_malloc(MLX5_MEM_ZERO,
			   sizeof(*list) + (lcores_share ? sizeof(*gc) : 0),
			   0, SOCKET_ID_ANY);

	if (!list)
		return NULL;
	if (name)
		snprintf(list->l_const.name,
			 sizeof(list->l_const.name), "%s", name);
	list->l_const.ctx = ctx;
	list->l_const.lcores_share = lcores_share;
	list->l_const.cb_create = cb_create;
	list->l_const.cb_match = cb_match;
	list->l_const.cb_remove = cb_remove;
	list->l_const.cb_clone = cb_clone;
	list->l_const.cb_clone_free = cb_clone_free;
	rte_spinlock_init(&list->l_const.lcore_lock);
	if (lcores_share)
		gc = (struct mlx5_list_cache *)(list + 1);
	if (mlx5_list_init(&list->l_inconst, &list->l_const, gc) != 0) {
		mlx5_free(list);
		return NULL;
	}
	DRV_LOG(DEBUG, "mlx5 list %s was created.", name);
	return list;
}

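/*
 * Walk one cache (per-lcore or global) and return the first entry matched by
 * cb_match(). With reuse set, a reference is taken before returning; a zero
 * previous count on a non-global cache marks the entry as invalid and the
 * reference is dropped again.
 */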
static struct mlx5_list_entry *
__list_lookup(struct mlx5_list_inconst *l_inconst,
	      struct mlx5_list_const *l_const,
	      int lcore_index, void *ctx, bool reuse)
{
	struct mlx5_list_entry *entry =
				LIST_FIRST(&l_inconst->cache[lcore_index]->h);
	uint32_t ret;

	while (entry != NULL) {
		if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
			if (reuse) {
				ret = __atomic_add_fetch(&entry->ref_cnt, 1,
							 __ATOMIC_RELAXED) - 1;
				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
					l_const->name, (void *)entry,
					entry->ref_cnt);
			} else if (lcore_index < MLX5_LIST_GLOBAL) {
				ret = __atomic_load_n(&entry->ref_cnt,
						      __ATOMIC_RELAXED);
			}
			if (likely(ret != 0 || lcore_index == MLX5_LIST_GLOBAL))
				return entry;
			if (reuse && ret == 0)
				entry->ref_cnt--; /* Invalid entry. */
		}
		entry = LIST_NEXT(entry, next);
	}
	return NULL;
}

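/*
 * Lookup helper that scans all per-lcore caches under the read lock without
 * taking a reference on the matched entry.
 */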
static inline struct mlx5_list_entry *
_mlx5_list_lookup(struct mlx5_list_inconst *l_inconst,
		  struct mlx5_list_const *l_const, void *ctx)
{
	struct mlx5_list_entry *entry = NULL;
	int i;

	rte_rwlock_read_lock(&l_inconst->lock);
	for (i = 0; i < MLX5_LIST_GLOBAL; i++) {
		if (!l_inconst->cache[i])
			continue;
		entry = __list_lookup(l_inconst, l_const, i,
				      ctx, false);
		if (entry)
			break;
	}
	rte_rwlock_read_unlock(&l_inconst->lock);
	return entry;
}

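/* Public lookup wrapper, see _mlx5_list_lookup(). */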
struct mlx5_list_entry *
mlx5_list_lookup(struct mlx5_list *list, void *ctx)
{
	return _mlx5_list_lookup(&list->l_inconst, &list->l_const, ctx);
}

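/*
 * Clone a global entry into the per-lcore cache and link the clone to its
 * global entry with an initial reference count of one.
 */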
static struct mlx5_list_entry *
mlx5_list_cache_insert(struct mlx5_list_inconst *l_inconst,
		       struct mlx5_list_const *l_const, int lcore_index,
		       struct mlx5_list_entry *gentry, void *ctx)
{
	struct mlx5_list_entry *lentry =
			l_const->cb_clone(l_const->ctx, gentry, ctx);

	if (unlikely(!lentry))
		return NULL;
	lentry->ref_cnt = 1u;
	lentry->gentry = gentry;
	lentry->lcore_idx = (uint32_t)lcore_index;
	LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, lentry, next);
	return lentry;
}

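/*
 * Drop entries of this lcore cache that were invalidated by other lcores.
 * The pending invalidation counter is consumed atomically and only entries
 * with a zero reference count are freed.
 */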
static void
__list_cache_clean(struct mlx5_list_inconst *l_inconst,
		   struct mlx5_list_const *l_const,
		   int lcore_index)
{
	struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
	struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
	uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
					       __ATOMIC_RELAXED);

	while (inv_cnt != 0 && entry != NULL) {
		struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);

		if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
			LIST_REMOVE(entry, next);
			if (l_const->lcores_share)
				l_const->cb_clone_free(l_const->ctx, entry);
			else
				l_const->cb_remove(l_const->ctx, entry);
			inv_cnt--;
		}
		entry = nentry;
	}
}

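/*
 * Register an entry: look it up in the local cache, then (if shared) in the
 * global cache, and only create a new entry when both lookups miss. The
 * generation counter read under the read lock is re-checked under the write
 * lock to detect a concurrent creation of the same entry.
 */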
static inline struct mlx5_list_entry *
_mlx5_list_register(struct mlx5_list_inconst *l_inconst,
		    struct mlx5_list_const *l_const,
		    void *ctx, int lcore_index)
{
	struct mlx5_list_entry *entry = NULL, *local_entry;
	volatile uint32_t prev_gen_cnt = 0;
	MLX5_ASSERT(l_inconst);
	if (unlikely(!l_inconst->cache[lcore_index])) {
		l_inconst->cache[lcore_index] = mlx5_malloc(0,
					sizeof(struct mlx5_list_cache),
					RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!l_inconst->cache[lcore_index]) {
			rte_errno = ENOMEM;
			return NULL;
		}
		l_inconst->cache[lcore_index]->inv_cnt = 0;
		LIST_INIT(&l_inconst->cache[lcore_index]->h);
	}
	/* 0. Free entries that were invalidated by other lcores. */
	__list_cache_clean(l_inconst, l_const, lcore_index);
	/* 1. Lookup in local cache. */
	local_entry = __list_lookup(l_inconst, l_const, lcore_index, ctx, true);
	if (local_entry)
		return local_entry;
	if (l_const->lcores_share) {
		/* 2. Lookup with read lock on global list, reuse if found. */
		rte_rwlock_read_lock(&l_inconst->lock);
		entry = __list_lookup(l_inconst, l_const, MLX5_LIST_GLOBAL,
				      ctx, true);
		if (likely(entry)) {
			rte_rwlock_read_unlock(&l_inconst->lock);
			return mlx5_list_cache_insert(l_inconst, l_const,
						      lcore_index,
						      entry, ctx);
		}
		prev_gen_cnt = l_inconst->gen_cnt;
		rte_rwlock_read_unlock(&l_inconst->lock);
	}
	/* 3. Prepare new entry for global list and for cache. */
	entry = l_const->cb_create(l_const->ctx, ctx);
	if (unlikely(!entry))
		return NULL;
	entry->ref_cnt = 1u;
	if (!l_const->lcores_share) {
		entry->lcore_idx = (uint32_t)lcore_index;
		LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
				 entry, next);
		__atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
			l_const->name, lcore_index,
			(void *)entry, entry->ref_cnt);
		return entry;
	}
	local_entry = l_const->cb_clone(l_const->ctx, entry, ctx);
	if (unlikely(!local_entry)) {
		l_const->cb_remove(l_const->ctx, entry);
		return NULL;
	}
	local_entry->ref_cnt = 1u;
	local_entry->gentry = entry;
	local_entry->lcore_idx = (uint32_t)lcore_index;
	rte_rwlock_write_lock(&l_inconst->lock);
	/* 4. Make sure the same entry was not created before the write lock. */
	if (unlikely(prev_gen_cnt != l_inconst->gen_cnt)) {
		struct mlx5_list_entry *oentry = __list_lookup(l_inconst,
							       l_const,
							       MLX5_LIST_GLOBAL,
							       ctx, true);

		if (unlikely(oentry)) {
			/* 4.5. Found a real race, reuse the old entry. */
			rte_rwlock_write_unlock(&l_inconst->lock);
			l_const->cb_remove(l_const->ctx, entry);
			l_const->cb_clone_free(l_const->ctx, local_entry);
			return mlx5_list_cache_insert(l_inconst, l_const,
						      lcore_index,
						      oentry, ctx);
		}
	}
	/* 5. Update lists. */
	LIST_INSERT_HEAD(&l_inconst->cache[MLX5_LIST_GLOBAL]->h, entry, next);
	l_inconst->gen_cnt++;
	rte_rwlock_write_unlock(&l_inconst->lock);
	LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
	__atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
		(void *)entry, entry->ref_cnt);
	return local_entry;
}

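/*
 * Public register wrapper. Threads without an lcore index fall back to the
 * shared MLX5_LIST_NLCORE slot protected by the lcore spinlock.
 */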
struct mlx5_list_entry *
mlx5_list_register(struct mlx5_list *list, void *ctx)
{
	struct mlx5_list_entry *entry;
	int lcore_index = rte_lcore_index(rte_lcore_id());

	if (unlikely(lcore_index == -1)) {
		lcore_index = MLX5_LIST_NLCORE;
		rte_spinlock_lock(&list->l_const.lcore_lock);
	}
	entry = _mlx5_list_register(&list->l_inconst, &list->l_const, ctx,
				    lcore_index);
	if (unlikely(lcore_index == MLX5_LIST_NLCORE))
		rte_spinlock_unlock(&list->l_const.lcore_lock);
	return entry;
}

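/*
 * Release one reference. A clone owned by another lcore is only marked
 * invalid via inv_cnt and reclaimed later by __list_cache_clean(); the
 * global entry is removed under the write lock once its count reaches zero.
 * Returns 1 if the entry is still referenced, 0 otherwise.
 */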
static inline int
_mlx5_list_unregister(struct mlx5_list_inconst *l_inconst,
		      struct mlx5_list_const *l_const,
		      struct mlx5_list_entry *entry,
		      int lcore_idx)
{
	struct mlx5_list_entry *gentry = entry->gentry;

	if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	if (entry->lcore_idx == (uint32_t)lcore_idx) {
		LIST_REMOVE(entry, next);
		if (l_const->lcores_share)
			l_const->cb_clone_free(l_const->ctx, entry);
		else
			l_const->cb_remove(l_const->ctx, entry);
	} else if (likely(lcore_idx != -1)) {
		__atomic_add_fetch(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
				   1, __ATOMIC_RELAXED);
	} else {
		return 0;
	}
	if (!l_const->lcores_share) {
		__atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			l_const->name, (void *)entry);
		return 0;
	}
	if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	rte_rwlock_write_lock(&l_inconst->lock);
	if (likely(gentry->ref_cnt == 0)) {
		LIST_REMOVE(gentry, next);
		rte_rwlock_write_unlock(&l_inconst->lock);
		l_const->cb_remove(l_const->ctx, gentry);
		__atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			l_const->name, (void *)gentry);
		return 0;
	}
	rte_rwlock_write_unlock(&l_inconst->lock);
	return 1;
}

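/*
 * Public unregister wrapper, using the same MLX5_LIST_NLCORE fallback as
 * mlx5_list_register() for threads without an lcore index.
 */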
int
mlx5_list_unregister(struct mlx5_list *list,
		     struct mlx5_list_entry *entry)
{
	int ret;
	int lcore_index = rte_lcore_index(rte_lcore_id());

	if (unlikely(lcore_index == -1)) {
		lcore_index = MLX5_LIST_NLCORE;
		rte_spinlock_lock(&list->l_const.lcore_lock);
	}
	ret = _mlx5_list_unregister(&list->l_inconst, &list->l_const, entry,
				    lcore_index);
	if (unlikely(lcore_index == MLX5_LIST_NLCORE))
		rte_spinlock_unlock(&list->l_const.lcore_lock);
	return ret;
}

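/*
 * Release every cached entry: global entries go through cb_remove(), clones
 * through cb_clone_free(), and the per-lcore cache memory is freed.
 */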
static void
mlx5_list_uninit(struct mlx5_list_inconst *l_inconst,
		 struct mlx5_list_const *l_const)
{
	struct mlx5_list_entry *entry;
	int i;

	MLX5_ASSERT(l_inconst);
	for (i = 0; i < MLX5_LIST_MAX; i++) {
		if (!l_inconst->cache[i])
			continue;
		while (!LIST_EMPTY(&l_inconst->cache[i]->h)) {
			entry = LIST_FIRST(&l_inconst->cache[i]->h);
			LIST_REMOVE(entry, next);
			if (i == MLX5_LIST_GLOBAL) {
				l_const->cb_remove(l_const->ctx, entry);
				DRV_LOG(DEBUG, "mlx5 list %s entry %p "
					"destroyed.", l_const->name,
					(void *)entry);
			} else {
				l_const->cb_clone_free(l_const->ctx, entry);
			}
		}
		if (i != MLX5_LIST_GLOBAL)
			mlx5_free(l_inconst->cache[i]);
	}
}

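/* Destroy the list: release all entries and free the list itself. */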
void
mlx5_list_destroy(struct mlx5_list *list)
{
	mlx5_list_uninit(&list->l_inconst, &list->l_const);
	mlx5_free(list);
}

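/* Return the current number of entries in the list (relaxed read). */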
uint32_t
mlx5_list_get_entry_num(struct mlx5_list *list)
{
	MLX5_ASSERT(list);
	return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
}

/********************* Hash List **********************/

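/*
 * Create a hash list: an array of mlx5 list buckets indexed either directly
 * by the key or by a CRC hash of it. The requested size is rounded up to a
 * power of two so a mask can be used for bucket selection.
 *
 * Illustrative sketch only; the callback names below are hypothetical and
 * not defined in this file:
 *
 *	struct mlx5_hlist *h = mlx5_hlist_create("example", 64, false, true,
 *						 my_ctx, my_create_cb,
 *						 my_match_cb, my_remove_cb,
 *						 my_clone_cb,
 *						 my_clone_free_cb);
 */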
struct mlx5_hlist *
mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,
		  bool lcores_share, void *ctx, mlx5_list_create_cb cb_create,
		  mlx5_list_match_cb cb_match,
		  mlx5_list_remove_cb cb_remove,
		  mlx5_list_clone_cb cb_clone,
		  mlx5_list_clone_free_cb cb_clone_free)
{
	struct mlx5_hlist *h;
	struct mlx5_list_cache *gc;
	uint32_t act_size;
	uint32_t alloc_size;
	uint32_t i;

	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
	    !cb_clone_free) {
		rte_errno = EINVAL;
		return NULL;
	}
	/* Align to the next power of 2, a 32-bit integer is enough for now. */
	if (!rte_is_power_of_2(size)) {
		act_size = rte_align32pow2(size);
		DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not power of 2, will "
			"be aligned to 0x%" PRIX32 ".", size, act_size);
	} else {
		act_size = size;
	}
	alloc_size = sizeof(struct mlx5_hlist) +
		     sizeof(struct mlx5_hlist_bucket) * act_size;
	if (lcores_share)
		alloc_size += sizeof(struct mlx5_list_cache) * act_size;
	/* Using zmalloc, then no need to initialize the heads. */
	h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
			SOCKET_ID_ANY);
	if (!h) {
		DRV_LOG(ERR, "No memory for hash list %s creation",
			name ? name : "None");
		return NULL;
	}
	if (name)
		snprintf(h->l_const.name, sizeof(h->l_const.name), "%s", name);
	h->l_const.ctx = ctx;
	h->l_const.lcores_share = lcores_share;
	h->l_const.cb_create = cb_create;
	h->l_const.cb_match = cb_match;
	h->l_const.cb_remove = cb_remove;
	h->l_const.cb_clone = cb_clone;
	h->l_const.cb_clone_free = cb_clone_free;
	rte_spinlock_init(&h->l_const.lcore_lock);
	h->mask = act_size - 1;
	h->direct_key = direct_key;
	gc = (struct mlx5_list_cache *)&h->buckets[act_size];
	for (i = 0; i < act_size; i++) {
		if (mlx5_list_init(&h->buckets[i].l, &h->l_const,
				   lcores_share ? &gc[i] : NULL) != 0) {
			mlx5_free(h);
			return NULL;
		}
	}
	DRV_LOG(DEBUG, "Hash list %s with size 0x%" PRIX32 " was created.",
		name, act_size);
	return h;
}

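/* Lookup an entry in the bucket selected by the key, without referencing it. */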
struct mlx5_list_entry *
mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
	uint32_t idx;

	if (h->direct_key)
		idx = (uint32_t)(key & h->mask);
	else
		idx = rte_hash_crc_8byte(key, 0) & h->mask;
	return _mlx5_list_lookup(&h->buckets[idx].l, &h->l_const, ctx);
}

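/*
 * Register an entry in the bucket selected by the key and remember the
 * bucket index so that unregister can find the right bucket again.
 */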
struct mlx5_list_entry *
mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
	uint32_t idx;
	struct mlx5_list_entry *entry;
	int lcore_index = rte_lcore_index(rte_lcore_id());

	if (h->direct_key)
		idx = (uint32_t)(key & h->mask);
	else
		idx = rte_hash_crc_8byte(key, 0) & h->mask;
	if (unlikely(lcore_index == -1)) {
		lcore_index = MLX5_LIST_NLCORE;
		rte_spinlock_lock(&h->l_const.lcore_lock);
	}
	entry = _mlx5_list_register(&h->buckets[idx].l, &h->l_const, ctx,
				    lcore_index);
	if (likely(entry)) {
		if (h->l_const.lcores_share)
			entry->gentry->bucket_idx = idx;
		else
			entry->bucket_idx = idx;
	}
	if (unlikely(lcore_index == MLX5_LIST_NLCORE))
		rte_spinlock_unlock(&h->l_const.lcore_lock);
	return entry;
}

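/* Release a reference taken by mlx5_hlist_register(). */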
int
mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_list_entry *entry)
{
	int lcore_index = rte_lcore_index(rte_lcore_id());
	int ret;
	uint32_t idx = h->l_const.lcores_share ? entry->gentry->bucket_idx :
						 entry->bucket_idx;

	if (unlikely(lcore_index == -1)) {
		lcore_index = MLX5_LIST_NLCORE;
		rte_spinlock_lock(&h->l_const.lcore_lock);
	}
	ret = _mlx5_list_unregister(&h->buckets[idx].l, &h->l_const, entry,
				    lcore_index);
	if (unlikely(lcore_index == MLX5_LIST_NLCORE))
		rte_spinlock_unlock(&h->l_const.lcore_lock);
	return ret;
}

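/* Destroy all buckets and free the hash list. */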
void
mlx5_hlist_destroy(struct mlx5_hlist *h)
{
	uint32_t i;

	for (i = 0; i <= h->mask; i++)
		mlx5_list_uninit(&h->buckets[i].l, &h->l_const);
	mlx5_free(h);
}