/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

#include "mlx5_glue.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_mr.h"
#include "mlx5_common_utils.h"
#include "mlx5_malloc.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

/**
 * Expand B-tree table to a given size. Must not be called while holding
 * memory_hotplug_lock or share_cache.rwlock, as rte_realloc() may be invoked.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries for expansion.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_expand(struct mlx5_mr_btree *bt, int n)
{
	void *mem;
	int ret = 0;

	if (n <= bt->size)
		return ret;
	/*
	 * The downside of directly using rte_realloc() is that SOCKET_ID_ANY
	 * is used internally if there is no room to expand on the original
	 * socket. Since this is a rare case on a very slow path, it is
	 * acceptable. cache_bh[] is initially given enough space, so once it
	 * has been expanded, further expansion should rarely be needed.
	 */
	mem = mlx5_realloc(bt->table, MLX5_MEM_RTE | MLX5_MEM_ZERO,
			   n * sizeof(struct mr_cache_entry), 0, SOCKET_ID_ANY);
	if (mem == NULL) {
		/* Not an error, B-tree search will be skipped. */
		DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
			(void *)bt);
		ret = -1;
	} else {
		DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
		bt->table = mem;
		bt->size = n;
	}
	return ret;
}

/**
 * Look up the LKey in a given B-tree lookup table, store the index where the
 * search stopped, and return the LKey found.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param[out] idx
 *   Pointer to index. Even on search failure, it returns the index where the
 *   search stopped so that the index can be used when inserting a new entry.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t n;
	uint16_t base = 0;

	MLX5_ASSERT(bt != NULL);
	lkp_tbl = *bt->table;
	n = bt->len;
	/* First entry must be NULL for comparison. */
	MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
				    lkp_tbl[0].lkey == UINT32_MAX));
	/* Binary search. */
	do {
		register uint16_t delta = n >> 1;

		if (addr < lkp_tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	MLX5_ASSERT(addr >= lkp_tbl[base].start);
	*idx = base;
	if (addr < lkp_tbl[base].end)
		return lkp_tbl[base].lkey;
	/* Not found. */
	return UINT32_MAX;
}
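
/*
 * Illustration (explanatory, not part of the upstream code): with a table of
 *	[0]: start=0,      end=0,      lkey=UINT32_MAX   <- sentinel
 *	[1]: start=0x1000, end=0x3000, lkey=0x11
 *	[2]: start=0x5000, end=0x6000, lkey=0x22
 * looking up addr=0x2000 stops at idx=1 and returns 0x11, while addr=0x4000
 * also stops at idx=1 (the last entry with start <= addr) but returns
 * UINT32_MAX because the address is past that entry's end.
 * mr_btree_insert() then places a new entry right after the stop index,
 * keeping the table sorted by start address.
 */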

/**
 * Insert an entry into a B-tree lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param entry
 *   Pointer to new entry to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mr_btree_insert(struct mlx5_mr_btree *bt, struct mr_cache_entry *entry)
{
	struct mr_cache_entry *lkp_tbl;
	uint16_t idx = 0;
	size_t shift;

	MLX5_ASSERT(bt != NULL);
	MLX5_ASSERT(bt->len <= bt->size);
	MLX5_ASSERT(bt->len > 0);
	lkp_tbl = *bt->table;
	/* Find out the slot for insertion. */
	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
		DRV_LOG(DEBUG,
			"abort insertion to B-tree(%p): already exists at"
			" idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
			(void *)bt, idx, entry->start, entry->end, entry->lkey);
		/* Already exists, return. */
		return 0;
	}
	/* If table is full, return error. */
	if (unlikely(bt->len == bt->size)) {
		bt->overflow = 1;
		return -1;
	}
	/* Insert entry. */
	++idx;
	shift = (bt->len - idx) * sizeof(struct mr_cache_entry);
	if (shift)
		memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
	lkp_tbl[idx] = *entry;
	bt->len++;
	DRV_LOG(DEBUG,
		"inserted B-tree(%p)[%u],"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		(void *)bt, idx, entry->start, entry->end, entry->lkey);
	return 0;
}
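
/*
 * Note (explanatory): once bt->overflow is set, the table may no longer
 * contain every registered region. For the global cache this makes
 * mlx5_mr_lookup_cache() fall back to scanning the MR list; for a per-queue
 * bottom-half table the flag stays set until the cache is flushed. The
 * tables are therefore sized generously and the local one is doubled by
 * mr_lookup_caches() before it can fill up.
 */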

/**
 * Initialize B-tree and allocate memory for lookup table.
 *
 * @param bt
 *   Pointer to B-tree structure.
 * @param n
 *   Number of entries to allocate.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
	if (bt == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	MLX5_ASSERT(!bt->table && !bt->size);
	memset(bt, 0, sizeof(*bt));
	bt->table = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				sizeof(struct mr_cache_entry) * n,
				0, socket);
	if (bt->table == NULL) {
		rte_errno = ENOMEM;
		DEBUG("failed to allocate memory for btree cache on socket %d",
		      socket);
		return -rte_errno;
	}
	bt->size = n;
	/* First entry must be NULL for binary search. */
	(*bt->table)[bt->len++] = (struct mr_cache_entry) {
		.lkey = UINT32_MAX,
	};
	DEBUG("initialized B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	return 0;
}
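
/*
 * A minimal usage sketch (illustrative; MLX5_MR_BTREE_CACHE_N is assumed to
 * be the per-queue table size from the mlx5 common headers): a per-queue
 * control structure typically sets up its bottom-half cache once at queue
 * creation time,
 *
 *	if (mlx5_mr_btree_init(&mr_ctrl->cache_bh,
 *			       MLX5_MR_BTREE_CACHE_N, socket) != 0)
 *		return -rte_errno;
 *
 * and releases it with mlx5_mr_btree_free(&mr_ctrl->cache_bh) on teardown.
 */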

/**
 * Free B-tree resources.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
	if (bt == NULL)
		return;
	DEBUG("freeing B-tree %p with table %p",
	      (void *)bt, (void *)bt->table);
	mlx5_free(bt->table);
	memset(bt, 0, sizeof(*bt));
}

/**
 * Dump all the entries in a B-tree.
 *
 * @param bt
 *   Pointer to B-tree structure.
 */
void
mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	int idx;
	struct mr_cache_entry *lkp_tbl;

	if (bt == NULL)
		return;
	lkp_tbl = *bt->table;
	for (idx = 0; idx < bt->len; ++idx) {
		struct mr_cache_entry *entry = &lkp_tbl[idx];

		DEBUG("B-tree(%p)[%u],"
		      " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
		      (void *)bt, idx, entry->start, entry->end, entry->lkey);
	}
#endif
}

/**
 * Find a virtually contiguous memory chunk in a given MR.
 *
 * @param mr
 *   Pointer to MR structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If not found, this will not be
 *   updated.
 * @param base_idx
 *   Start index of the memseg bitmap.
 *
 * @return
 *   Next index to go on lookup.
 */
static int
mr_find_next_chunk(struct mlx5_mr *mr, struct mr_cache_entry *entry,
		   int base_idx)
{
	uintptr_t start = 0;
	uintptr_t end = 0;
	uint32_t idx = 0;

	/* MR for external memory doesn't have memseg list. */
	if (mr->msl == NULL) {
		MLX5_ASSERT(mr->ms_bmp_n == 1);
		MLX5_ASSERT(mr->ms_n == 1);
		MLX5_ASSERT(base_idx == 0);
		/*
		 * Can't search it from the memseg list but get it directly
		 * from pmd_mr as there's only one chunk.
		 */
		entry->start = (uintptr_t)mr->pmd_mr.addr;
		entry->end = (uintptr_t)mr->pmd_mr.addr + mr->pmd_mr.len;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
		/* Returning 1 ends iteration. */
		return 1;
	}
	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
		if (rte_bitmap_get(mr->ms_bmp, idx)) {
			const struct rte_memseg_list *msl;
			const struct rte_memseg *ms;

			msl = mr->msl;
			ms = rte_fbarray_get(&msl->memseg_arr,
					     mr->ms_base_idx + idx);
			MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
			if (!start)
				start = ms->addr_64;
			end = ms->addr_64 + ms->hugepage_sz;
		} else if (start) {
			/* Passed the end of a fragment. */
			break;
		}
	}
	if (start) {
		/* Found one chunk. */
		entry->start = start;
		entry->end = end;
		entry->lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
	}
	return idx;
}
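
/*
 * Illustration (explanatory, not part of the upstream code): with a 6-bit
 * memseg bitmap of 110011, calling mr_find_next_chunk(mr, &e, 0) reports the
 * chunk covering memsegs 0-1 and returns 2; calling it again with that index
 * skips the cleared bits and reports the chunk covering memsegs 4-5. Callers
 * therefore loop "n = mr_find_next_chunk(mr, &e, n)" until e.end stays 0 or
 * n reaches mr->ms_bmp_n.
 */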

/**
 * Insert an MR into the global B-tree cache. It may fail when the cache
 * table is full and cannot be expanded; in that case, the entry will have to
 * be found by mlx5_mr_lookup_list() in mlx5_mr_create() on a cache miss.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr
 *   Pointer to MR to insert.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr)
{
	unsigned int n;

	DRV_LOG(DEBUG, "Inserting MR(%p) to global cache(%p)",
		(void *)mr, (void *)share_cache);
	for (n = 0; n < mr->ms_bmp_n; ) {
		struct mr_cache_entry entry;

		memset(&entry, 0, sizeof(entry));
		/* Find a contiguous chunk and advance the index. */
		n = mr_find_next_chunk(mr, &entry, n);
		if (!entry.end)
			break;
		if (mr_btree_insert(&share_cache->cache, &entry) < 0) {
			/*
			 * Overflowed, but the global table cannot be
			 * expanded here because that would deadlock.
			 */
			return -1;
		}
	}
	return 0;
}

/**
 * Look up an address in the original global MR list.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Found MR on match, NULL otherwise.
 */
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr)
{
	struct mlx5_mr *mr;

	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret;

			memset(&ret, 0, sizeof(ret));
			n = mr_find_next_chunk(mr, &ret, n);
			if (addr >= ret.start && addr < ret.end) {
				/* Found. */
				*entry = ret;
				return mr;
			}
		}
	}
	return NULL;
}

/**
 * Look up an address in the global MR cache.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry. If no match, this will not be
 *   updated.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr)
{
	uint16_t idx;
	uint32_t lkey = UINT32_MAX;
	struct mlx5_mr *mr;

	/*
	 * If the global cache has overflowed because its B-tree table could
	 * not be expanded, it no longer contains all the existing MRs. The
	 * address then has to be found by traversing the original MR list,
	 * which is a very slow path. Otherwise, the global cache is
	 * all-inclusive.
	 */
	if (!unlikely(share_cache->cache.overflow)) {
		lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
		if (lkey != UINT32_MAX)
			*entry = (*share_cache->cache.table)[idx];
	} else {
		/* Falling back to the slowest path. */
		mr = mlx5_mr_lookup_list(share_cache, entry, addr);
		if (mr != NULL)
			lkey = entry->lkey;
	}
	MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
					   addr < entry->end));
	return lkey;
}

/**
 * Free MR resources. The MR lock must not be held to avoid a deadlock:
 * rte_free() can raise a memory free event and the callback function would
 * then spin on the lock.
 *
 * @param mr
 *   Pointer to MR to free.
 * @param dereg_mr_cb
 *   Callback function to deregister the underlying MR.
 */
void
mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb)
{
	if (mr == NULL)
		return;
	DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
	dereg_mr_cb(&mr->pmd_mr);
	if (mr->ms_bmp != NULL)
		rte_bitmap_free(mr->ms_bmp);
	mlx5_free(mr);
}

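/**
 * Rebuild the global B-tree cache from the MR list. The cache is first
 * truncated to the sentinel entry, then every MR on the list is re-inserted.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */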
void
mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr;

	DRV_LOG(DEBUG, "Rebuild dev cache[] %p", (void *)share_cache);
	/* Flush cache to rebuild; len = 1 keeps the sentinel entry. */
	share_cache->cache.len = 1;
	share_cache->cache.overflow = 0;
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr)
		if (mlx5_mr_insert_cache(share_cache, mr) < 0)
			return;
}

/**
 * Release resources of detached MRs having no online entry.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
static void
mlx5_mr_garbage_collect(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;
	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/*
	 * MRs can't be freed while holding the lock, because rte_free()
	 * could invoke the memory free callback and deadlock.
	 */
	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach the whole free list and release it after unlocking. */
	free_list = share_cache->mr_free_list;
	LIST_INIT(&share_cache->mr_free_list);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Release resources. */
	mr_next = LIST_FIRST(&free_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	}
}

/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
static int
mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
			  const struct rte_memseg *ms, size_t len, void *arg)
{
	struct mr_find_contig_memsegs_data *data = arg;

	if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
		return 0;
	/* Found, save it and stop walking. */
	data->start = ms->addr_64;
	data->end = ms->addr_64 + len;
	data->msl = msl;
	return 1;
}
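
/*
 * Note (explanatory): rte_memseg_contig_walk() invokes the callback once per
 * virtually contiguous run of memsegs, with len covering the whole run, and
 * stops walking as soon as the callback returns a non-zero value. Returning
 * 0 above therefore means "keep searching", while returning 1 both reports a
 * hit and terminates the walk.
 */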

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This API should be called on a secondary process; it sends a request to
 * the primary process to create an MR for the address. As the global MR
 * list is in shared memory, the following LKey lookup should succeed unless
 * the request fails.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier of the requesting port.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag to enable registration of external memory segments.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create_secondary(void *pd __rte_unused,
			 struct mlx5_mp_id *mp_id,
			 struct mlx5_mr_share_cache *share_cache,
			 struct mr_cache_entry *entry, uintptr_t addr,
			 unsigned int mr_ext_memseg_en __rte_unused)
{
	int ret;

	DEBUG("port %u requesting MR creation for address (%p)",
	      mp_id->port_id, (void *)addr);
	ret = mlx5_mp_req_mr_create(mp_id, addr);
	if (ret) {
		DEBUG("Failed to request MR creation for address (%p)",
		      (void *)addr);
		return UINT32_MAX;
	}
	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_read_unlock(&share_cache->rwlock);
	DEBUG("MR CREATED by primary process for %p:\n"
	      " [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
	      (void *)addr, entry->start, entry->end, entry->lkey);
	return entry->lkey;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * Register the entire virtually contiguous memory chunk around the address.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag to enable registration of external memory segments.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
uint32_t
mlx5_mr_create_primary(void *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en)
{
	struct mr_find_contig_memsegs_data data = {.addr = addr, };
	struct mr_find_contig_memsegs_data data_re;
	const struct rte_memseg_list *msl;
	const struct rte_memseg *ms;
	struct mlx5_mr *mr = NULL;
	int ms_idx_shift = -1;
	uint32_t bmp_size;
	void *bmp_mem;
	uint32_t ms_n;
	uint32_t n;
	size_t len;

	DRV_LOG(DEBUG, "Creating an MR using address (%p)", (void *)addr);
	/*
	 * Release detached MRs if any. This can't be called while holding
	 * either memory_hotplug_lock or share_cache->rwlock. MRs on the free
	 * list have been detached by the memory free event but could not be
	 * released inside the callback due to deadlock. As a result,
	 * releasing resources is quite opportunistic.
	 */
	mlx5_mr_garbage_collect(share_cache);
	/*
	 * If enabled, find out a contiguous virtual address chunk in use, to
	 * which the given address belongs, in order to register the maximum
	 * range. In the best case where mempools are not dynamically
	 * recreated and '--socket-mem' is specified as an EAL option, it is
	 * very likely to have only one MR(LKey) per socket and per hugepage
	 * size even though the system memory is highly fragmented. As the
	 * whole memory chunk will be pinned by the kernel, it can't be
	 * reused unless the entire chunk is freed from EAL.
	 *
	 * If disabled, just register one memseg (page). Then, memory
	 * consumption will be minimized but it may drop performance if there
	 * are many MRs to look up on the datapath.
	 */
	if (!mr_ext_memseg_en) {
		data.msl = rte_mem_virt2memseg_list((void *)addr);
		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
		data.end = data.start + data.msl->page_sz;
	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
		DRV_LOG(WARNING,
			"Unable to find virtually contiguous"
			" chunk for address (%p)."
			" rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_nolock;
	}
alloc_resources:
	/* Addresses must be page-aligned. */
	MLX5_ASSERT(data.msl);
	MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
	MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
	msl = data.msl;
	ms = rte_mem_virt2memseg((void *)data.start, msl);
	len = data.end - data.start;
	MLX5_ASSERT(ms);
	MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
	/* Number of memsegs in the range. */
	ms_n = len / msl->page_sz;
	DEBUG("Extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " page_sz=0x%" PRIx64 ", ms_n=%u",
	      (void *)addr, data.start, data.end, msl->page_sz, ms_n);
	/* Size of memory for bitmap. */
	bmp_size = rte_bitmap_get_memory_footprint(ms_n);
	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE) +
			 bmp_size, RTE_CACHE_LINE_SIZE, msl->socket_id);
	if (mr == NULL) {
		DEBUG("Unable to allocate memory for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = ENOMEM;
		goto err_nolock;
	}
	mr->msl = msl;
	/*
	 * Save the index of the first memseg and initialize the memseg
	 * bitmap. To see if a memseg of ms_idx in the memseg-list is still
	 * valid, check:
	 *	rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
	 */
	mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
	if (mr->ms_bmp == NULL) {
		DEBUG("Unable to initialize bitmap for a new MR of"
		      " address (%p).", (void *)addr);
		rte_errno = EINVAL;
		goto err_nolock;
	}
	/*
	 * Should recheck whether the extended contiguous chunk is still
	 * valid. Because memory_hotplug_lock can't be held if there are any
	 * memory related calls in a critical path, the resource allocation
	 * above can't be locked. If the memory has been changed at this
	 * point, try again with just a single page. If not, go on with the
	 * big chunk atomically from here.
	 */
	rte_mcfg_mem_read_lock();
	data_re = data;
	if (len > msl->page_sz &&
	    !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
		DEBUG("Unable to find virtually contiguous"
		      " chunk for address (%p)."
		      " rte_memseg_contig_walk() failed.", (void *)addr);
		rte_errno = ENXIO;
		goto err_memlock;
	}
	if (data.start != data_re.start || data.end != data_re.end) {
		/*
		 * The extended contiguous chunk has been changed. Try again
		 * with a single memseg instead.
		 */
		data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
		data.end = data.start + msl->page_sz;
		rte_mcfg_mem_read_unlock();
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		goto alloc_resources;
	}
	MLX5_ASSERT(data.msl == data_re.msl);
	rte_rwlock_write_lock(&share_cache->rwlock);
	/*
	 * Check that the address is really missing. If another thread
	 * already created one, or if it is not found due to overflow, abort
	 * and return.
	 */
	if (mlx5_mr_lookup_cache(share_cache, entry, addr) != UINT32_MAX) {
		/*
		 * Insert to the global cache table. It may fail due to a
		 * memory shortage; then, this entry will have to be searched
		 * here again.
		 */
		mr_btree_insert(&share_cache->cache, entry);
		DEBUG("Found MR for %p on final lookup, abort", (void *)addr);
		rte_rwlock_write_unlock(&share_cache->rwlock);
		rte_mcfg_mem_read_unlock();
		/*
		 * Must be unlocked before calling rte_free() because
		 * mlx5_mr_mem_event_free_cb() can be called inside.
		 */
		mlx5_mr_free(mr, share_cache->dereg_mr_cb);
		return entry->lkey;
	}
	/*
	 * Trim start and end addresses for the verbs MR. Set bits for
	 * registering memsegs but exclude already registered ones. The
	 * bitmap can be fragmented.
	 */
	for (n = 0; n < ms_n; ++n) {
		uintptr_t start;
		struct mr_cache_entry ret;

		memset(&ret, 0, sizeof(ret));
		start = data_re.start + n * msl->page_sz;
		/* Exclude memsegs already registered by other MRs. */
		if (mlx5_mr_lookup_cache(share_cache, &ret, start) ==
		    UINT32_MAX) {
			/*
			 * Start from the first unregistered memseg in the
			 * extended range.
			 */
			if (ms_idx_shift == -1) {
				mr->ms_base_idx += n;
				data.start = start;
				ms_idx_shift = n;
			}
			data.end = start + msl->page_sz;
			rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
			++mr->ms_n;
		}
	}
	len = data.end - data.start;
	mr->ms_bmp_n = len / msl->page_sz;
	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
	/*
	 * Finally create an MR for the memory chunk. Verbs: ibv_reg_mr() can
	 * be called while holding the memory lock because it doesn't use
	 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
	 * through mlx5_alloc_verbs_buf().
	 */
	share_cache->reg_mr_cb(pd, (void *)data.start, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DEBUG("Failed to create an MR for address (%p)",
		      (void *)addr);
		rte_errno = EINVAL;
		goto err_mrlock;
	}
	MLX5_ASSERT((uintptr_t)mr->pmd_mr.addr == data.start);
	MLX5_ASSERT(mr->pmd_mr.len);
	LIST_INSERT_HEAD(&share_cache->mr_list, mr, mr);
	DEBUG("MR CREATED (%p) for %p:\n"
	      " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
	      (void *)mr, (void *)addr, data.start, data.end,
	      rte_cpu_to_be_32(mr->pmd_mr.lkey),
	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(share_cache, mr);
	/* Fill in output data. */
	mlx5_mr_lookup_cache(share_cache, entry, addr);
	/* Lookup can't fail. */
	MLX5_ASSERT(entry->lkey != UINT32_MAX);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	rte_mcfg_mem_read_unlock();
	return entry->lkey;
err_mrlock:
	rte_rwlock_write_unlock(&share_cache->rwlock);
err_memlock:
	rte_mcfg_mem_read_unlock();
err_nolock:
	/*
	 * In case of error, as this can be called on a datapath, a warning
	 * message per error is preferable instead. Must be unlocked before
	 * calling rte_free() because mlx5_mr_mem_event_free_cb() can be
	 * called inside.
	 */
	mlx5_mr_free(mr, share_cache->dereg_mr_cb);
	return UINT32_MAX;
}

/**
 * Create a new global Memory Region (MR) for a missing virtual address.
 * This can be called from the primary and secondary processes.
 *
 * @param pd
 *   Pointer to pd handle of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier, used by the secondary process to request MR
 *   creation from the primary process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this will not be updated.
 * @param addr
 *   Target virtual address to register.
 * @param mr_ext_memseg_en
 *   Configurable flag to enable registration of external memory segments.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
 */
static uint32_t
mlx5_mr_create(void *pd, struct mlx5_mp_id *mp_id,
	       struct mlx5_mr_share_cache *share_cache,
	       struct mr_cache_entry *entry, uintptr_t addr,
	       unsigned int mr_ext_memseg_en)
{
	uint32_t ret = 0;

	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		ret = mlx5_mr_create_primary(pd, share_cache, entry,
					     addr, mr_ext_memseg_en);
		break;
	case RTE_PROC_SECONDARY:
		ret = mlx5_mr_create_secondary(pd, mp_id, share_cache, entry,
					       addr, mr_ext_memseg_en);
		break;
	default:
		break;
	}
	return ret;
}

/**
 * Look up an address in the global MR cache table. If not found, create a
 * new MR. Insert the found/created entry into the local bottom-half cache
 * table.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier, used when running as a secondary process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param[out] entry
 *   Pointer to returning MR cache entry, found in the global cache or newly
 *   created. If failed to create one, this is not written.
 * @param addr
 *   Search key.
 * @param mr_ext_memseg_en
 *   Configurable flag to enable registration of external memory segments.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mr_lookup_caches(void *pd, struct mlx5_mp_id *mp_id,
		 struct mlx5_mr_share_cache *share_cache,
		 struct mlx5_mr_ctrl *mr_ctrl,
		 struct mr_cache_entry *entry, uintptr_t addr,
		 unsigned int mr_ext_memseg_en)
{
	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
	uint32_t lkey;
	uint16_t idx;

	/* If the local cache table is full, try to double it. */
	if (unlikely(bt->len == bt->size))
		mr_btree_expand(bt, bt->size << 1);
	/* Look up in the global cache. */
	rte_rwlock_read_lock(&share_cache->rwlock);
	lkey = mr_btree_lookup(&share_cache->cache, &idx, addr);
	if (lkey != UINT32_MAX) {
		/* Found. */
		*entry = (*share_cache->cache.table)[idx];
		rte_rwlock_read_unlock(&share_cache->rwlock);
		/*
		 * Update the local cache. Even if it fails, return the found
		 * entry to update the top-half cache. Next time, this entry
		 * will be found in the global cache.
		 */
		mr_btree_insert(bt, entry);
		return lkey;
	}
	rte_rwlock_read_unlock(&share_cache->rwlock);
	/* First time to see the address? Create a new MR. */
	lkey = mlx5_mr_create(pd, mp_id, share_cache, entry, addr,
			      mr_ext_memseg_en);
	/*
	 * Update the local cache if a new global MR was successfully
	 * created. If creation failed, there is no action to take in this
	 * datapath code: the returned LKey is invalid and will eventually
	 * make the HW fail.
	 */
	if (lkey != UINT32_MAX)
		mr_btree_insert(bt, entry);
	return lkey;
}

/**
 * Bottom-half of LKey search on the datapath. First search in cache_bh[]
 * and if it misses, search the global MR cache table and copy the new entry
 * into the per-queue local caches.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param mp_id
 *   Multi-process identifier, used when running as a secondary process.
 * @param share_cache
 *   Pointer to a global shared MR cache.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param addr
 *   Search key.
 * @param mr_ext_memseg_en
 *   Configurable flag to enable registration of external memory segments.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
			    struct mlx5_mr_share_cache *share_cache,
			    struct mlx5_mr_ctrl *mr_ctrl,
			    uintptr_t addr, unsigned int mr_ext_memseg_en)
{
	uint32_t lkey;
	uint16_t bh_idx = 0;
	/* Victim in top-half cache to replace with new entry. */
	struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];

	/* Binary-search MR translation table. */
	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
	/* Update top-half cache. */
	if (likely(lkey != UINT32_MAX)) {
		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
	} else {
		/*
		 * If missed in the local lookup table, search the global
		 * cache; the local cache_bh[] will be updated inside if
		 * possible. The top-half cache entry is also updated.
		 */
		lkey = mr_lookup_caches(pd, mp_id, share_cache, mr_ctrl,
					repl, addr, mr_ext_memseg_en);
		if (unlikely(lkey == UINT32_MAX))
			return UINT32_MAX;
	}
	/* Update the most recently used entry. */
	mr_ctrl->mru = mr_ctrl->head;
	/* Point to the next victim, the oldest. */
	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
	return lkey;
}
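
/*
 * A minimal datapath usage sketch (illustrative; the mbuf variable is an
 * assumption, not part of this file): a PMD typically keeps a linear
 * top-half lookup in front of this bottom-half and only calls it on a miss:
 *
 *	lkey = mlx5_mr_addr2mr_bh(pd, mp_id, share_cache, mr_ctrl,
 *				  (uintptr_t)rte_pktmbuf_mtod(mbuf, void *),
 *				  mr_ext_memseg_en);
 *	if (unlikely(lkey == UINT32_MAX))
 *		return drop(mbuf); // address cannot be registered
 */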

/**
 * Release all the created MRs and resources of the global MR cache of a
 * device.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_release_cache(struct mlx5_mr_share_cache *share_cache)
{
	struct mlx5_mr *mr_next;

	rte_rwlock_write_lock(&share_cache->rwlock);
	/* Detach from MR list and move to free list. */
	mr_next = LIST_FIRST(&share_cache->mr_list);
	while (mr_next != NULL) {
		struct mlx5_mr *mr = mr_next;

		mr_next = LIST_NEXT(mr, mr);
		LIST_REMOVE(mr, mr);
		LIST_INSERT_HEAD(&share_cache->mr_free_list, mr, mr);
	}
	LIST_INIT(&share_cache->mr_list);
	/* Free global cache. */
	mlx5_mr_btree_free(&share_cache->cache);
	rte_rwlock_write_unlock(&share_cache->rwlock);
	/* Free all remaining MRs. */
	mlx5_mr_garbage_collect(share_cache);
}

/**
 * Flush all of the local cache entries.
 *
 * @param mr_ctrl
 *   Pointer to per-queue MR local cache.
 */
void
mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
{
	/* Reset the most-recently-used index. */
	mr_ctrl->mru = 0;
	/* Reset the linear search array. */
	mr_ctrl->head = 0;
	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
	/* Reset the B-tree table; len = 1 keeps the sentinel entry. */
	mr_ctrl->cache_bh.len = 1;
	mr_ctrl->cache_bh.overflow = 0;
	/* Update the generation number. */
	mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
	DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
		(void *)mr_ctrl, mr_ctrl->cur_gen);
}
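
/*
 * Note (explanatory, based on how the generation counters are used by the
 * mlx5 datapath): dev_gen_ptr points at a device-wide generation number that
 * is bumped whenever MRs are invalidated, e.g. on memory free events. A
 * datapath lookup that observes *dev_gen_ptr != cur_gen is expected to call
 * mlx5_mr_flush_local_cache() so that stale LKeys are never served from the
 * per-queue caches.
 */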

/**
 * Create a memory region for external memory, that is, memory which is not
 * part of the DPDK memory segments.
 *
 * @param pd
 *   Pointer to pd of a device (net, regex, vdpa,...).
 * @param addr
 *   Starting virtual address of memory.
 * @param len
 *   Length of memory segment being mapped.
 * @param socket_id
 *   Socket to allocate heap memory for the control structures.
 * @param reg_mr_cb
 *   Callback function to register the underlying MR.
 *
 * @return
 *   Pointer to MR structure on success, NULL otherwise.
 */
struct mlx5_mr *
mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
		   mlx5_reg_mr_t reg_mr_cb)
{
	struct mlx5_mr *mr = NULL;

	mr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			 RTE_ALIGN_CEIL(sizeof(*mr), RTE_CACHE_LINE_SIZE),
			 RTE_CACHE_LINE_SIZE, socket_id);
	if (mr == NULL)
		return NULL;
	reg_mr_cb(pd, (void *)addr, len, &mr->pmd_mr);
	if (mr->pmd_mr.obj == NULL) {
		DRV_LOG(WARNING,
			"Failed to create MR for address (%p)",
			(void *)addr);
		mlx5_free(mr);
		return NULL;
	}
	mr->msl = NULL; /* Mark it as external memory. */
	mr->ms_bmp = NULL;
	mr->ms_n = 1;
	mr->ms_bmp_n = 1;
	DRV_LOG(DEBUG,
		"MR CREATED (%p) for external memory %p:\n"
		" [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
		(void *)mr, (void *)addr,
		addr, addr + len, rte_cpu_to_be_32(mr->pmd_mr.lkey),
		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
	return mr;
}
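
/*
 * A minimal usage sketch (illustrative; ext_buf and its length are
 * assumptions): registering an externally allocated buffer so that the
 * device can DMA to and from it would look roughly like
 *
 *	struct mlx5_mr *mr;
 *
 *	mr = mlx5_create_mr_ext(pd, (uintptr_t)ext_buf, ext_buf_len,
 *				rte_socket_id(), share_cache->reg_mr_cb);
 *	if (mr == NULL)
 *		goto error;
 *
 * The caller is then expected to link mr into share_cache->mr_list and
 * insert its chunk into the B-tree while holding share_cache->rwlock.
 */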

/**
 * Dump all the created MRs and the global cache entries.
 *
 * @param share_cache
 *   Pointer to a global shared MR cache.
 */
void
mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	struct mlx5_mr *mr;
	int mr_n = 0;
	int chunk_n = 0;

	rte_rwlock_read_lock(&share_cache->rwlock);
	/* Iterate all the existing MRs. */
	LIST_FOREACH(mr, &share_cache->mr_list, mr) {
		unsigned int n;

		DEBUG("MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
		      mr_n++, rte_cpu_to_be_32(mr->pmd_mr.lkey),
		      mr->ms_n, mr->ms_bmp_n);
		if (mr->ms_n == 0)
			continue;
		for (n = 0; n < mr->ms_bmp_n; ) {
			struct mr_cache_entry ret = { 0, };

			n = mr_find_next_chunk(mr, &ret, n);
			if (!ret.end)
				break;
			DEBUG("  chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
			      chunk_n++, ret.start, ret.end);
		}
	}
	DEBUG("Dumping global cache %p", (void *)share_cache);
	mlx5_mr_btree_dump(&share_cache->cache);
	rte_rwlock_read_unlock(&share_cache->rwlock);
#endif
}