/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <rte_eal_memconfig.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>
#include <rte_bus_pci.h>

#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>

#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"

struct mr_find_contig_memsegs_data {
	uintptr_t addr;
	uintptr_t start;
	uintptr_t end;
	const struct rte_memseg_list *msl;
};

struct mr_update_mp_data {
	struct rte_eth_dev *dev;
	struct mlx5_mr_ctrl *mr_ctrl;
	int ret;
};

/**
 * Callback for memory free event. Iterate over the freed memsegs and check
 * whether each of them belongs to an existing MR. If so, clear the
 * corresponding bit in the MR's bitmap; as a result the MR becomes fragmented.
 * If an MR ends up with no remaining memseg, it will be freed later by
 * mlx5_mr_garbage_collect(). Even if this callback is called from a secondary
 * process, the garbage collector is run in the primary process, as a secondary
 * process can't call mlx5_mr_create().
 *
 * The global cache must be rebuilt if there's any change and this event has to
 * be propagated to the dataplane threads so that they flush their local caches.
 *
 * @param sh
 *   Pointer to the Ethernet device shared context.
 * @param addr
 *   Address of freed memory.
 * @param len
 *   Size of freed memory.
 */
static void
mlx5_mr_mem_event_free_cb(struct mlx5_dev_ctx_shared *sh,
			  const void *addr, size_t len)
{
	const struct rte_memseg_list *msl;
	struct mlx5_mr *mr;
	int ms_n;
	int i;
	int rebuild = 0;

	DEBUG("device %s free callback: addr=%p, len=%zu",
	      sh->ibdev_name, addr, len);
	msl = rte_mem_virt2memseg_list(addr);
	/* addr and len must be page-aligned. */
	MLX5_ASSERT((uintptr_t)addr ==
		    RTE_ALIGN((uintptr_t)addr, msl->page_sz));
	MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
	ms_n = len / msl->page_sz;
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	/* Clear bits of freed memsegs from MR. */
	for (i = 0; i < ms_n; ++i) {
		const struct rte_memseg *ms;
		struct mr_cache_entry entry;
		uintptr_t start;
		int ms_idx;
		uint32_t pos;

		/* Find MR having this memseg. */
		start = (uintptr_t)addr + i * msl->page_sz;
		mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, start);
		if (mr == NULL)
			continue;
		MLX5_ASSERT(mr->msl); /* Can't be external memory. */
		ms = rte_mem_virt2memseg((void *)start, msl);
		MLX5_ASSERT(ms != NULL);
		MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
		pos = ms_idx - mr->ms_base_idx;
		MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
		MLX5_ASSERT(pos < mr->ms_bmp_n);
		DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p",
		      sh->ibdev_name, (void *)mr, pos, (void *)start);
		rte_bitmap_clear(mr->ms_bmp, pos);
		if (--mr->ms_n == 0) {
			LIST_REMOVE(mr, mr);
			LIST_INSERT_HEAD(&sh->share_cache.mr_free_list, mr, mr);
			DEBUG("device %s remove MR(%p) from list",
			      sh->ibdev_name, (void *)mr);
		}
		/*
		 * The MR is fragmented or will be freed. The global cache
		 * must be rebuilt.
		 */
		rebuild = 1;
	}
	if (rebuild) {
		mlx5_mr_rebuild_cache(&sh->share_cache);
		/*
		 * Flush local caches by propagating the invalidation across
		 * cores. rte_smp_wmb() is enough to synchronize this event:
		 * if another core sees one of the freed memsegs again, it
		 * means the memseg has been re-allocated, which can only
		 * happen after this free call. Hence the store below
		 * (incrementing the generation) is guaranteed to be visible
		 * to that core before it sees the newly allocated memory.
		 */
		++sh->share_cache.dev_gen;
		DEBUG("broadcasting local cache flush, gen=%d",
		      sh->share_cache.dev_gen);
		rte_smp_wmb();
	}
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
}
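
/*
 * Illustration only: a minimal sketch of how a dataplane thread is expected to
 * consume the generation bump performed above. The per-queue control structure
 * keeps a snapshot of the device generation and compares it against the shared
 * counter before trusting its local cache. The field names (dev_gen_ptr,
 * cur_gen) follow struct mlx5_mr_ctrl, but the helper itself is hypothetical
 * and omits the actual cache-flush details:
 *
 *	static void
 *	example_check_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
 *	{
 *		// Pairs with rte_smp_wmb() issued after ++dev_gen above.
 *		if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen)) {
 *			rte_smp_rmb();
 *			// Drop stale local entries here, then resync the
 *			// snapshot before doing new address-to-LKey lookups.
 *			mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
 *		}
 *	}
 */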

/**
 * Callback for memory event. Must be called from the primary process
 * (enforced by the assertion below); only free events are handled.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct mlx5_dev_ctx_shared *sh;
	struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
		/* Iterate all the existing mlx5 devices. */
		LIST_FOREACH(sh, dev_list, mem_event_cb)
			mlx5_mr_mem_event_free_cb(sh, addr, len);
		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}
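
/*
 * For reference, a callback like the one above is hooked into the EAL memory
 * subsystem with rte_mem_event_callback_register(). A minimal sketch, assuming
 * the driver registers it once while setting up its first shared context (the
 * callback name string is illustrative):
 *
 *	#include <rte_memory.h>
 *
 *	static int
 *	example_register_mem_event_cb(void)
 *	{
 *		return rte_mem_event_callback_register("EXAMPLE_MEM_EVENT_CB",
 *						       mlx5_mr_mem_event_cb,
 *						       NULL);
 *	}
 *
 * The EAL then invokes the callback for every dynamic allocation/free of
 * hugepage memory, which is what drives the MR bitmap maintenance above.
 */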

/**
 * Bottom-half of LKey search on Rx.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
	struct mlx5_priv *priv = rxq_ctrl->priv;

	return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
				  &priv->sh->share_cache, mr_ctrl, addr,
				  priv->config.mr_ext_memseg_en);
}
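
/*
 * Illustration only: the "bottom-half" naming refers to the split between an
 * inline fast path in the datapath headers and this slower fallback. A sketch
 * of the expected calling pattern; example_rx_addr2mr() and
 * example_lookup_local_cache() are hypothetical stand-ins for the inline
 * helpers in the datapath headers:
 *
 *	static __rte_always_inline uint32_t
 *	example_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
 *	{
 *		uint32_t lkey;
 *
 *		// Cheap lookup in the per-queue cache first.
 *		lkey = example_lookup_local_cache(&rxq->mr_ctrl, addr);
 *		if (likely(lkey != UINT32_MAX))
 *			return lkey;
 *		// Miss: take the slow path, which may create a new MR.
 *		return mlx5_rx_addr2mr_bh(rxq, addr);
 *	}
 */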

/**
 * Bottom-half of LKey search on Tx.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static uint32_t
mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;

	return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
				  &priv->sh->share_cache, mr_ctrl, addr,
				  priv->config.mr_ext_memseg_en);
}

/**
 * Bottom-half of LKey search on Tx. If the address cannot be resolved from the
 * memseg lists, register the mempool of the mbuf as externally allocated
 * memory.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mb
 *   Pointer to mbuf.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
uint32_t
mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey;

	lkey = mlx5_tx_addr2mr_bh(txq, addr);
	if (lkey == UINT32_MAX && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
	}
	return lkey;
}
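
/*
 * Illustration only: mlx5_mb2mp() (declared in the datapath headers) resolves
 * the owning mempool of an mbuf. The subtlety it covers is that a cloned mbuf
 * shares the data buffer of its direct mbuf, so the buffer belongs to the
 * direct mbuf's pool. A sketch of the expected logic, using the standard
 * rte_mbuf helpers (example_mb2mp() itself is hypothetical):
 *
 *	static inline struct rte_mempool *
 *	example_mb2mp(struct rte_mbuf *buf)
 *	{
 *		// For a clone, the data buffer comes from the direct mbuf.
 *		if (unlikely(RTE_MBUF_CLONED(buf)))
 *			return rte_mbuf_from_indirect(buf)->pool;
 *		return buf->pool;
 *	}
 */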

/**
 * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
 *
 * Registers an externally allocated chunk and creates an MR for it. The MR
 * object is added to the global list. If the memseg list of an MR object
 * (mr->msl) is NULL, the MR object can be regarded as externally allocated
 * memory.
 *
 * Once external memory is registered, it should be static. If the memory is
 * freed and the virtual address range is later backed by different physical
 * memory, the device may crash due to a stale translation entry. The PMD
 * can't track free events of external memory for now.
 */
static void
mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
			 struct rte_mempool_memhdr *memhdr,
			 unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	struct rte_eth_dev *dev = data->dev;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
	struct mlx5_mr *mr = NULL;
	uintptr_t addr = (uintptr_t)memhdr->addr;
	size_t len = memhdr->len;
	struct mr_cache_entry entry;
	uint32_t lkey;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* If already registered, it should return. */
	rte_rwlock_read_lock(&sh->share_cache.rwlock);
	lkey = mlx5_mr_lookup_cache(&sh->share_cache, &entry, addr);
	rte_rwlock_read_unlock(&sh->share_cache.rwlock);
	if (lkey != UINT32_MAX)
		return;
	DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
		dev->data->port_id, mem_idx, mp->name);
	mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id,
				sh->share_cache.reg_mr_cb);
	if (!mr) {
		DRV_LOG(WARNING,
			"port %u unable to allocate a new MR of"
			" mempool (%s).",
			dev->data->port_id, mp->name);
		data->ret = -1;
		return;
	}
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(&sh->share_cache, mr);
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
	/* Insert to the local cache table. */
	mlx5_mr_addr2mr_bh(sh->pd, &priv->mp_id, &sh->share_cache,
			   mr_ctrl, addr, priv->config.mr_ext_memseg_en);
}

/**
 * Finds the first ethdev that matches the PCI device.
 * Multiple ethdevs per PCI device only exist with representors; in that case
 * it is enough to pick any one of the ports, since they all share the same
 * ibv context.
 *
 * @param pdev
 *   Pointer to the PCI device.
 *
 * @return
 *   Pointer to the ethdev if found, NULL otherwise.
 */
static struct rte_eth_dev *
pci_dev_to_eth_dev(struct rte_pci_device *pdev)
{
	uint16_t port_id;

	port_id = rte_eth_find_next_of(0, &pdev->device);
	if (port_id == RTE_MAX_ETHPORTS)
		return NULL;
	return &rte_eth_devices[port_id];
}

/**
 * DPDK callback to DMA map external memory to a PCI device.
 *
 * @param pdev
 *   Pointer to the PCI device.
 * @param addr
 *   Starting virtual address of memory to be mapped.
 * @param iova
 *   Starting IOVA address of memory to be mapped.
 * @param len
 *   Length of memory segment being mapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_dma_map(struct rte_pci_device *pdev, void *addr,
	     uint64_t iova __rte_unused, size_t len)
{
	struct rte_eth_dev *dev;
	struct mlx5_mr *mr;
	struct mlx5_priv *priv;
	struct mlx5_dev_ctx_shared *sh;

	dev = pci_dev_to_eth_dev(pdev);
	if (!dev) {
		DRV_LOG(WARNING, "unable to find matching ethdev "
			"to PCI device %p", (void *)pdev);
		rte_errno = ENODEV;
		return -1;
	}
	priv = dev->data->dev_private;
	sh = priv->sh;
	mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY,
				sh->share_cache.reg_mr_cb);
	if (!mr) {
		DRV_LOG(WARNING,
			"port %u unable to dma map", dev->data->port_id);
		rte_errno = EINVAL;
		return -1;
	}
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(&sh->share_cache, mr);
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
	return 0;
}
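
/*
 * Illustration only: mlx5_dma_map() above and mlx5_dma_unmap() below are not
 * called by applications directly. A condensed sketch of the usual application
 * flow that ends up here, using the public EAL/ethdev APIs; ext_buf, ext_len,
 * pg_sz, iova and port_id are assumptions of the example and error handling is
 * omitted:
 *
 *	#include <rte_memory.h>
 *	#include <rte_dev.h>
 *	#include <rte_ethdev.h>
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	// Make the anonymous buffer known to the EAL...
 *	rte_extmem_register(ext_buf, ext_len, NULL, 0, pg_sz);
 *	// ...then ask the device to DMA-map it; for mlx5 this lands in
 *	// mlx5_dma_map() via the PCI bus dma_map callback.
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	rte_dev_dma_map(dev_info.device, ext_buf, iova, ext_len);
 *
 *	// Teardown mirrors the setup:
 *	rte_dev_dma_unmap(dev_info.device, ext_buf, iova, ext_len);
 *	rte_extmem_unregister(ext_buf, ext_len);
 */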

/**
 * DPDK callback to DMA unmap external memory from a PCI device.
 *
 * @param pdev
 *   Pointer to the PCI device.
 * @param addr
 *   Starting virtual address of memory to be unmapped.
 * @param iova
 *   Starting IOVA address of memory to be unmapped.
 * @param len
 *   Length of memory segment being unmapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr,
	       uint64_t iova __rte_unused, size_t len __rte_unused)
{
	struct rte_eth_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_dev_ctx_shared *sh;
	struct mlx5_mr *mr;
	struct mr_cache_entry entry;

	dev = pci_dev_to_eth_dev(pdev);
	if (!dev) {
		DRV_LOG(WARNING, "unable to find matching ethdev "
			"to PCI device %p", (void *)pdev);
		rte_errno = ENODEV;
		return -1;
	}
	priv = dev->data->dev_private;
	sh = priv->sh;
	/* The MR list and the global cache are modified, take the write lock. */
	rte_rwlock_write_lock(&sh->share_cache.rwlock);
	mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, (uintptr_t)addr);
	if (!mr) {
		rte_rwlock_write_unlock(&sh->share_cache.rwlock);
		DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered "
			"to PCI device %p", (uintptr_t)addr,
			(void *)pdev);
		rte_errno = EINVAL;
		return -1;
	}
	LIST_REMOVE(mr, mr);
	DEBUG("port %u remove MR(%p) from list", dev->data->port_id,
	      (void *)mr);
	mlx5_mr_free(mr, sh->share_cache.dereg_mr_cb);
	mlx5_mr_rebuild_cache(&sh->share_cache);
	/*
	 * Flush local caches by propagating the invalidation across cores.
	 * rte_smp_wmb() is enough to synchronize this event: if another core
	 * sees the freed memory again, it means the memory has been
	 * re-allocated, which can only happen after this call. Hence the
	 * store below (incrementing the generation) is guaranteed to be
	 * visible to that core before it sees the newly allocated memory.
	 */
	++sh->share_cache.dev_gen;
	DEBUG("broadcasting local cache flush, gen=%d",
	      sh->share_cache.dev_gen);
	rte_smp_wmb();
	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
	return 0;
}

/**
 * Register MRs for all memory chunks of a mempool that has externally
 * allocated memory and fill the per-queue (local) cache.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering Mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		      struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
	return data.ret;
}
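
/*
 * Illustration only: rte_mempool_mem_iter() visits every memory chunk backing
 * the mempool exactly once and hands the callback a struct rte_mempool_memhdr
 * describing it, which is how mlx5_mr_update_ext_mp_cb() obtains the
 * (addr, len) ranges to register. A minimal sketch of the same iteration with
 * a hypothetical callback that only prints the chunks:
 *
 *	static void
 *	example_dump_chunk(struct rte_mempool *mp __rte_unused, void *opaque,
 *			   struct rte_mempool_memhdr *memhdr,
 *			   unsigned mem_idx)
 *	{
 *		RTE_SET_USED(opaque);
 *		printf("chunk #%u: addr=%p len=%zu\n",
 *		       mem_idx, memhdr->addr, memhdr->len);
 *	}
 *
 *	// Returns the number of chunks visited.
 *	rte_mempool_mem_iter(mp, example_dump_chunk, NULL);
 */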

/**
 * Register MRs for all memory chunks of a mempool that has externally
 * allocated memory and look up the LKey of the given address.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param addr
 *   Search key.
 * @param mp
 *   Pointer to registering Mempool where addr belongs.
 *
 * @return
 *   LKey for address on success, UINT32_MAX on failure.
 */
uint32_t
mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
		      struct rte_mempool *mp)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
	struct mlx5_priv *priv = txq_ctrl->priv;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(WARNING,
			"port %u using address (%p) from unregistered mempool"
			" having externally allocated memory"
			" in secondary process, please create mempool"
			" prior to rte_eth_dev_start()",
			PORT_ID(priv), (void *)addr);
		return UINT32_MAX;
	}
	mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
	return mlx5_tx_addr2mr_bh(txq, addr);
}

/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
static void
mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
		     struct rte_mempool_memhdr *memhdr,
		     unsigned mem_idx __rte_unused)
{
	struct mr_update_mp_data *data = opaque;
	struct rte_eth_dev *dev = data->dev;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t lkey;

	/* Stop iteration if failed in the previous walk. */
	if (data->ret < 0)
		return;
	/* Register address of the chunk and update local caches. */
	lkey = mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
				  &priv->sh->share_cache, data->mr_ctrl,
				  (uintptr_t)memhdr->addr,
				  priv->config.mr_ext_memseg_en);
	if (lkey == UINT32_MAX)
		data->ret = -1;
}

/**
 * Register entire memory chunks in a Mempool.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mp
 *   Pointer to registering Mempool.
 *
 * @return
 *   0 on success, -1 on failure.
 */
int
mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		  struct rte_mempool *mp)
{
	struct mr_update_mp_data data = {
		.dev = dev,
		.mr_ctrl = mr_ctrl,
		.ret = 0,
	};

	DRV_LOG(DEBUG, "Port %u Rx queue registering mp %s "
		"having %u chunks.", dev->data->port_id,
		mp->name, mp->nb_mem_chunks);
	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
	if (data.ret < 0 && rte_errno == ENXIO) {
		/* Mempool may have externally allocated memory. */
		return mlx5_mr_update_ext_mp(dev, mr_ctrl, mp);
	}
	return data.ret;
}
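
/*
 * Illustration only: mlx5_mr_update_mp() is the control-path entry point used
 * when a queue is given a mempool, typically while setting up or starting an
 * Rx queue, so that the datapath rarely has to take the bottom-half above. A
 * condensed sketch of such a call site; the surrounding queue-start logic and
 * error handling are assumptions of the example:
 *
 *	// Pre-populate the queue's MR cache with its mbuf pool so the first
 *	// received packets already hit the local cache.
 *	if (mlx5_mr_update_mp(dev, &rxq->mr_ctrl, rxq->mp) < 0) {
 *		DRV_LOG(ERR, "port %u failed to register mempool %s",
 *			dev->data->port_id, rxq->mp->name);
 *		return -rte_errno;
 *	}
 */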