/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2019 NXP
 *
 */

#include <unistd.h>
#include <stdio.h>
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include "rte_dpaa2_mempool.h"

#include "fslmc_vfio.h"
#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>
#include "dpaa2_hw_mempool.h"
#include "dpaa2_hw_mempool_logs.h"

#include <dpaax_iova_table.h>

struct dpaa2_bp_info *rte_dpaa2_bpid_info;
static struct dpaa2_bp_list *h_bp_list;

static int
rte_hw_mbuf_create_pool(struct rte_mempool *mp)
{
	struct dpaa2_bp_list *bp_list;
	struct dpaa2_dpbp_dev *avail_dpbp;
	struct dpaa2_bp_info *bp_info;
	struct dpbp_attr dpbp_attr;
	uint32_t bpid;
	int ret;

	avail_dpbp = dpaa2_alloc_dpbp_dev();
	if (!avail_dpbp) {
		DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
		return -ENOENT;
	}

	if (rte_dpaa2_bpid_info == NULL) {
		rte_dpaa2_bpid_info = rte_malloc(NULL,
				sizeof(struct dpaa2_bp_info) * MAX_BPID,
				RTE_CACHE_LINE_SIZE);
		if (rte_dpaa2_bpid_info == NULL) {
			ret = -ENOMEM;
			goto err1;
		}
		memset(rte_dpaa2_bpid_info, 0,
		       sizeof(struct dpaa2_bp_info) * MAX_BPID);
	}

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_MEMPOOL_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			goto err1;
		}
	}

	ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
	if (ret != 0) {
		DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d",
				  ret);
		goto err1;
	}

	ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
				  avail_dpbp->token, &dpbp_attr);
	if (ret != 0) {
		DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d",
				  ret);
		goto err2;
	}

	bp_info = rte_malloc(NULL, sizeof(struct dpaa2_bp_info),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_info) {
		DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
		ret = -ENOMEM;
		goto err2;
	}

	/* Allocate the bp_list which will be added into the global bp list */
	bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_list) {
		DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
		ret = -ENOMEM;
		goto err3;
	}

	/* Set parameters of buffer pool list */
	bp_list->buf_pool.num_bufs = mp->size;
	bp_list->buf_pool.size = mp->elt_size
			- sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
	bp_list->buf_pool.bpid = dpbp_attr.bpid;
	bp_list->buf_pool.h_bpool_mem = NULL;
	bp_list->buf_pool.dpbp_node = avail_dpbp;
	/* Identification for our offloaded pool_data structure */
	bp_list->dpaa2_ops_index = mp->ops_index;
	bp_list->next = h_bp_list;
	bp_list->mp = mp;

	bpid = dpbp_attr.bpid;

	rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
				+ rte_pktmbuf_priv_size(mp);
	rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
	rte_dpaa2_bpid_info[bpid].bpid = bpid;

	rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
		   sizeof(struct dpaa2_bp_info));
	mp->pool_data = (void *)bp_info;

	DPAA2_MEMPOOL_DEBUG("BP List created for bpid = %d", dpbp_attr.bpid);

	h_bp_list = bp_list;
	return 0;
err3:
	rte_free(bp_info);
err2:
	dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
err1:
	dpaa2_free_dpbp_dev(avail_dpbp);

	return ret;
}

static void
rte_hw_mbuf_free_pool(struct rte_mempool *mp)
{
	struct dpaa2_bp_info *bpinfo;
	struct dpaa2_bp_list *bp;
	struct dpaa2_dpbp_dev *dpbp_node;

	if (!mp->pool_data) {
		DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool");
		return;
	}

	bpinfo = (struct dpaa2_bp_info *)mp->pool_data;
	bp = bpinfo->bp_list;
	dpbp_node = bp->buf_pool.dpbp_node;

	dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token);

	/* Unlink the pool from the global bp list and free the list node */
	if (h_bp_list == bp) {
		h_bp_list = h_bp_list->next;
		rte_free(bp);
	} else { /* if it is not the first node */
		struct dpaa2_bp_list *prev = h_bp_list, *temp;

		temp = h_bp_list->next;
		while (temp) {
			if (temp == bp) {
				prev->next = temp->next;
				rte_free(bp);
				break;
			}
			prev = temp;
			temp = temp->next;
		}
	}

	rte_free(mp->pool_data);
	dpaa2_free_dpbp_dev(dpbp_node);
}

static void
rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
			void * const *obj_table,
			uint32_t bpid,
			uint32_t meta_data_size,
			int count)
{
	struct qbman_release_desc releasedesc;
	struct qbman_swp *swp;
	int ret;
	int i, n, retry_count;
	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			DPAA2_MEMPOOL_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	/* Create a release descriptor required for releasing
	 * buffers into QBMAN
	 */
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, bpid);

	n = count % DPAA2_MBUF_MAX_ACQ_REL;
	if (unlikely(!n))
		goto aligned;

	/* convert mbufs to buffers for the remainder */
	for (i = 0; i < n; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])
				+ meta_data_size;
#else
		bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
#endif
	}

	/* feed them to bman */
	retry_count = 0;
	while ((ret = qbman_swp_release(swp, &releasedesc, bufs, n)) ==
			-EBUSY) {
		retry_count++;
		if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
			DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
			return;
		}
	}

aligned:
	/* if there are more buffers to free */
	while (n < count) {
		/* convert mbufs to buffers */
		for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
			bufs[i] = (uint64_t)
				rte_mempool_virt2iova(obj_table[n + i])
					+ meta_data_size;
#else
			bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
#endif
		}

		retry_count = 0;
		while ((ret = qbman_swp_release(swp, &releasedesc, bufs,
					DPAA2_MBUF_MAX_ACQ_REL)) == -EBUSY) {
			retry_count++;
			if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
				DPAA2_MEMPOOL_ERR("bman release retry exceeded, low fbpr?");
				return;
			}
		}
		n += DPAA2_MBUF_MAX_ACQ_REL;
	}
}

int rte_dpaa2_bpid_info_init(struct rte_mempool *mp)
{
	struct dpaa2_bp_info *bp_info = mempool_to_bpinfo(mp);
	uint32_t bpid = bp_info->bpid;

	if (!rte_dpaa2_bpid_info) {
		rte_dpaa2_bpid_info = rte_malloc(NULL,
				sizeof(struct dpaa2_bp_info) * MAX_BPID,
				RTE_CACHE_LINE_SIZE);
		if (rte_dpaa2_bpid_info == NULL)
			return -ENOMEM;
		memset(rte_dpaa2_bpid_info, 0,
		       sizeof(struct dpaa2_bp_info) * MAX_BPID);
	}

	rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
				+ rte_pktmbuf_priv_size(mp);
	rte_dpaa2_bpid_info[bpid].bp_list = bp_info->bp_list;
	rte_dpaa2_bpid_info[bpid].bpid = bpid;

	return 0;
}

uint16_t
rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
{
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(mp);
	if (!(bp_info->bp_list)) {
		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
		return -ENOMEM;
	}

	return bp_info->bpid;
}

struct rte_mbuf *
rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr)
{
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(mp);
	if (!(bp_info->bp_list)) {
		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
		return NULL;
	}

	return (struct rte_mbuf *)((uint8_t *)buf_addr -
			bp_info->meta_data_size);
}

int
rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
			  void **obj_table, unsigned int count)
{
#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
	static int alloc;
#endif
	struct qbman_swp *swp;
	uint16_t bpid;
	size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
	int i, ret;
	unsigned int n = 0;
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(pool);

	if (!(bp_info->bp_list)) {
		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
		return -ENOENT;
	}

	bpid = bp_info->bpid;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret != 0) {
			DPAA2_MEMPOOL_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return ret;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (n < count) {
		/* Acquire is all-or-nothing, so we drain in chunks of
		 * DPAA2_MBUF_MAX_ACQ_REL (7) buffers, then the remainder.
		 */
		if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
			ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
						DPAA2_MBUF_MAX_ACQ_REL);
		} else {
			ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
						count - n);
		}
		/* If fewer buffers than requested are available in the
		 * pool, qbman_swp_acquire() returns 0.
		 */
		if (ret <= 0) {
			DPAA2_MEMPOOL_DP_DEBUG(
				"Buffer acquire failed with err code: %d\n",
				ret);
			/* The API expects the exact number of requested
			 * buffers, so release everything acquired so far.
			 */
			rte_dpaa2_mbuf_release(pool, obj_table, bpid,
					       bp_info->meta_data_size, n);
			return -ENOBUFS;
		}
		/* assign an mbuf from each acquired object */
		for (i = 0; (i < ret) && bufs[i]; i++) {
			DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
			obj_table[n] = (struct rte_mbuf *)
				(bufs[i] - bp_info->meta_data_size);
			DPAA2_MEMPOOL_DP_DEBUG(
				"Acquired %p address %p from BMAN\n",
				(void *)bufs[i], (void *)obj_table[n]);
			n++;
		}
	}

#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
	alloc += n;
	DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
			       alloc, count, n);
#endif
	return 0;
}

static int
rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
		      void * const *obj_table, unsigned int n)
{
	struct dpaa2_bp_info *bp_info;

	bp_info = mempool_to_bpinfo(pool);
	if (!(bp_info->bp_list)) {
		DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
		return -ENOENT;
	}
	rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
			       bp_info->meta_data_size, n);

	return 0;
}

static unsigned int
rte_hw_mbuf_get_count(const struct rte_mempool *mp)
{
	int ret;
	unsigned int num_of_bufs = 0;
	struct dpaa2_bp_info *bp_info;
	struct dpaa2_dpbp_dev *dpbp_node;
	struct fsl_mc_io mc_io;

	if (!mp || !mp->pool_data) {
		DPAA2_MEMPOOL_ERR("Invalid mempool provided");
		return 0;
	}

	bp_info = (struct dpaa2_bp_info *)mp->pool_data;
	dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;

	/* When a secondary process reads the stats, the MCP portal in
	 * priv-hw may still hold the primary process address; use the
	 * secondary process's own MCP portal address for this object.
	 */
	mc_io.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
	ret = dpbp_get_num_free_bufs(&mc_io, CMD_PRI_LOW,
				     dpbp_node->token, &num_of_bufs);
	if (ret) {
		DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
				  ret);
		return 0;
	}

	DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);

	return num_of_bufs;
}

static int
dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
	       void *vaddr, rte_iova_t paddr, size_t len,
	       rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	struct rte_memseg_list *msl;

	/* A memseg list exists only when the memory is not external,
	 * so a DMA map is required only for user-provided (external)
	 * memory.
	 */
	msl = rte_mem_virt2memseg_list(vaddr);
	if (!msl) {
		DPAA2_MEMPOOL_DEBUG("Memsegment is external.");
		rte_fslmc_vfio_mem_dmamap((size_t)vaddr,
					  (size_t)paddr, (size_t)len);
	}
	/* Insert entry into the PA->VA table */
	dpaax_iova_table_update(paddr, vaddr, len);

	return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
					      len, obj_cb, obj_cb_arg);
}

static const struct rte_mempool_ops dpaa2_mpool_ops = {
	.name = DPAA2_MEMPOOL_OPS_NAME,
	.alloc = rte_hw_mbuf_create_pool,
	.free = rte_hw_mbuf_free_pool,
	.enqueue = rte_hw_mbuf_free_bulk,
	.dequeue = rte_dpaa2_mbuf_alloc_bulk,
	.get_count = rte_hw_mbuf_get_count,
	.populate = dpaa2_populate,
};

RTE_MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);

RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_mempool, NOTICE);
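/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * never calls the ops above directly. It binds a pktmbuf pool to them by
 * name, after which rte_mempool dequeue/enqueue go through
 * rte_dpaa2_mbuf_alloc_bulk()/rte_hw_mbuf_free_bulk(). The guard macro,
 * function name and pool sizing below are hypothetical.
 */
#ifdef DPAA2_MEMPOOL_USAGE_SKETCH
#include <rte_errno.h>

static struct rte_mempool *
example_dpaa2_pktmbuf_pool(int socket_id)
{
	/* DPAA2_MEMPOOL_OPS_NAME selects the ops registered in this file;
	 * pool creation then runs rte_hw_mbuf_create_pool(), which attaches
	 * a DPBP object and seeds rte_dpaa2_bpid_info[] for the fast path.
	 */
	struct rte_mempool *mp = rte_pktmbuf_pool_create_by_ops(
			"ex_dpaa2_pool", 8192 /* mbufs */, 256 /* cache */,
			0 /* priv size */, RTE_MBUF_DEFAULT_BUF_SIZE,
			DPAA2_MEMPOOL_OPS_NAME, socket_id);

	if (mp == NULL)
		DPAA2_MEMPOOL_ERR("pool creation failed: %d", rte_errno);

	return mp;
}
#endif /* DPAA2_MEMPOOL_USAGE_SKETCH */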