/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2017,2019 NXP
 *
 */

/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <dpaa_mempool.h>
#include <dpaax_iova_table.h>
/* List of all the memseg information locally maintained in the dpaa driver.
 * This is used to optimize PA-to-VA searches until a better mechanism
 * (algorithm) is available.
 */
struct dpaa_memseg_list rte_dpaa_memsegs
	= TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);

struct dpaa_bp_info *rte_dpaa_bpid_info;

RTE_LOG_REGISTER(dpaa_logtype_mempool, mempool.dpaa, NOTICE);
static int
dpaa_mbuf_create_pool(struct rte_mempool *mp)
{
	struct bman_pool *bp;
	struct bm_buffer bufs[8];
	struct dpaa_bp_info *bp_info;
	uint8_t bpid;
	int num_bufs = 0, ret = 0;
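	/* Request a dynamically allocated BPID: the BMan driver picks any
	 * free buffer pool ID rather than a statically reserved one.
	 */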
	struct bman_pool_params params = {
		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID
	};

	MEMPOOL_INIT_FUNC_TRACE();

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR(
				"rte_dpaa_portal_init failed with ret: %d",
				 ret);
			return -1;
		}
	}
	bp = bman_new_pool(&params);
	if (!bp) {
		DPAA_MEMPOOL_ERR("bman_new_pool() failed");
		return -ENODEV;
	}
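	/* The driver selected the BPID for us; read it back from the pool
	 * parameters so this pool can be indexed in the global BPID table.
	 */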
	bpid = bman_get_params(bp)->bpid;

	/* Drain the pool of anything already in it; a dynamically allocated
	 * BPID may still hold stale buffers from a previous user.
	 */
	do {
		/* Acquire is all-or-nothing, so we drain in 8s,
		 * then in 1s for the remainder.
		 */
		if (ret != 1)
			ret = bman_acquire(bp, bufs, 8, 0);
		if (ret < 8)
			ret = bman_acquire(bp, bufs, 1, 0);
		if (ret > 0)
			num_bufs += ret;
	} while (ret > 0);
	if (num_bufs)
		DPAA_MEMPOOL_WARN("drained %u bufs from BPID %d",
				  num_bufs, bpid);

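	/* The global BPID table is allocated lazily on first pool creation
	 * and is shared by all DPAA mempools; it is indexed by BPID.
	 */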
	if (rte_dpaa_bpid_info == NULL) {
		rte_dpaa_bpid_info = (struct dpaa_bp_info *)rte_zmalloc(NULL,
				sizeof(struct dpaa_bp_info) * DPAA_MAX_BPOOLS,
				RTE_CACHE_LINE_SIZE);
		if (rte_dpaa_bpid_info == NULL) {
			bman_free_pool(bp);
			return -ENOMEM;
		}
	}

	rte_dpaa_bpid_info[bpid].mp = mp;
	rte_dpaa_bpid_info[bpid].bpid = bpid;
	rte_dpaa_bpid_info[bpid].size = mp->elt_size;
	rte_dpaa_bpid_info[bpid].bp = bp;
	rte_dpaa_bpid_info[bpid].meta_data_size =
		sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
	rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
	rte_dpaa_bpid_info[bpid].ptov_off = 0;
	rte_dpaa_bpid_info[bpid].flags = 0;

	bp_info = rte_malloc(NULL,
			     sizeof(struct dpaa_bp_info),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_info) {
		DPAA_MEMPOOL_WARN("Memory allocation failed for bp_info");
		bman_free_pool(bp);
		return -ENOMEM;
	}

	rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
		   sizeof(struct dpaa_bp_info));
	mp->pool_data = (void *)bp_info;

	DPAA_MEMPOOL_INFO("BMAN pool created for bpid = %d", bpid);
	return 0;
}

static void
dpaa_mbuf_free_pool(struct rte_mempool *mp)
{
	struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

	MEMPOOL_INIT_FUNC_TRACE();

	if (bp_info) {
		bman_free_pool(bp_info->bp);
		DPAA_MEMPOOL_INFO("BMAN pool freed for bpid = %d",
				  bp_info->bpid);
		rte_free(mp->pool_data);
		mp->pool_data = NULL;
	}
}

static void
dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
{
	struct bm_buffer buf;
	int ret;

	DPAA_MEMPOOL_DPDEBUG("Free 0x%" PRIx64 " to bpid: %d",
			   addr, bp_info->bpid);

	bm_buffer_set64(&buf, addr);
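	/* bman_release() can fail transiently when the portal's release
	 * command ring is full; back off briefly and retry until the buffer
	 * is accepted, otherwise it would be leaked.
	 */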
retry:
	ret = bman_release(bp_info->bp, &buf, 1, 0);
	if (ret) {
		DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying...");
		cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
		goto retry;
	}
}

static int
dpaa_mbuf_free_bulk(struct rte_mempool *pool,
		    void *const *obj_table,
		    unsigned int n)
{
	struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
	int ret;
	unsigned int i = 0;

	DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
			     n, bp_info->bpid);

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
					 ret);
			return 0;
		}
	}

	while (i < n) {
		uint64_t phy = rte_mempool_virt2iova(obj_table[i]);

		if (unlikely(!bp_info->ptov_off)) {
			/* Buffers are from a single mem segment; cache the
			 * constant VA-to-PA offset for later PTOV lookups.
			 */
			if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
				bp_info->ptov_off = (size_t)obj_table[i] - phy;
				rte_dpaa_bpid_info[bp_info->bpid].ptov_off
						= bp_info->ptov_off;
			}
		}

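		/* BMan is handed the IOVA of the data area, which lies
		 * meta_data_size bytes (struct rte_mbuf plus the private
		 * area) past the start of the object.
		 */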
		dpaa_buf_free(bp_info,
			      (uint64_t)phy + bp_info->meta_data_size);
		i = i + 1;
	}

	DPAA_MEMPOOL_DPDEBUG("freed %d buffers in bpid = %d",
			     n, bp_info->bpid);

	return 0;
}

static int
dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
		     void **obj_table,
		     unsigned int count)
{
	struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
	struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
	struct dpaa_bp_info *bp_info;
	void *bufaddr;
	int i, ret;
	unsigned int n = 0;

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);

	DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d",
			     count, bp_info->bpid);

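	/* Bursts of twice the maximum per-lcore cache size or more are not
	 * expected from the mempool layer and are rejected outright.
	 */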
	if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
		DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
				 count);
		return -1;
	}

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
					 ret);
			return -1;
		}
	}

	while (n < count) {
		/* Acquire is all-or-nothing, so we acquire in chunks of
		 * DPAA_MBUF_MAX_ACQ_REL, then the remainder.
		 */
		if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
			ret = bman_acquire(bp_info->bp, bufs,
					   DPAA_MBUF_MAX_ACQ_REL, 0);
		} else {
			ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
		}
		/* When fewer buffers than requested are available in the
		 * pool, bman_acquire() returns 0.
		 */
		if (ret <= 0) {
			DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)",
					     ret);
			/* The API expects the exact number of requested
			 * buffers, so release everything allocated so far.
			 */
			dpaa_mbuf_free_bulk(pool, obj_table, n);
			return -ENOBUFS;
		}
		/* Assign mbufs from the acquired objects */
		for (i = 0; (i < ret) && bufs[i].addr; i++) {
			/* TODO-errata - observed that bufs may be null,
			 * i.e. the first buffer is valid while the remaining
			 * 6 buffers may be null.
			 */
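			/* Convert the acquired IOVA back to a virtual address
			 * and step back over the mbuf metadata to recover the
			 * object pointer originally handed to the application.
			 */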
			bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
			m[n] = (struct rte_mbuf *)((char *)bufaddr
						- bp_info->meta_data_size);
			DPAA_MEMPOOL_DPDEBUG("Vaddr (%p), mbuf (%p) from BMAN",
					     (void *)bufaddr, (void *)m[n]);
			n++;
		}
	}

	DPAA_MEMPOOL_DPDEBUG("Allocated %d buffers from bpid = %d",
			     n, bp_info->bpid);
	return 0;
}

static unsigned int
dpaa_mbuf_get_count(const struct rte_mempool *mp)
{
	struct dpaa_bp_info *bp_info;

	MEMPOOL_INIT_FUNC_TRACE();

	if (!mp || !mp->pool_data) {
		DPAA_MEMPOOL_ERR("Invalid mempool provided");
		return 0;
	}

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

	return bman_query_free_buffers(bp_info->bp);
}

static int
dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
	      void *vaddr, rte_iova_t paddr, size_t len,
	      rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	struct dpaa_bp_info *bp_info;
	unsigned int total_elt_sz;
	struct dpaa_memseg *ms;

	if (!mp || !mp->pool_data) {
		DPAA_MEMPOOL_ERR("Invalid mempool provided");
		return 0;
	}

	/* Update the PA-VA table */
	dpaax_iova_table_update(paddr, vaddr, len);

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

	DPAA_MEMPOOL_DPDEBUG("Req size %" PRIx64 " vs Available %u",
			   (uint64_t)len, total_elt_sz * mp->size);

	/* Check whether the pool area has sufficient space for all elements
	 * of the mempool in this single memzone.
	 */
	if (len >= total_elt_sz * mp->size)
		bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;

	/* For each memory chunk pinned to the mempool, a linked list of the
	 * contained memsegs is created for searching when PA-to-VA
	 * conversion is required.
	 */
	ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
	if (!ms) {
		DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
		DPAA_MEMPOOL_WARN("Fast Physical-to-Virtual address translation will not be available.");
		/* If the element is not added, searches for it will simply
		 * fail and the logic falls back to the traditional DPDK
		 * memseg traversal code. So this is not a blocking error,
		 * but an error message is logged.
		 */
		return 0;
	}

	ms->vaddr = vaddr;
	ms->iova = paddr;
	ms->len = len;
	/* Head insertions are generally faster than tail insertions as the
	 * buffers pinned are picked from the rear end.
	 */
	TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);

	return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
					       len, obj_cb, obj_cb_arg);
}

static const struct rte_mempool_ops dpaa_mpool_ops = {
	.name = DPAA_MEMPOOL_OPS_NAME,
	.alloc = dpaa_mbuf_create_pool,
	.free = dpaa_mbuf_free_pool,
	.enqueue = dpaa_mbuf_free_bulk,
	.dequeue = dpaa_mbuf_alloc_bulk,
	.get_count = dpaa_mbuf_get_count,
	.populate = dpaa_populate,
};

MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
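
/* A minimal usage sketch (not part of the driver): an application would
 * typically select these ops by name when creating a mempool. The pool
 * name and sizes below are illustrative assumptions.
 *
 *   struct rte_mempool *mp;
 *
 *   mp = rte_mempool_create_empty("pkt_pool", 8192,
 *                                 RTE_MBUF_DEFAULT_BUF_SIZE, 256,
 *                                 sizeof(struct rte_pktmbuf_pool_private),
 *                                 rte_socket_id(), 0);
 *   rte_mempool_set_ops_byname(mp, DPAA_MEMPOOL_OPS_NAME, NULL);
 *
 * On a DPAA platform, rte_pktmbuf_pool_create() resolves to these ops
 * automatically via rte_mbuf_best_mempool_ops().
 */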