xref: /f-stack/dpdk/drivers/event/dlb/pf/dlb_pf.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <stdio.h>
8 #include <sys/mman.h>
9 #include <sys/fcntl.h>
10 #include <sys/time.h>
11 #include <errno.h>
12 #include <assert.h>
13 #include <unistd.h>
14 #include <string.h>
15 #include <rte_debug.h>
16 #include <rte_log.h>
17 #include <rte_dev.h>
18 #include <rte_devargs.h>
19 #include <rte_mbuf.h>
20 #include <rte_ring.h>
21 #include <rte_errno.h>
22 #include <rte_kvargs.h>
23 #include <rte_malloc.h>
24 #include <rte_cycles.h>
25 #include <rte_io.h>
26 #include <rte_memory.h>
27 #include <rte_string_fns.h>
28 
29 #include "../dlb_priv.h"
30 #include "../dlb_iface.h"
31 #include "../dlb_inline_fns.h"
32 #include "dlb_main.h"
33 #include "base/dlb_hw_types.h"
34 #include "base/dlb_osdep.h"
35 #include "base/dlb_resource.h"
36 
37 static void
38 dlb_pf_low_level_io_init(struct dlb_eventdev *dlb __rte_unused)
39 {
40 	int i;
41 
42 	/* Addresses will be initialized at port create */
43 	for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
44 		/* First directed ports */
45 
46 		/* producer port */
47 		dlb_port[i][DLB_DIR].pp_addr = NULL;
48 
49 		/* popcount */
50 		dlb_port[i][DLB_DIR].ldb_popcount = NULL;
51 		dlb_port[i][DLB_DIR].dir_popcount = NULL;
52 
53 		/* consumer queue */
54 		dlb_port[i][DLB_DIR].cq_base = NULL;
55 		dlb_port[i][DLB_DIR].mmaped = true;
56 
57 		/* Now load balanced ports */
58 
59 		/* producer port */
60 		dlb_port[i][DLB_LDB].pp_addr = NULL;
61 
62 		/* popcount */
63 		dlb_port[i][DLB_LDB].ldb_popcount = NULL;
64 		dlb_port[i][DLB_LDB].dir_popcount = NULL;
65 
66 		/* consumer queue */
67 		dlb_port[i][DLB_LDB].cq_base = NULL;
68 		dlb_port[i][DLB_LDB].mmaped = true;
69 	}
70 }
71 
72 static int
73 dlb_pf_open(struct dlb_hw_dev *handle, const char *name)
74 {
75 	RTE_SET_USED(handle);
76 	RTE_SET_USED(name);
77 
78 	return 0;
79 }
80 
81 static void
82 dlb_pf_domain_close(struct dlb_eventdev *dlb)
83 {
84 	struct dlb_dev *dlb_dev = (struct dlb_dev *)dlb->qm_instance.pf_dev;
85 	int ret;
86 
87 	ret = dlb_reset_domain(&dlb_dev->hw, dlb->qm_instance.domain_id);
88 	if (ret)
89 		DLB_LOG_ERR("dlb_pf_reset_domain err %d", ret);
90 }
91 
92 static int
93 dlb_pf_get_device_version(struct dlb_hw_dev *handle,
94 			  uint8_t *revision)
95 {
96 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
97 
98 	*revision = dlb_dev->revision;
99 
100 	return 0;
101 }
102 
103 static int
104 dlb_pf_get_num_resources(struct dlb_hw_dev *handle,
105 			 struct dlb_get_num_resources_args *rsrcs)
106 {
107 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
108 
109 	dlb_hw_get_num_resources(&dlb_dev->hw, rsrcs);
110 
111 	return 0;
112 }
113 
114 static int
115 dlb_pf_sched_domain_create(struct dlb_hw_dev *handle,
116 			   struct dlb_create_sched_domain_args *arg)
117 {
118 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
119 	struct dlb_cmd_response response = {0};
120 	int ret;
121 
122 	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
123 
124 	if (dlb_dev->domain_reset_failed) {
125 		response.status = DLB_ST_DOMAIN_RESET_FAILED;
126 		ret = -EINVAL;
127 		goto done;
128 	}
129 
130 	ret = dlb_hw_create_sched_domain(&dlb_dev->hw, arg, &response);
131 	if (ret)
132 		goto done;
133 
134 done:
135 
136 	*(struct dlb_cmd_response *)arg->response = response;
137 
138 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
139 
140 	return ret;
141 }
142 
143 static int
144 dlb_pf_ldb_credit_pool_create(struct dlb_hw_dev *handle,
145 			      struct dlb_create_ldb_pool_args *cfg)
146 {
147 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
148 	struct dlb_cmd_response response = {0};
149 	int ret;
150 
151 	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
152 
153 	ret = dlb_hw_create_ldb_pool(&dlb_dev->hw,
154 				     handle->domain_id,
155 				     cfg,
156 				     &response);
157 
158 	*(struct dlb_cmd_response *)cfg->response = response;
159 
160 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
161 
162 	return ret;
163 }
164 
165 static int
166 dlb_pf_dir_credit_pool_create(struct dlb_hw_dev *handle,
167 			      struct dlb_create_dir_pool_args *cfg)
168 {
169 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
170 	struct dlb_cmd_response response = {0};
171 	int ret;
172 
173 	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
174 
175 	ret = dlb_hw_create_dir_pool(&dlb_dev->hw,
176 				     handle->domain_id,
177 				     cfg,
178 				     &response);
179 
180 	*(struct dlb_cmd_response *)cfg->response = response;
181 
182 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
183 
184 	return ret;
185 }
186 
187 static int
188 dlb_pf_get_cq_poll_mode(struct dlb_hw_dev *handle,
189 			enum dlb_cq_poll_modes *mode)
190 {
191 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
192 
193 	if (dlb_dev->revision >= DLB_REV_B0)
194 		*mode = DLB_CQ_POLL_MODE_SPARSE;
195 	else
196 		*mode = DLB_CQ_POLL_MODE_STD;
197 
198 	return 0;
199 }
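/*
 * Note: on rev B0 and later hardware the driver selects sparse CQ mode; the
 * port create routines below then size each CQ entry as a full cache line
 * (qe_sz = RTE_CACHE_LINE_SIZE) instead of sizeof(struct dlb_dequeue_qe).
 */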
200 
201 static int
202 dlb_pf_ldb_queue_create(struct dlb_hw_dev *handle,
203 			struct dlb_create_ldb_queue_args *cfg)
204 {
205 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
206 	struct dlb_cmd_response response = {0};
207 	int ret;
208 
209 	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
210 
211 	ret = dlb_hw_create_ldb_queue(&dlb_dev->hw,
212 				      handle->domain_id,
213 				      cfg,
214 				      &response);
215 
216 	*(struct dlb_cmd_response *)cfg->response = response;
217 
218 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
219 
220 	return ret;
221 }
222 
223 static int
224 dlb_pf_dir_queue_create(struct dlb_hw_dev *handle,
225 			struct dlb_create_dir_queue_args *cfg)
226 {
227 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
228 	struct dlb_cmd_response response = {0};
229 	int ret;
230 
231 	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
232 
233 	ret = dlb_hw_create_dir_queue(&dlb_dev->hw,
234 				      handle->domain_id,
235 				      cfg,
236 				      &response);
237 
238 	*(struct dlb_cmd_response *)cfg->response = response;
239 
240 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
241 
242 	return ret;
243 }
244 
245 static void *
246 dlb_alloc_coherent_aligned(const struct rte_memzone **mz, rte_iova_t *phys,
247 			   size_t size, int align)
248 {
249 	char mz_name[RTE_MEMZONE_NAMESIZE];
250 	uint32_t core_id = rte_lcore_id();
251 	unsigned int socket_id;
252 
253 	snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb_port_mem_%lx",
254 		 (unsigned long)rte_get_timer_cycles());
255 	if (core_id == (unsigned int)LCORE_ID_ANY)
256 		core_id = rte_get_main_lcore();
257 	socket_id = rte_lcore_to_socket_id(core_id);
258 	*mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
259 					 RTE_MEMZONE_IOVA_CONTIG, align);
260 	if (*mz == NULL) {
261 		DLB_LOG_ERR("Unable to allocate DMA memory of size %zu bytes\n",
262 			    size);
263 		*phys = 0;
264 		return NULL;
265 	}
266 	*phys = (*mz)->iova;
267 	return (*mz)->addr;
268 }
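/*
 * dlb_alloc_coherent_aligned() reserves an IOVA-contiguous memzone on the
 * caller's NUMA socket, returning the virtual address for CPU access and the
 * IOVA through *phys so the device can DMA into the same region.
 */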
269 
270 static int
271 dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,
272 		       struct dlb_create_ldb_port_args *cfg,
273 		       enum dlb_cq_poll_modes poll_mode)
274 {
275 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
276 	struct dlb_cmd_response response = {0};
277 	int ret;
278 	uint8_t *port_base;
279 	const struct rte_memzone *mz;
280 	int alloc_sz, qe_sz, cq_alloc_depth;
281 	rte_iova_t pp_dma_base;
282 	rte_iova_t pc_dma_base;
283 	rte_iova_t cq_dma_base;
284 	int is_dir = false;
285 
286 	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
287 
288 	if (poll_mode == DLB_CQ_POLL_MODE_STD)
289 		qe_sz = sizeof(struct dlb_dequeue_qe);
290 	else
291 		qe_sz = RTE_CACHE_LINE_SIZE;
292 
293 	/* The hardware always uses a CQ depth of at least
294 	 * DLB_MIN_HARDWARE_CQ_DEPTH, even though from the user
295 	 * perspective we support a depth as low as 1 for LDB ports.
296 	 */
297 	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB_MIN_HARDWARE_CQ_DEPTH);
298 
299 	/* Calculate the port memory required, including two cache lines for
300 	 * credit pop counts. Round up to the nearest cache line.
301 	 */
302 	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cq_alloc_depth * qe_sz;
303 	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
304 
305 	port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
306 					       alloc_sz, PAGE_SIZE);
307 	if (port_base == NULL)
308 		return -ENOMEM;
309 
310 	/* Lock the page in memory */
311 	ret = rte_mem_lock_page(port_base);
312 	if (ret < 0) {
313 		DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
314 		goto create_port_err;
315 	}
316 
317 	memset(port_base, 0, alloc_sz);
318 	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));
319 
320 	ret = dlb_hw_create_ldb_port(&dlb_dev->hw,
321 				     handle->domain_id,
322 				     cfg,
323 				     pc_dma_base,
324 				     cq_dma_base,
325 				     &response);
326 	if (ret)
327 		goto create_port_err;
328 
329 	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
330 	dlb_port[response.id][DLB_LDB].pp_addr =
331 		(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
332 
333 	dlb_port[response.id][DLB_LDB].cq_base =
334 		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
335 
336 	dlb_port[response.id][DLB_LDB].ldb_popcount =
337 		(void *)(uintptr_t)port_base;
338 	dlb_port[response.id][DLB_LDB].dir_popcount = (void *)(uintptr_t)
339 		(port_base + RTE_CACHE_LINE_SIZE);
340 	dlb_port[response.id][DLB_LDB].mz = mz;
341 
342 	*(struct dlb_cmd_response *)cfg->response = response;
343 
344 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
345 	return 0;
346 
347 create_port_err:
348 
349 	rte_memzone_free(mz);
350 
351 	return ret;
352 }
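/*
 * Resulting port memory layout: the first cache line holds the load-balanced
 * credit popcount, the second holds the directed credit popcount, and the CQ
 * ring begins at offset 2 * RTE_CACHE_LINE_SIZE; cq_dma_base and cq_base are
 * derived from pc_dma_base/port_base at that same offset.
 */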
353 
354 static int
355 dlb_pf_dir_port_create(struct dlb_hw_dev *handle,
356 		       struct dlb_create_dir_port_args *cfg,
357 		       enum dlb_cq_poll_modes poll_mode)
358 {
359 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
360 	struct dlb_cmd_response response = {0};
361 	int ret;
362 	uint8_t *port_base;
363 	const struct rte_memzone *mz;
364 	int alloc_sz, qe_sz;
365 	rte_iova_t pp_dma_base;
366 	rte_iova_t pc_dma_base;
367 	rte_iova_t cq_dma_base;
368 	int is_dir = true;
369 
370 	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
371 
372 	if (poll_mode == DLB_CQ_POLL_MODE_STD)
373 		qe_sz = sizeof(struct dlb_dequeue_qe);
374 	else
375 		qe_sz = RTE_CACHE_LINE_SIZE;
376 
377 	/* Calculate the port memory required, including two cache lines for
378 	 * credit pop counts. Round up to the nearest cache line.
379 	 */
380 	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cfg->cq_depth * qe_sz;
381 	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
382 
383 	port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
384 					       alloc_sz, PAGE_SIZE);
385 	if (port_base == NULL)
386 		return -ENOMEM;
387 
388 	/* Lock the page in memory */
389 	ret = rte_mem_lock_page(port_base);
390 	if (ret < 0) {
391 		DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
392 		goto create_port_err;
393 	}
394 
395 	memset(port_base, 0, alloc_sz);
396 	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));
397 
398 	ret = dlb_hw_create_dir_port(&dlb_dev->hw,
399 				     handle->domain_id,
400 				     cfg,
401 				     pc_dma_base,
402 				     cq_dma_base,
403 				     &response);
404 	if (ret)
405 		goto create_port_err;
406 
407 	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
408 	dlb_port[response.id][DLB_DIR].pp_addr =
409 		(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
410 
411 	dlb_port[response.id][DLB_DIR].cq_base =
412 		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
413 
414 	dlb_port[response.id][DLB_DIR].ldb_popcount =
415 		(void *)(uintptr_t)port_base;
416 	dlb_port[response.id][DLB_DIR].dir_popcount = (void *)(uintptr_t)
417 		(port_base + RTE_CACHE_LINE_SIZE);
418 	dlb_port[response.id][DLB_DIR].mz = mz;
419 
420 	*(struct dlb_cmd_response *)cfg->response = response;
421 
422 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
423 	return 0;
424 
425 create_port_err:
426 
427 	rte_memzone_free(mz);
428 
429 	return ret;
430 }
431 
432 static int
433 dlb_pf_get_sn_allocation(struct dlb_hw_dev *handle,
434 			 struct dlb_get_sn_allocation_args *args)
435 {
436 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
437 	struct dlb_cmd_response response = {0};
438 	int ret;
439 
440 	ret = dlb_get_group_sequence_numbers(&dlb_dev->hw, args->group);
441 
442 	response.id = ret;
443 	response.status = 0;
444 
445 	*(struct dlb_cmd_response *)args->response = response;
446 
447 	return ret;
448 }
449 
450 static int
451 dlb_pf_set_sn_allocation(struct dlb_hw_dev *handle,
452 			 struct dlb_set_sn_allocation_args *args)
453 {
454 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
455 	struct dlb_cmd_response response = {0};
456 	int ret;
457 
458 	ret = dlb_set_group_sequence_numbers(&dlb_dev->hw, args->group,
459 					     args->num);
460 
461 	response.status = 0;
462 
463 	*(struct dlb_cmd_response *)args->response = response;
464 
465 	return ret;
466 }
467 
468 static int
469 dlb_pf_get_sn_occupancy(struct dlb_hw_dev *handle,
470 			struct dlb_get_sn_occupancy_args *args)
471 {
472 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
473 	struct dlb_cmd_response response = {0};
474 	int ret;
475 
476 	ret = dlb_get_group_sequence_number_occupancy(&dlb_dev->hw,
477 						      args->group);
478 
479 	response.id = ret;
480 	response.status = 0;
481 
482 	*(struct dlb_cmd_response *)args->response = response;
483 
484 	return ret;
485 }
486 
487 static int
488 dlb_pf_sched_domain_start(struct dlb_hw_dev *handle,
489 			  struct dlb_start_domain_args *cfg)
490 {
491 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
492 	struct dlb_cmd_response response = {0};
493 	int ret;
494 
495 	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
496 
497 	ret = dlb_hw_start_domain(&dlb_dev->hw,
498 				  handle->domain_id,
499 				  cfg,
500 				  &response);
501 
502 	*(struct dlb_cmd_response *)cfg->response = response;
503 
504 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
505 
506 	return ret;
507 }
508 
509 static int
510 dlb_pf_pending_port_unmaps(struct dlb_hw_dev *handle,
511 			   struct dlb_pending_port_unmaps_args *args)
512 {
513 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
514 	struct dlb_cmd_response response = {0};
515 	int ret;
516 
517 	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
518 
519 	ret = dlb_hw_pending_port_unmaps(&dlb_dev->hw,
520 					 handle->domain_id,
521 					 args,
522 					 &response);
523 
524 	*(struct dlb_cmd_response *)args->response = response;
525 
526 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
527 
528 	return ret;
529 }
530 
531 static int
532 dlb_pf_map_qid(struct dlb_hw_dev *handle,
533 	       struct dlb_map_qid_args *cfg)
534 {
535 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
536 	struct dlb_cmd_response response = {0};
537 	int ret;
538 
539 	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
540 
541 	ret = dlb_hw_map_qid(&dlb_dev->hw,
542 			     handle->domain_id,
543 			     cfg,
544 			     &response);
545 
546 	*(struct dlb_cmd_response *)cfg->response = response;
547 
548 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
549 
550 	return ret;
551 }
552 
553 static int
554 dlb_pf_unmap_qid(struct dlb_hw_dev *handle,
555 		 struct dlb_unmap_qid_args *cfg)
556 {
557 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
558 	struct dlb_cmd_response response = {0};
559 	int ret;
560 
561 	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
562 
563 	ret = dlb_hw_unmap_qid(&dlb_dev->hw,
564 			       handle->domain_id,
565 			       cfg,
566 			       &response);
567 
568 	*(struct dlb_cmd_response *)cfg->response = response;
569 
570 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
571 
572 	return ret;
573 }
574 
575 static int
576 dlb_pf_get_ldb_queue_depth(struct dlb_hw_dev *handle,
577 			   struct dlb_get_ldb_queue_depth_args *args)
578 {
579 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
580 	struct dlb_cmd_response response = {0};
581 	int ret;
582 
583 	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
584 
585 	ret = dlb_hw_get_ldb_queue_depth(&dlb_dev->hw,
586 					 handle->domain_id,
587 					 args,
588 					 &response);
589 
590 	*(struct dlb_cmd_response *)args->response = response;
591 
592 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
593 
594 	return ret;
595 }
596 
597 static int
598 dlb_pf_get_dir_queue_depth(struct dlb_hw_dev *handle,
599 			   struct dlb_get_dir_queue_depth_args *args)
600 {
601 	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
602 	struct dlb_cmd_response response = {0};
603 	int ret = 0;
604 
605 	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
606 
607 	ret = dlb_hw_get_dir_queue_depth(&dlb_dev->hw,
608 					 handle->domain_id,
609 					 args,
610 					 &response);
611 
612 	*(struct dlb_cmd_response *)args->response = response;
613 
614 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
615 
616 	return ret;
617 }
618 
619 static void
620 dlb_pf_iface_fn_ptrs_init(void)
621 {
622 	dlb_iface_low_level_io_init = dlb_pf_low_level_io_init;
623 	dlb_iface_open = dlb_pf_open;
624 	dlb_iface_domain_close = dlb_pf_domain_close;
625 	dlb_iface_get_device_version = dlb_pf_get_device_version;
626 	dlb_iface_get_num_resources = dlb_pf_get_num_resources;
627 	dlb_iface_sched_domain_create = dlb_pf_sched_domain_create;
628 	dlb_iface_ldb_credit_pool_create = dlb_pf_ldb_credit_pool_create;
629 	dlb_iface_dir_credit_pool_create = dlb_pf_dir_credit_pool_create;
630 	dlb_iface_ldb_queue_create = dlb_pf_ldb_queue_create;
631 	dlb_iface_dir_queue_create = dlb_pf_dir_queue_create;
632 	dlb_iface_ldb_port_create = dlb_pf_ldb_port_create;
633 	dlb_iface_dir_port_create = dlb_pf_dir_port_create;
634 	dlb_iface_map_qid = dlb_pf_map_qid;
635 	dlb_iface_unmap_qid = dlb_pf_unmap_qid;
636 	dlb_iface_sched_domain_start = dlb_pf_sched_domain_start;
637 	dlb_iface_pending_port_unmaps = dlb_pf_pending_port_unmaps;
638 	dlb_iface_get_ldb_queue_depth = dlb_pf_get_ldb_queue_depth;
639 	dlb_iface_get_dir_queue_depth = dlb_pf_get_dir_queue_depth;
640 	dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
641 	dlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;
642 	dlb_iface_set_sn_allocation = dlb_pf_set_sn_allocation;
643 	dlb_iface_get_sn_occupancy = dlb_pf_get_sn_occupancy;
644 
645 }
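/*
 * These dlb_iface_* pointers form the dispatch table used by the
 * device-independent PMD code (declared via the shared driver headers
 * included above); this routine binds each of them to the PF
 * direct-hardware implementations defined in this file.
 */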
646 
647 /* PCI DEV HOOKS */
648 static int
649 dlb_eventdev_pci_init(struct rte_eventdev *eventdev)
650 {
651 	int ret = 0;
652 	struct rte_pci_device *pci_dev;
653 	struct dlb_devargs dlb_args = {
654 		.socket_id = rte_socket_id(),
655 		.max_num_events = DLB_MAX_NUM_LDB_CREDITS,
656 		.num_dir_credits_override = -1,
657 		.defer_sched = 0,
658 		.num_atm_inflights = DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE,
659 	};
660 	struct dlb_eventdev *dlb;
661 
662 	DLB_LOG_DBG("Enter with dev_id=%d socket_id=%d",
663 		    eventdev->data->dev_id, eventdev->data->socket_id);
664 
665 	dlb_entry_points_init(eventdev);
666 
667 	dlb_pf_iface_fn_ptrs_init();
668 
669 	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
670 
671 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
672 		dlb = dlb_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
673 
674 		/* Probe the DLB PF layer */
675 		dlb->qm_instance.pf_dev = dlb_probe(pci_dev);
676 
677 		if (dlb->qm_instance.pf_dev == NULL) {
678 			DLB_LOG_ERR("DLB PF Probe failed with error %d\n",
679 				    rte_errno);
680 			ret = -rte_errno;
681 			goto dlb_probe_failed;
682 		}
683 
684 		/* Were we invoked with runtime parameters? */
685 		if (pci_dev->device.devargs) {
686 			ret = dlb_parse_params(pci_dev->device.devargs->args,
687 					       pci_dev->device.devargs->name,
688 					       &dlb_args);
689 			if (ret) {
690 				DLB_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
691 					    ret, rte_errno);
692 				goto dlb_probe_failed;
693 			}
694 		}
695 
696 		ret = dlb_primary_eventdev_probe(eventdev,
697 						 EVDEV_DLB_NAME_PMD_STR,
698 						 &dlb_args);
699 	} else {
700 		ret = dlb_secondary_eventdev_probe(eventdev,
701 						   EVDEV_DLB_NAME_PMD_STR);
702 	}
703 	if (ret)
704 		goto dlb_probe_failed;
705 
706 	DLB_LOG_INFO("DLB PF Probe success\n");
707 
708 	return 0;
709 
710 dlb_probe_failed:
711 
712 	DLB_LOG_INFO("DLB PF Probe failed, ret=%d\n", ret);
713 
714 	return ret;
715 }
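/*
 * Note: only the primary process probes the PF hardware (dlb_probe()) and
 * parses devargs; a secondary process attaches to the already-probed device
 * through dlb_secondary_eventdev_probe().
 */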
716 
717 #define EVENTDEV_INTEL_VENDOR_ID 0x8086
718 
719 static const struct rte_pci_id pci_id_dlb_map[] = {
720 	{
721 		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
722 			       DLB_PF_DEV_ID)
723 	},
724 	{
725 		.vendor_id = 0,
726 	},
727 };
728 
729 static int
730 event_dlb_pci_probe(struct rte_pci_driver *pci_drv,
731 		    struct rte_pci_device *pci_dev)
732 {
733 	return rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
734 		sizeof(struct dlb_eventdev), dlb_eventdev_pci_init,
735 		EVDEV_DLB_NAME_PMD_STR);
736 }
737 
738 static int
739 event_dlb_pci_remove(struct rte_pci_device *pci_dev)
740 {
741 	return rte_event_pmd_pci_remove(pci_dev, NULL);
742 }
743 
744 static struct rte_pci_driver pci_eventdev_dlb_pmd = {
745 	.id_table = pci_id_dlb_map,
746 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
747 	.probe = event_dlb_pci_probe,
748 	.remove = event_dlb_pci_remove,
749 };
750 
751 RTE_PMD_REGISTER_PCI(event_dlb_pf, pci_eventdev_dlb_pmd);
752 RTE_PMD_REGISTER_PCI_TABLE(event_dlb_pf, pci_id_dlb_map);
753