/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/fcntl.h>
#include <sys/time.h>
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <string.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_mbuf.h>
#include <rte_ring.h>
#include <rte_errno.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_eventdev.h>
#include <rte_eventdev_pmd.h>
#include <rte_eventdev_pmd_pci.h>
#include <rte_memory.h>
#include <rte_string_fns.h>

#include "../dlb2_priv.h"
#include "../dlb2_iface.h"
#include "../dlb2_inline_fns.h"
#include "dlb2_main.h"
#include "base/dlb2_hw_types.h"
#include "base/dlb2_osdep.h"
#include "base/dlb2_resource.h"

static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD);

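/* Initialize the global dlb2_port[][] low-level I/O table. The producer-port
 * (PP) and CQ base addresses are populated when each port is created (see
 * dlb2_pf_ldb_port_create() and dlb2_pf_dir_port_create() below).
 */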
static void
dlb2_pf_low_level_io_init(void)
{
	int i;
	/* Addresses will be initialized at port create */
	for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
		/* First directed ports */
		dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
		dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
		dlb2_port[i][DLB2_DIR_PORT].mmaped = true;

		/* Now load balanced ports */
		dlb2_port[i][DLB2_LDB_PORT].pp_addr = NULL;
		dlb2_port[i][DLB2_LDB_PORT].cq_base = NULL;
		dlb2_port[i][DLB2_LDB_PORT].mmaped = true;
	}
}

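/* The PF device needs no explicit open step; this hook is a no-op. */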
static int
dlb2_pf_open(struct dlb2_hw_dev *handle, const char *name)
{
	RTE_SET_USED(handle);
	RTE_SET_USED(name);

	return 0;
}

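/* Report the hardware revision recorded in the PF device structure. */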
static int
dlb2_pf_get_device_version(struct dlb2_hw_dev *handle,
			   uint8_t *revision)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;

	*revision = dlb2_dev->revision;

	return 0;
}

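/* Enable "sparse" CQ mode for both load-balanced (LDB) and directed (DIR)
 * CQs. This matches the RTE_CACHE_LINE_SIZE-based CQ sizing in the
 * port-create functions below, where each CQ entry gets its own cache line.
 */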
static void
dlb2_pf_hardware_init(struct dlb2_hw_dev *handle)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;

	dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw);
	dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw);
}

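/* Query the number of available hardware resources. The trailing false/0
 * arguments mark this as a request made by the PF itself rather than one
 * forwarded on behalf of a virtual device.
 */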
static int
dlb2_pf_get_num_resources(struct dlb2_hw_dev *handle,
			  struct dlb2_get_num_resources_args *rsrcs)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;

	return dlb2_hw_get_num_resources(&dlb2_dev->hw, rsrcs, false, 0);
}

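/* The PF PMD always runs with sparse CQ mode (enabled in
 * dlb2_pf_hardware_init()), so report it unconditionally.
 */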
static int
dlb2_pf_get_cq_poll_mode(struct dlb2_hw_dev *handle,
			 enum dlb2_cq_poll_modes *mode)
{
	RTE_SET_USED(handle);

	*mode = DLB2_CQ_POLL_MODE_SPARSE;

	return 0;
}

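/* Create a scheduling domain. Creation is refused if a previous domain reset
 * failed; the new domain ID and status are returned through arg->response.
 */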
static int
dlb2_pf_sched_domain_create(struct dlb2_hw_dev *handle,
			    struct dlb2_create_sched_domain_args *arg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	if (dlb2_dev->domain_reset_failed) {
		response.status = DLB2_ST_DOMAIN_RESET_FAILED;
		ret = -EINVAL;
		goto done;
	}

	ret = dlb2_pf_create_sched_domain(&dlb2_dev->hw, arg, &response);
	if (ret)
		goto done;

done:

	arg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

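/* Reset this eventdev's hardware scheduling domain, releasing its queues and
 * ports. The hook has no way to report failure, so errors are only logged.
 */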
static void
dlb2_pf_domain_reset(struct dlb2_eventdev *dlb2)
{
	struct dlb2_dev *dlb2_dev;
	int ret;

	dlb2_dev = (struct dlb2_dev *)dlb2->qm_instance.pf_dev;
	ret = dlb2_pf_reset_domain(&dlb2_dev->hw, dlb2->qm_instance.domain_id);
	if (ret)
		DLB2_LOG_ERR("dlb2_pf_reset_domain err %d", ret);
}

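/* Create a load-balanced queue in this eventdev's domain; the new queue ID
 * and status come back through cfg->response.
 */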
static int
dlb2_pf_ldb_queue_create(struct dlb2_hw_dev *handle,
			 struct dlb2_create_ldb_queue_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_pf_create_ldb_queue(&dlb2_dev->hw,
				       handle->domain_id,
				       cfg,
				       &response);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

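/* Sequence-number (SN) group accessors: query how many sequence numbers a
 * group has allocated and how many are currently in use, and change the
 * per-group allocation. Sequence numbers back ordered queue scheduling.
 */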
static int
dlb2_pf_get_sn_occupancy(struct dlb2_hw_dev *handle,
			 struct dlb2_get_sn_occupancy_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	ret = dlb2_get_group_sequence_number_occupancy(&dlb2_dev->hw,
						       args->group);

	response.id = ret;
	response.status = 0;

	args->response = response;

	return ret;
}

static int
dlb2_pf_get_sn_allocation(struct dlb2_hw_dev *handle,
			  struct dlb2_get_sn_allocation_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	ret = dlb2_get_group_sequence_numbers(&dlb2_dev->hw, args->group);

	response.id = ret;
	response.status = 0;

	args->response = response;

	return ret;
}

static int
dlb2_pf_set_sn_allocation(struct dlb2_hw_dev *handle,
			  struct dlb2_set_sn_allocation_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	ret = dlb2_set_group_sequence_numbers(&dlb2_dev->hw, args->group,
					      args->num);

	response.status = 0;

	args->response = response;

	return ret;
}

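/* Allocate an IOVA-contiguous, aligned memzone for a port's CQ memory and
 * return both its virtual address and its IOVA (the address the device
 * writes to). The memzone name is derived from the timer counter so that
 * repeated calls get unique names.
 */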
static void *
dlb2_alloc_coherent_aligned(const struct rte_memzone **mz, uintptr_t *phys,
			    size_t size, int align)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t core_id = rte_lcore_id();
	unsigned int socket_id;

	snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb2_pf_%lx",
		 (unsigned long)rte_get_timer_cycles());
	if (core_id == (unsigned int)LCORE_ID_ANY)
		core_id = rte_get_main_lcore();
	socket_id = rte_lcore_to_socket_id(core_id);
	*mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
					 RTE_MEMZONE_IOVA_CONTIG, align);
	if (*mz == NULL) {
		DLB2_LOG_DBG("Unable to allocate DMA memory of size %zu bytes - %s\n",
			     size, rte_strerror(rte_errno));
		*phys = 0;
		return NULL;
	}
	*phys = (*mz)->iova;
	return (*mz)->addr;
}

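/* Create a load-balanced port: size and allocate the CQ memory (one cache
 * line per QE in sparse mode), lock and zero it, create the port in
 * hardware, then record the producer-port MMIO address and CQ base in
 * dlb2_port[] for use by the fast-path enqueue/dequeue code.
 */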
static int
dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,
			struct dlb2_create_ldb_port_args *cfg,
			enum dlb2_cq_poll_modes poll_mode)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	struct dlb2_port_memory port_memory;
	int ret, cq_alloc_depth;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz;
	phys_addr_t cq_base;
	phys_addr_t pp_base;
	int is_dir = false;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	if (poll_mode == DLB2_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb2_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* Calculate the port memory required, and round up to the nearest
	 * cache line.
	 */
	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB2_MIN_HARDWARE_CQ_DEPTH);
	alloc_sz = cq_alloc_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
						PAGE_SIZE);
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);

	ret = dlb2_pf_create_ldb_port(&dlb2_dev->hw,
				      handle->domain_id,
				      cfg,
				      cq_base,
				      &response);
	if (ret)
		goto create_port_err;

	pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
	dlb2_port[response.id][DLB2_LDB_PORT].pp_addr =
		(void *)(pp_base + (PAGE_SIZE * response.id));

	dlb2_port[response.id][DLB2_LDB_PORT].cq_base = (void *)(port_base);
	memset(&port_memory, 0, sizeof(port_memory));

	dlb2_port[response.id][DLB2_LDB_PORT].mz = mz;

	dlb2_list_init_head(&port_memory.list);

	cfg->response = response;

	return 0;

create_port_err:

	rte_memzone_free(mz);

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);
	return ret;
}

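/* Create a directed port. The flow mirrors dlb2_pf_ldb_port_create() above,
 * but uses the directed producer-port region and the dlb2_port[][DLB2_DIR_PORT]
 * table entries.
 */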
static int
dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
			struct dlb2_create_dir_port_args *cfg,
			enum dlb2_cq_poll_modes poll_mode)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	struct dlb2_port_memory port_memory;
	int ret;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz;
	phys_addr_t cq_base;
	phys_addr_t pp_base;
	int is_dir = true;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	if (poll_mode == DLB2_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb2_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* Calculate the port memory required, and round up to the nearest
	 * cache line.
	 */
	alloc_sz = cfg->cq_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
						PAGE_SIZE);
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);

	ret = dlb2_pf_create_dir_port(&dlb2_dev->hw,
				      handle->domain_id,
				      cfg,
				      cq_base,
				      &response);
	if (ret)
		goto create_port_err;

	pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
	dlb2_port[response.id][DLB2_DIR_PORT].pp_addr =
		(void *)(pp_base + (PAGE_SIZE * response.id));

	dlb2_port[response.id][DLB2_DIR_PORT].cq_base =
		(void *)(port_base);
	memset(&port_memory, 0, sizeof(port_memory));

	dlb2_port[response.id][DLB2_DIR_PORT].mz = mz;

	dlb2_list_init_head(&port_memory.list);

	cfg->response = response;

	return 0;

create_port_err:

	rte_memzone_free(mz);

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

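/* Create a directed queue in this eventdev's domain. */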
static int
dlb2_pf_dir_queue_create(struct dlb2_hw_dev *handle,
			 struct dlb2_create_dir_queue_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_pf_create_dir_queue(&dlb2_dev->hw,
				       handle->domain_id,
				       cfg,
				       &response);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

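/* Map (link) a queue to a load-balanced port. */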
static int
dlb2_pf_map_qid(struct dlb2_hw_dev *handle,
		struct dlb2_map_qid_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_map_qid(&dlb2_dev->hw,
			      handle->domain_id,
			      cfg,
			      &response,
			      false,
			      0);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

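/* Unmap (unlink) a queue from a load-balanced port. */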
static int
dlb2_pf_unmap_qid(struct dlb2_hw_dev *handle,
		  struct dlb2_unmap_qid_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_unmap_qid(&dlb2_dev->hw,
				handle->domain_id,
				cfg,
				&response,
				false,
				0);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

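/* Query how many queue unmap operations are still in progress for a port. */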
static int
dlb2_pf_pending_port_unmaps(struct dlb2_hw_dev *handle,
			    struct dlb2_pending_port_unmaps_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_pending_port_unmaps(&dlb2_dev->hw,
					  handle->domain_id,
					  args,
					  &response,
					  false,
					  0);

	args->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

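/* Start a configured scheduling domain so the hardware begins scheduling
 * events to its queues and ports.
 */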
static int
dlb2_pf_sched_domain_start(struct dlb2_hw_dev *handle,
			   struct dlb2_start_domain_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_pf_start_domain(&dlb2_dev->hw,
				   handle->domain_id,
				   cfg,
				   &response);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

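/* Query the current depth (number of queued events) of a load-balanced
 * queue; the directed-queue variant below is analogous.
 */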
static int
dlb2_pf_get_ldb_queue_depth(struct dlb2_hw_dev *handle,
			    struct dlb2_get_ldb_queue_depth_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_get_ldb_queue_depth(&dlb2_dev->hw,
					  handle->domain_id,
					  args,
					  &response,
					  false,
					  0);

	args->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle,
			    struct dlb2_get_dir_queue_depth_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret = 0;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_get_dir_queue_depth(&dlb2_dev->hw,
					  handle->domain_id,
					  args,
					  &response,
					  false,
					  0);

	args->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

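/* Wire the dlb2_iface_* function pointers declared in ../dlb2_iface.h to the
 * PF-specific implementations above. The device-independent PMD code calls
 * through these pointers, keeping it decoupled from the PF back end.
 */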
static void
dlb2_pf_iface_fn_ptrs_init(void)
{
	dlb2_iface_low_level_io_init = dlb2_pf_low_level_io_init;
	dlb2_iface_open = dlb2_pf_open;
	dlb2_iface_domain_reset = dlb2_pf_domain_reset;
	dlb2_iface_get_device_version = dlb2_pf_get_device_version;
	dlb2_iface_hardware_init = dlb2_pf_hardware_init;
	dlb2_iface_get_num_resources = dlb2_pf_get_num_resources;
	dlb2_iface_get_cq_poll_mode = dlb2_pf_get_cq_poll_mode;
	dlb2_iface_sched_domain_create = dlb2_pf_sched_domain_create;
	dlb2_iface_ldb_queue_create = dlb2_pf_ldb_queue_create;
	dlb2_iface_ldb_port_create = dlb2_pf_ldb_port_create;
	dlb2_iface_dir_queue_create = dlb2_pf_dir_queue_create;
	dlb2_iface_dir_port_create = dlb2_pf_dir_port_create;
	dlb2_iface_map_qid = dlb2_pf_map_qid;
	dlb2_iface_unmap_qid = dlb2_pf_unmap_qid;
	dlb2_iface_get_ldb_queue_depth = dlb2_pf_get_ldb_queue_depth;
	dlb2_iface_get_dir_queue_depth = dlb2_pf_get_dir_queue_depth;
	dlb2_iface_sched_domain_start = dlb2_pf_sched_domain_start;
	dlb2_iface_pending_port_unmaps = dlb2_pf_pending_port_unmaps;
	dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;
	dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;
	dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy;
}

/* PCI DEV HOOKS */
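/* Per-device init callback run by the eventdev PCI probe helper. In the
 * primary process this probes the PF hardware, parses any devargs supplied
 * on the PCI device (for example a "max_num_events=<n>" style key/value
 * list handled by dlb2_parse_params()), and completes eventdev setup; a
 * secondary process only attaches to the already-probed device.
 */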
static int
dlb2_eventdev_pci_init(struct rte_eventdev *eventdev)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	struct dlb2_devargs dlb2_args = {
		.socket_id = rte_socket_id(),
		.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
		.num_dir_credits_override = -1,
		.qid_depth_thresholds = { {0} },
		.cos_id = DLB2_COS_DEFAULT
	};
	struct dlb2_eventdev *dlb2;

	DLB2_LOG_DBG("Enter with dev_id=%d socket_id=%d",
		     eventdev->data->dev_id, eventdev->data->socket_id);

	dlb2_pf_iface_fn_ptrs_init();

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dlb2 = dlb2_pmd_priv(eventdev); /* rte_zmalloc_socket mem */

		/* Probe the DLB2 PF layer */
		dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev);

		if (dlb2->qm_instance.pf_dev == NULL) {
			DLB2_LOG_ERR("DLB2 PF Probe failed with error %d\n",
				     rte_errno);
			ret = -rte_errno;
			goto dlb2_probe_failed;
		}

		/* Were we invoked with runtime parameters? */
		if (pci_dev->device.devargs) {
			ret = dlb2_parse_params(pci_dev->device.devargs->args,
						pci_dev->device.devargs->name,
						&dlb2_args);
			if (ret) {
				DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
					     ret, rte_errno);
				goto dlb2_probe_failed;
			}
		}

		ret = dlb2_primary_eventdev_probe(eventdev,
						  event_dlb2_pf_name,
						  &dlb2_args);
	} else {
		ret = dlb2_secondary_eventdev_probe(eventdev,
						    event_dlb2_pf_name);
	}
	if (ret)
		goto dlb2_probe_failed;

	DLB2_LOG_INFO("DLB2 PF Probe success\n");

	return 0;

dlb2_probe_failed:

	DLB2_LOG_INFO("DLB2 PF Probe failed, ret=%d\n", ret);

	return ret;
}

#define EVENTDEV_INTEL_VENDOR_ID 0x8086

static const struct rte_pci_id pci_id_dlb2_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       PCI_DEVICE_ID_INTEL_DLB2_PF)
	},
	{
		.vendor_id = 0,
	},
};

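/* Standard PCI probe/remove hooks: allocate an eventdev with a
 * dlb2_eventdev-sized private data area and initialize it via
 * dlb2_eventdev_pci_init(), or tear it down on remove.
 */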
static int
event_dlb2_pci_probe(struct rte_pci_driver *pci_drv,
		     struct rte_pci_device *pci_dev)
{
	int ret;

	ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
					     sizeof(struct dlb2_eventdev),
					     dlb2_eventdev_pci_init,
					     event_dlb2_pf_name);
	if (ret) {
		DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
				"ret=%d\n", ret);
	}

	return ret;
}

static int
event_dlb2_pci_remove(struct rte_pci_device *pci_dev)
{
	int ret;

	ret = rte_event_pmd_pci_remove(pci_dev, NULL);

	if (ret) {
		DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
				"ret=%d\n", ret);
	}

	return ret;

}

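/* The PF PMD accesses producer ports and CQs through the device's BARs, so
 * the PCI bus must map them (RTE_PCI_DRV_NEED_MAPPING).
 */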
static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
	.id_table = pci_id_dlb2_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_dlb2_pci_probe,
	.remove = event_dlb2_pci_remove,
};

RTE_PMD_REGISTER_PCI(event_dlb2_pf, pci_eventdev_dlb2_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_pf, pci_id_dlb2_map);