/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _DLB2_PRIV_H_
#define _DLB2_PRIV_H_

#include <emmintrin.h>
#include <stdbool.h>

#include <rte_eventdev.h>
#include <rte_config.h>
#include "dlb2_user.h"
#include "dlb2_log.h"
#include "rte_pmd_dlb2.h"

#ifndef RTE_LIBRTE_PMD_DLB2_QUELL_STATS
#define DLB2_INC_STAT(_stat, _incr_val) ((_stat) += _incr_val)
#else
#define DLB2_INC_STAT(_stat, _incr_val)
#endif
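
/*
 * Usage example (illustrative only): with stats enabled,
 *
 *   DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, 1);
 *
 * expands to a plain "+= 1" on the counter; when
 * RTE_LIBRTE_PMD_DLB2_QUELL_STATS is defined it expands to nothing, so the
 * bookkeeping compiles out of the datapath entirely.
 */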

#define EVDEV_DLB2_NAME_PMD dlb2_event

/* command line arg strings */
#define NUMA_NODE_ARG "numa_node"
#define DLB2_MAX_NUM_EVENTS "max_num_events"
#define DLB2_NUM_DIR_CREDITS "num_dir_credits"
#define DEV_ID_ARG "dev_id"
#define DLB2_DEFER_SCHED_ARG "defer_sched"
#define DLB2_QID_DEPTH_THRESH_ARG "qid_depth_thresh"
#define DLB2_COS_ARG "cos"
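
/*
 * Illustrative devargs string (a sketch; see the DPDK dlb2 guide for the
 * authoritative syntax). Parameters such as
 *
 *   "max_num_events=2048,num_dir_credits=1024,cos=0"
 *
 * are parsed by dlb2_parse_params() (declared at the bottom of this file)
 * into struct dlb2_devargs.
 */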

/* Begin HW related defines and structs */

#define DLB2_MAX_NUM_DOMAINS 32
#define DLB2_MAX_NUM_VFS 16
#define DLB2_MAX_NUM_LDB_QUEUES 32
#define DLB2_MAX_NUM_LDB_PORTS 64
#define DLB2_MAX_NUM_DIR_PORTS 64
#define DLB2_MAX_NUM_DIR_QUEUES 64
#define DLB2_MAX_NUM_FLOWS (64 * 1024)
#define DLB2_MAX_NUM_LDB_CREDITS (8 * 1024)
#define DLB2_MAX_NUM_DIR_CREDITS (2 * 1024)
#define DLB2_MAX_NUM_LDB_CREDIT_POOLS 64
#define DLB2_MAX_NUM_DIR_CREDIT_POOLS 64
#define DLB2_MAX_NUM_HIST_LIST_ENTRIES 2048
#define DLB2_MAX_NUM_AQOS_ENTRIES 2048
#define DLB2_MAX_NUM_QIDS_PER_LDB_CQ 8
#define DLB2_QID_PRIORITIES 8
#define DLB2_MAX_DEVICE_PATH 32
#define DLB2_MIN_DEQUEUE_TIMEOUT_NS 1
/* Note: "- 1" here to support the timeout range check in eventdev_autotest */
#define DLB2_MAX_DEQUEUE_TIMEOUT_NS (UINT32_MAX - 1)
#define DLB2_SW_CREDIT_BATCH_SZ 32
#define DLB2_NUM_SN_GROUPS 2
#define DLB2_MAX_LDB_SN_ALLOC 1024
#define DLB2_MAX_QUEUE_DEPTH_THRESHOLD 8191

/* 2048 total hist list entries and 64 total ldb ports, which
 * makes for 2048/64 == 32 hist list entries per port. However, CQ
 * depth must be a power of 2 and must also be >= HIST LIST entries.
 * As a result we just limit the maximum dequeue depth to 32.
 */
#define DLB2_MIN_CQ_DEPTH 1
#define DLB2_MAX_CQ_DEPTH 32
#define DLB2_MIN_HARDWARE_CQ_DEPTH 8
#define DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
	DLB2_MAX_CQ_DEPTH
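
/*
 * Derivation check (illustrative; not part of the original header). The
 * comment above works out to:
 *
 *   DLB2_MAX_NUM_HIST_LIST_ENTRIES / DLB2_MAX_NUM_LDB_PORTS
 *       == 2048 / 64 == 32 == DLB2_MAX_CQ_DEPTH
 *
 * RTE_BUILD_BUG_ON() from <rte_debug.h> could assert this at compile time.
 */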

/*
 * Static per queue/port provisioning values
 */
#define DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE 64

#define CQ_BASE(is_dir) ((is_dir) ? DLB2_DIR_CQ_BASE : DLB2_LDB_CQ_BASE)
#define CQ_SIZE(is_dir) ((is_dir) ? DLB2_DIR_CQ_MAX_SIZE : \
				    DLB2_LDB_CQ_MAX_SIZE)
#define PP_BASE(is_dir) ((is_dir) ? DLB2_DIR_PP_BASE : DLB2_LDB_PP_BASE)

#define PAGE_SIZE (sysconf(_SC_PAGESIZE))

#define DLB2_NUM_QES_PER_CACHE_LINE 4

#define DLB2_MAX_ENQUEUE_DEPTH 64
#define DLB2_MIN_ENQUEUE_DEPTH 4

#define DLB2_NAME_SIZE 64

#define DLB2_1K 1024
#define DLB2_2K (2 * DLB2_1K)
#define DLB2_4K (4 * DLB2_1K)
#define DLB2_16K (16 * DLB2_1K)
#define DLB2_32K (32 * DLB2_1K)
#define DLB2_1MB (DLB2_1K * DLB2_1K)
#define DLB2_16MB (16 * DLB2_1MB)

/* Use the upper 3 bits of the event priority to select the DLB2 priority */
#define EV_TO_DLB2_PRIO(x) ((x) >> 5)
#define DLB2_TO_EV_PRIO(x) ((x) << 5)
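
/*
 * Worked example (illustrative): RTE_EVENT_DEV_PRIORITY_NORMAL (128) maps to
 * DLB2 priority 128 >> 5 == 4 and back to 4 << 5 == 128. The low 5 bits of
 * the event priority are discarded, so only 8 distinct levels survive a
 * round trip.
 */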

enum dlb2_hw_port_types {
	DLB2_LDB_PORT,
	DLB2_DIR_PORT,
	DLB2_NUM_PORT_TYPES /* Must be last */
};

enum dlb2_hw_queue_types {
	DLB2_LDB_QUEUE,
	DLB2_DIR_QUEUE,
	DLB2_NUM_QUEUE_TYPES /* Must be last */
};

#define PORT_TYPE(p) ((p)->is_directed ? DLB2_DIR_PORT : DLB2_LDB_PORT)

/* Do not change - must match hardware! */
enum dlb2_hw_sched_type {
	DLB2_SCHED_ATOMIC = 0,
	DLB2_SCHED_UNORDERED,
	DLB2_SCHED_ORDERED,
	DLB2_SCHED_DIRECTED,
	/* DLB2_NUM_HW_SCHED_TYPES must be last */
	DLB2_NUM_HW_SCHED_TYPES
};

struct dlb2_hw_rsrcs {
	int32_t nb_events_limit;
	uint32_t num_queues;		/* Total queues (lb + dir) */
	uint32_t num_ldb_queues;	/* Number of available ldb queues */
	uint32_t num_ldb_ports;         /* Number of load balanced ports */
	uint32_t num_dir_ports;         /* Number of directed ports */
	uint32_t num_ldb_credits;       /* Number of load balanced credits */
	uint32_t num_dir_credits;       /* Number of directed credits */
	uint32_t reorder_window_size;   /* Size of reorder window */
};
struct dlb2_hw_resource_info {
	/** Max resources that can be provided */
	struct dlb2_hw_rsrcs hw_rsrc_max;
	int num_sched_domains;
	uint32_t socket_id;
};

enum dlb2_enqueue_type {
	/**
	 * New : Used to inject a new packet into the QM.
	 */
	DLB2_ENQ_NEW,
	/**
	 * Forward : Enqueues a packet, and
	 *  - if atomic: release any lock it holds in the QM
	 *  - if ordered: release the packet for egress re-ordering
	 */
	DLB2_ENQ_FWD,
	/**
	 * Enqueue Drop : Release an inflight packet. Must be called with
	 * event == NULL. Used to drop a packet.
	 *
	 * Note that all packets dequeued from a load-balanced port must be
	 * released, either with DLB2_ENQ_DROP or DLB2_ENQ_FWD.
	 */
	DLB2_ENQ_DROP,

	/* marker for array sizing etc. */
	_DLB2_NB_ENQ_TYPES
};
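
/*
 * These appear to mirror the rte_event operations (an inference from the
 * descriptions above, not a statement in this header): DLB2_ENQ_NEW ~
 * RTE_EVENT_OP_NEW, DLB2_ENQ_FWD ~ RTE_EVENT_OP_FORWARD, DLB2_ENQ_DROP ~
 * RTE_EVENT_OP_RELEASE.
 */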

/* hw-specific format - do not change */

struct dlb2_event_type {
	uint8_t major:4;
	uint8_t unused:4;
	uint8_t sub;
};

union dlb2_opaque_data {
	uint16_t opaque_data;
	struct dlb2_event_type event_type;
};

struct dlb2_msg_info {
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
};

#define DLB2_NEW_CMD_BYTE 0x08
#define DLB2_FWD_CMD_BYTE 0x0A
#define DLB2_COMP_CMD_BYTE 0x02
#define DLB2_POP_CMD_BYTE 0x01
#define DLB2_NOOP_CMD_BYTE 0x00
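
/*
 * Decoding note (derived from the cmd_byte bit-field union in
 * struct dlb2_enqueue_qe below; low bit first: cq_token, qe_comp, qe_frag,
 * qe_valid):
 *   0x08 NEW  = qe_valid
 *   0x0A FWD  = qe_valid | qe_comp
 *   0x02 COMP = qe_comp
 *   0x01 POP  = cq_token
 *   0x00 NOOP = no bits set
 */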

/* hw-specific format - do not change */
struct dlb2_enqueue_qe {
	uint64_t data;
	/* Word 3 */
	union dlb2_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	/* Word 4 */
	uint16_t lock_id;
	uint8_t meas_lat:1;
	uint8_t rsvd1:2;
	uint8_t no_dec:1;
	uint8_t cmp_id:4;
	union {
		uint8_t cmd_byte;
		struct {
			uint8_t cq_token:1;
			uint8_t qe_comp:1;
			uint8_t qe_frag:1;
			uint8_t qe_valid:1;
			uint8_t rsvd3:1;
			uint8_t error:1;
			uint8_t rsvd:2;
		};
	};
};

/* hw-specific format - do not change */
struct dlb2_cq_pop_qe {
	uint64_t data;
	union dlb2_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	uint16_t tokens:10;
	uint16_t rsvd2:6;
	uint8_t meas_lat:1;
	uint8_t rsvd1:2;
	uint8_t no_dec:1;
	uint8_t cmp_id:4;
	union {
		uint8_t cmd_byte;
		struct {
			uint8_t cq_token:1;
			uint8_t qe_comp:1;
			uint8_t qe_frag:1;
			uint8_t qe_valid:1;
			uint8_t rsvd3:1;
			uint8_t error:1;
			uint8_t rsvd:2;
		};
	};
};

/* hw-specific format - do not change */
struct dlb2_dequeue_qe {
	uint64_t data;
	union dlb2_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	uint16_t flow_id:16; /* was pp_id in v1 */
	uint8_t debug;
	uint8_t cq_gen:1;
	uint8_t qid_depth:2; /* 2 bits in v2 */
	uint8_t rsvd1:2;
	uint8_t error:1;
	uint8_t rsvd2:2;
};
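
/*
 * Note (explanatory, not from the original header): cq_gen is the generation
 * bit used for valid-entry detection when polling a CQ. Software tracks the
 * expected phase (see gen_bit, cq_depth_mask and gen_bit_shift in struct
 * dlb2_port below) and flips it each time the CQ index wraps.
 */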

union dlb2_port_config {
	struct dlb2_create_ldb_port_args ldb;
	struct dlb2_create_dir_port_args dir;
};

enum dlb2_port_state {
	PORT_CLOSED,
	PORT_STARTED,
	PORT_STOPPED
};

enum dlb2_configuration_state {
	/* The resource has not been configured */
	DLB2_NOT_CONFIGURED,
	/* The resource was configured, but the device was stopped */
	DLB2_PREV_CONFIGURED,
	/* The resource is currently configured */
	DLB2_CONFIGURED
};

struct dlb2_port {
	uint32_t id;
	bool is_directed;
	bool gen_bit;
	uint16_t dir_credits;
	uint32_t dequeue_depth;
	enum dlb2_token_pop_mode token_pop_mode;
	union dlb2_port_config cfg;
	/* use __atomic builtins; see the access sketch after this struct */
	uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES];
	uint16_t cached_ldb_credits;
	uint16_t ldb_credits;
	uint16_t cached_dir_credits;
	bool int_armed;
	uint16_t owed_tokens;
	int16_t issued_releases;
	int16_t token_pop_thresh;
	int cq_depth;
	uint16_t cq_idx;
	uint16_t cq_idx_unmasked;
	uint16_t cq_depth_mask;
	uint16_t gen_bit_shift;
	enum dlb2_port_state state;
	enum dlb2_configuration_state config_state;
	int num_mapped_qids;
	uint8_t *qid_mappings;
	struct dlb2_enqueue_qe *qe4; /* Cache line's worth of QEs (4) */
	struct dlb2_enqueue_qe *int_arm_qe;
	struct dlb2_cq_pop_qe *consume_qe;
	struct dlb2_eventdev *dlb2; /* back ptr */
	struct dlb2_eventdev_port *ev_port; /* back ptr */
};
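
/*
 * Credit-pool access sketch. This is illustrative only: the helper name
 * dlb2_take_credits_sketch is hypothetical and the real fast path lives in
 * the PMD's .c files. It shows the intended __atomic usage on credit_pool:
 * read the shared count, then claim credits with a single compare-exchange
 * so concurrent ports cannot oversubscribe the pool.
 */
static inline uint32_t
dlb2_take_credits_sketch(uint32_t *pool, uint32_t num)
{
	uint32_t avail = __atomic_load_n(pool, __ATOMIC_SEQ_CST);

	if (avail < num ||
	    !__atomic_compare_exchange_n(pool, &avail, avail - num, false,
					 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		return 0; /* pool too low, or lost the race; caller retries */

	return num;
}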

/* Per-process per-port mmio and memory pointers */
struct process_local_port_data {
	uint64_t *pp_addr;
	struct dlb2_dequeue_qe *cq_base;
	const struct rte_memzone *mz;
	bool mmaped;
};

struct dlb2_eventdev;

struct dlb2_config {
	int configured;
	int reserved;
	uint32_t num_ldb_credits;
	uint32_t num_dir_credits;
	struct dlb2_create_sched_domain_args resources;
};

enum dlb2_cos {
	DLB2_COS_DEFAULT = -1,
	DLB2_COS_0 = 0,
	DLB2_COS_1,
	DLB2_COS_2,
	DLB2_COS_3
};

struct dlb2_hw_dev {
	struct dlb2_config cfg;
	struct dlb2_hw_resource_info info;
	void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb2_dev) */
	uint32_t domain_id;
	enum dlb2_cos cos_id;
	rte_spinlock_t resource_lock; /* for MP support */
} __rte_cache_aligned;

/* End HW related defines and structs */

/* Begin DLB2 PMD Eventdev related defines and structs */

#define DLB2_MAX_NUM_QUEUES \
	(DLB2_MAX_NUM_DIR_QUEUES + DLB2_MAX_NUM_LDB_QUEUES)

#define DLB2_MAX_NUM_PORTS (DLB2_MAX_NUM_DIR_PORTS + DLB2_MAX_NUM_LDB_PORTS)
#define DLB2_MAX_INPUT_QUEUE_DEPTH 256

/** Structure to hold the queue to port link establishment attributes */
struct dlb2_event_queue_link {
	uint8_t queue_id;
	uint8_t priority;
	bool mapped;
	bool valid;
};

struct dlb2_traffic_stats {
	uint64_t rx_ok;
	uint64_t rx_drop;
	uint64_t rx_interrupt_wait;
	uint64_t rx_umonitor_umwait;
	uint64_t tx_ok;
	uint64_t total_polls;
	uint64_t zero_polls;
	uint64_t tx_nospc_ldb_hw_credits;
	uint64_t tx_nospc_dir_hw_credits;
	uint64_t tx_nospc_inflight_max;
	uint64_t tx_nospc_new_event_limit;
	uint64_t tx_nospc_inflight_credits;
};

/* DLB2 HW sets the 2-bit qid_depth in rx QEs based on the programmable depth
 * threshold. The global default value in config/common_base (or rte_config.h)
 * can be overridden on a per-qid basis using a vdev command line parameter.
 * 3: depth > threshold
 * 2: threshold >= depth > 3/4 threshold
 * 1: 3/4 threshold >= depth > 1/2 threshold
 * 0: depth <= 1/2 threshold.
 */
#define DLB2_QID_DEPTH_LE50 0
#define DLB2_QID_DEPTH_GT50_LE75 1
#define DLB2_QID_DEPTH_GT75_LE100 2
#define DLB2_QID_DEPTH_GT100 3
#define DLB2_NUM_QID_DEPTH_STAT_VALS 4 /* 2 bits */
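
/*
 * Worked example (illustrative): with a threshold of 256, a QE stamped at
 * queue depth 300 reports DLB2_QID_DEPTH_GT100 (3); depth 200 reports
 * GT75_LE100 (2), since 192 < 200 <= 256; depth 150 reports GT50_LE75 (1);
 * and depth 100 reports LE50 (0), since 100 <= 128.
 */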

struct dlb2_queue_stats {
	uint64_t enq_ok;
	uint64_t qid_depth[DLB2_NUM_QID_DEPTH_STAT_VALS];
};

struct dlb2_port_stats {
	struct dlb2_traffic_stats traffic;
	uint64_t tx_op_cnt[4]; /* indexed by rte_event.op */
	uint64_t tx_implicit_rel;
	uint64_t tx_sched_cnt[DLB2_NUM_HW_SCHED_TYPES];
	uint64_t tx_invalid;
	uint64_t rx_sched_cnt[DLB2_NUM_HW_SCHED_TYPES];
	uint64_t rx_sched_invalid;
	struct dlb2_queue_stats queue[DLB2_MAX_NUM_QUEUES];
};

struct dlb2_eventdev_port {
	struct dlb2_port qm_port; /* hw specific data structure */
	struct rte_event_port_conf conf; /* user-supplied configuration */
	uint16_t inflight_credits; /* num credits this port has right now */
	uint16_t credit_update_quanta;
	struct dlb2_eventdev *dlb2; /* backlink optimization */
	struct dlb2_port_stats stats __rte_cache_aligned;
	struct dlb2_event_queue_link link[DLB2_MAX_NUM_QIDS_PER_LDB_CQ];
	int num_links;
	uint32_t id; /* port id */
	/* num releases yet to be completed on this port.
	 * Only applies to load-balanced ports.
	 */
	uint16_t outstanding_releases;
	uint16_t inflight_max; /* app requested max inflights for this port */
	/* setup_done is set when the event port is setup */
	bool setup_done;
	/* enq_configured is set when the qm port is created */
	bool enq_configured;
	uint8_t implicit_release; /* release events before dequeueing */
} __rte_cache_aligned;

struct dlb2_queue {
	uint32_t num_qid_inflights; /* User config */
	uint32_t num_atm_inflights; /* User config */
	enum dlb2_configuration_state config_state;
	int sched_type; /* LB queue only */
	uint32_t id;
	bool is_directed;
};

struct dlb2_eventdev_queue {
	struct dlb2_queue qm_queue;
	struct rte_event_queue_conf conf; /* User config */
	int depth_threshold; /* use default if 0 */
	uint32_t id;
	bool setup_done;
	uint8_t num_links;
};

enum dlb2_run_state {
	DLB2_RUN_STATE_STOPPED = 0,
	DLB2_RUN_STATE_STOPPING,
	DLB2_RUN_STATE_STARTING,
	DLB2_RUN_STATE_STARTED
};

struct dlb2_eventdev {
	struct dlb2_eventdev_port ev_ports[DLB2_MAX_NUM_PORTS];
	struct dlb2_eventdev_queue ev_queues[DLB2_MAX_NUM_QUEUES];
	uint8_t qm_ldb_to_ev_queue_id[DLB2_MAX_NUM_QUEUES];
	uint8_t qm_dir_to_ev_queue_id[DLB2_MAX_NUM_QUEUES];
	/* store num stats and offset of the stats for each queue */
	uint16_t xstats_count_per_qid[DLB2_MAX_NUM_QUEUES];
	uint16_t xstats_offset_for_qid[DLB2_MAX_NUM_QUEUES];
	/* store num stats and offset of the stats for each port */
	uint16_t xstats_count_per_port[DLB2_MAX_NUM_PORTS];
	uint16_t xstats_offset_for_port[DLB2_MAX_NUM_PORTS];
	struct dlb2_get_num_resources_args hw_rsrc_query_results;
	uint32_t xstats_count_mode_queue;
	struct dlb2_hw_dev qm_instance; /* strictly hw related */
	uint64_t global_dequeue_wait_ticks;
	struct dlb2_xstats_entry *xstats;
	struct rte_eventdev *event_dev; /* backlink to dev */
	uint32_t xstats_count_mode_dev;
	uint32_t xstats_count_mode_port;
	uint32_t xstats_count;
	uint32_t inflights; /* use __atomic builtins */
	uint32_t new_event_limit;
	int max_num_events_override;
	int num_dir_credits_override;
	volatile enum dlb2_run_state run_state;
	uint16_t num_dir_queues; /* total num of evdev dir queues requested */
	uint16_t num_dir_credits;
	uint16_t num_ldb_credits;
	uint16_t num_queues; /* total queues */
	uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */
	uint16_t num_ports; /* total num of evdev ports requested */
	uint16_t num_ldb_ports; /* total num of ldb ports requested */
	uint16_t num_dir_ports; /* total num of dir ports requested */
	bool umwait_allowed;
	bool global_dequeue_wait; /* Not using per dequeue wait if true */
	bool defer_sched;
	enum dlb2_cq_poll_modes poll_mode;
	uint8_t revision;
	bool configured;
	uint16_t max_ldb_credits;
	uint16_t max_dir_credits;

	/* force hw credit pool counters into exclusive cache lines */

	/* use __atomic builtins; shared hw cred */
	uint32_t ldb_credit_pool __rte_cache_aligned;
	/* use __atomic builtins; shared hw cred */
	uint32_t dir_credit_pool __rte_cache_aligned;
};
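
/*
 * Note (explanatory, not from the original header): giving each credit pool
 * its own cache line keeps the __atomic operations that ports issue against
 * ldb_credit_pool from false-sharing a line with dir_credit_pool (and with
 * the read-mostly fields above), so contention on one pool does not bounce
 * the other pool's cache line between cores.
 */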

/* used for collecting and passing around the dev args */
struct dlb2_qid_depth_thresholds {
	int val[DLB2_MAX_NUM_QUEUES];
};

struct dlb2_devargs {
	int socket_id;
	int max_num_events;
	int num_dir_credits_override;
	int dev_id;
	int defer_sched;
	struct dlb2_qid_depth_thresholds qid_depth_thresholds;
	enum dlb2_cos cos_id;
};

/* End Eventdev related defines and structs */

/* Forwards for non-inlined functions */

void dlb2_eventdev_dump(struct rte_eventdev *dev, FILE *f);

int dlb2_xstats_init(struct dlb2_eventdev *dlb2);

void dlb2_xstats_uninit(struct dlb2_eventdev *dlb2);

int dlb2_eventdev_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n);

int dlb2_eventdev_xstats_get_names(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstat_names,
		unsigned int *ids, unsigned int size);

uint64_t dlb2_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
					  const char *name, unsigned int *id);

int dlb2_eventdev_xstats_reset(struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint32_t ids[],
		uint32_t nb_ids);

int test_dlb2_eventdev(void);

int dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
				const char *name,
				struct dlb2_devargs *dlb2_args);

int dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
				  const char *name);

uint32_t dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
			      struct dlb2_eventdev_queue *queue);

int dlb2_parse_params(const char *params,
		      const char *name,
		      struct dlb2_devargs *dlb2_args);

/* Extern globals */
extern struct process_local_port_data dlb2_port[][DLB2_NUM_PORT_TYPES];

#endif	/* _DLB2_PRIV_H_ */