/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _DLB2_PRIV_H_
#define _DLB2_PRIV_H_

#include <emmintrin.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#include <rte_config.h>
#include <rte_eventdev.h>
#include <rte_spinlock.h>

#include "dlb2_user.h"
#include "dlb2_log.h"
#include "rte_pmd_dlb2.h"

#ifndef RTE_LIBRTE_PMD_DLB2_QUELL_STATS
#define DLB2_INC_STAT(_stat, _incr_val) ((_stat) += (_incr_val))
#else
#define DLB2_INC_STAT(_stat, _incr_val)
#endif
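
/*
 * Illustrative only (not part of the original header): DLB2_INC_STAT lets
 * hot-path counter updates compile away entirely when
 * RTE_LIBRTE_PMD_DLB2_QUELL_STATS is defined. A typical call site might
 * look like (fields defined later in this file):
 *
 *	DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, nb_events);
 *
 * With stats quelled, the macro expands to nothing and the statement
 * disappears at compile time.
 */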

#define EVDEV_DLB2_NAME_PMD dlb2_event

/* Default values for command line devargs */
#define DLB2_POLL_INTERVAL_DEFAULT 1000
#define DLB2_SW_CREDIT_QUANTA_DEFAULT 32 /* Default - Worker */
#define DLB2_SW_CREDIT_P_QUANTA_DEFAULT 256 /* Producer */
#define DLB2_SW_CREDIT_C_QUANTA_DEFAULT 256 /* Consumer */
#define DLB2_DEPTH_THRESH_DEFAULT 256
#define DLB2_MIN_CQ_DEPTH_OVERRIDE 32
#define DLB2_MAX_CQ_DEPTH_OVERRIDE 1024

/* Command line arg strings */
#define NUMA_NODE_ARG "numa_node"
#define DLB2_MAX_NUM_EVENTS "max_num_events"
#define DLB2_NUM_DIR_CREDITS "num_dir_credits"
#define DEV_ID_ARG "dev_id"
#define DLB2_QID_DEPTH_THRESH_ARG "qid_depth_thresh"
#define DLB2_COS_ARG "cos"
#define DLB2_POLL_INTERVAL_ARG "poll_interval"
#define DLB2_SW_CREDIT_QUANTA_ARG "sw_credit_quanta"
#define DLB2_HW_CREDIT_QUANTA_ARG "hw_credit_quanta"
#define DLB2_DEPTH_THRESH_ARG "default_depth_thresh"
#define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable"
#define DLB2_MAX_CQ_DEPTH "max_cq_depth"

/* Begin HW related defines and structs */

#define DLB2_HW_V2 0
#define DLB2_HW_V2_5 1
#define DLB2_MAX_NUM_DOMAINS 32
#define DLB2_MAX_NUM_VFS 16
#define DLB2_MAX_NUM_LDB_QUEUES 32
#define DLB2_MAX_NUM_LDB_PORTS 64
#define DLB2_MAX_NUM_DIR_PORTS_V2		DLB2_MAX_NUM_DIR_QUEUES_V2
#define DLB2_MAX_NUM_DIR_PORTS_V2_5		DLB2_MAX_NUM_DIR_QUEUES_V2_5
#define DLB2_MAX_NUM_DIR_PORTS(ver)		((ver) == DLB2_HW_V2 ? \
						 DLB2_MAX_NUM_DIR_PORTS_V2 : \
						 DLB2_MAX_NUM_DIR_PORTS_V2_5)
#define DLB2_MAX_NUM_DIR_QUEUES_V2		64 /* DIR == directed */
#define DLB2_MAX_NUM_DIR_QUEUES_V2_5		96
/* When needed for array sizing, the DLB 2.5 macro is used */
#define DLB2_MAX_NUM_DIR_QUEUES(ver)		((ver) == DLB2_HW_V2 ? \
						 DLB2_MAX_NUM_DIR_QUEUES_V2 : \
						 DLB2_MAX_NUM_DIR_QUEUES_V2_5)
#define DLB2_MAX_NUM_FLOWS (64 * 1024)
#define DLB2_MAX_NUM_LDB_CREDITS (8 * 1024)
#define DLB2_MAX_NUM_DIR_CREDITS(ver)		((ver) == DLB2_HW_V2 ? 4096 : 0)
#define DLB2_MAX_NUM_CREDITS(ver)		((ver) == DLB2_HW_V2 ? \
						 0 : DLB2_MAX_NUM_LDB_CREDITS)
#define DLB2_MAX_NUM_LDB_CREDIT_POOLS 64
#define DLB2_MAX_NUM_DIR_CREDIT_POOLS 64
#define DLB2_MAX_NUM_HIST_LIST_ENTRIES 2048
#define DLB2_MAX_NUM_QIDS_PER_LDB_CQ 8
#define DLB2_QID_PRIORITIES 8
#define DLB2_MAX_DEVICE_PATH 32
#define DLB2_MIN_DEQUEUE_TIMEOUT_NS 1
/* Note: "- 1" here to support the timeout range check in eventdev_autotest */
#define DLB2_MAX_DEQUEUE_TIMEOUT_NS (UINT32_MAX - 1)
#define DLB2_SW_CREDIT_BATCH_SZ 32 /* Default - Worker */
#define DLB2_SW_CREDIT_P_BATCH_SZ 256 /* Producer */
#define DLB2_SW_CREDIT_C_BATCH_SZ 256 /* Consumer */
#define DLB2_NUM_SN_GROUPS 2
#define DLB2_MAX_LDB_SN_ALLOC 1024
#define DLB2_MAX_QUEUE_DEPTH_THRESHOLD 8191

/* There are 2048 total hist list entries and 64 total ldb ports, which
 * makes for 2048/64 == 32 hist list entries per port. CQ depth must be a
 * power of 2 and must not exceed the per-port hist list count. As a
 * result we just limit the maximum dequeue depth to 32.
 */
#define DLB2_MAX_HL_ENTRIES 2048
#define DLB2_MIN_CQ_DEPTH 1
#define DLB2_DEFAULT_CQ_DEPTH 32
#define DLB2_MIN_HARDWARE_CQ_DEPTH 8
#define DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
	DLB2_DEFAULT_CQ_DEPTH

#define DLB2_HW_DEVICE_FROM_PCI_ID(_pdev) \
	((((_pdev)->id.device_id == PCI_DEVICE_ID_INTEL_DLB2_5_PF) || \
	  ((_pdev)->id.device_id == PCI_DEVICE_ID_INTEL_DLB2_5_VF)) ? \
		DLB2_HW_V2_5 : DLB2_HW_V2)
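
/*
 * Illustrative use (not part of the original header), assuming a
 * struct rte_pci_device *pdev from rte_bus_pci.h:
 *
 *	uint8_t hw_ver = DLB2_HW_DEVICE_FROM_PCI_ID(pdev);
 *	int dir_ports = DLB2_MAX_NUM_DIR_PORTS(hw_ver);
 *
 * i.e. the PCI device ID selects DLB2_HW_V2_5 vs. DLB2_HW_V2, which in
 * turn sizes the version-dependent resource macros above.
 */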

/*
 * Static per queue/port provisioning values
 */
#define DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE 64

#define CQ_BASE(is_dir) ((is_dir) ? DLB2_DIR_CQ_BASE : DLB2_LDB_CQ_BASE)
#define CQ_SIZE(is_dir) ((is_dir) ? DLB2_DIR_CQ_MAX_SIZE : \
				    DLB2_LDB_CQ_MAX_SIZE)
#define PP_BASE(is_dir) ((is_dir) ? DLB2_DIR_PP_BASE : DLB2_LDB_PP_BASE)

#define DLB2_NUM_QES_PER_CACHE_LINE 4

#define DLB2_MAX_ENQUEUE_DEPTH 64
#define DLB2_MIN_ENQUEUE_DEPTH 4

#define DLB2_NAME_SIZE 64

#define DLB2_1K 1024
#define DLB2_2K (2 * DLB2_1K)
#define DLB2_4K (4 * DLB2_1K)
#define DLB2_16K (16 * DLB2_1K)
#define DLB2_32K (32 * DLB2_1K)
#define DLB2_1MB (DLB2_1K * DLB2_1K)
#define DLB2_16MB (16 * DLB2_1MB)

/* Use the upper 3 bits of the event priority to select the DLB2 priority */
#define EV_TO_DLB2_PRIO(x) ((x) >> 5)
#define DLB2_TO_EV_PRIO(x) ((x) << 5)
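
/*
 * Worked example (illustrative): an rte_event priority of 191 maps to
 * DLB2 priority 191 >> 5 == 5; converting back gives 5 << 5 == 160. The
 * low 5 bits are discarded, so only DLB2_QID_PRIORITIES (8) distinct
 * levels survive the round trip.
 */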

enum dlb2_hw_ver {
	DLB2_HW_VER_2,
	DLB2_HW_VER_2_5,
};

enum dlb2_hw_port_types {
	DLB2_LDB_PORT,
	DLB2_DIR_PORT,
	DLB2_NUM_PORT_TYPES /* Must be last */
};

enum dlb2_hw_queue_types {
	DLB2_LDB_QUEUE,
	DLB2_DIR_QUEUE,
	DLB2_NUM_QUEUE_TYPES /* Must be last */
};

#define DLB2_COMBINED_POOL DLB2_LDB_QUEUE

#define PORT_TYPE(p) ((p)->is_directed ? DLB2_DIR_PORT : DLB2_LDB_PORT)

/* Do not change - must match hardware! */
enum dlb2_hw_sched_type {
	DLB2_SCHED_ATOMIC = 0,
	DLB2_SCHED_UNORDERED,
	DLB2_SCHED_ORDERED,
	DLB2_SCHED_DIRECTED,
	/* DLB2_NUM_HW_SCHED_TYPES must be last */
	DLB2_NUM_HW_SCHED_TYPES
};

struct dlb2_hw_rsrcs {
	int32_t nb_events_limit;
	uint32_t num_queues;		/* Total queues (lb + dir) */
	uint32_t num_ldb_queues;	/* Number of available ldb queues */
	uint32_t num_ldb_ports;		/* Number of load balanced ports */
	uint32_t num_dir_ports;		/* Number of directed ports */
	union {
		struct {
			uint32_t num_ldb_credits; /* Number of ldb credits */
			uint32_t num_dir_credits; /* Number of dir credits */
		};
		struct {
			uint32_t num_credits; /* Number of combined credits */
		};
	};
	uint32_t reorder_window_size;	/* Size of reorder window */
};

struct dlb2_hw_resource_info {
	/** Max resources that can be provided */
	struct dlb2_hw_rsrcs hw_rsrc_max;
	int num_sched_domains;
	uint32_t socket_id;
};

enum dlb2_enqueue_type {
	/**
	 * New : Used to inject a new packet into the QM.
	 */
	DLB2_ENQ_NEW,
	/**
	 * Forward : Enqueues a packet, and
	 *  - if atomic: releases any lock it holds in the QM
	 *  - if ordered: releases the packet for egress re-ordering
	 */
	DLB2_ENQ_FWD,
	/**
	 * Enqueue Drop : Releases an inflight packet. Must be called with
	 * event == NULL. Used to drop a packet.
	 *
	 * Note that all packets dequeued from a load-balanced port must be
	 * released, either with DLB2_ENQ_DROP or DLB2_ENQ_FWD.
	 */
	DLB2_ENQ_DROP,

	/* Marker for array sizing etc. */
	_DLB2_NB_ENQ_TYPES
};
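
/*
 * These roughly parallel the eventdev enqueue operations (the precise
 * mapping is implemented in the PMD's enqueue path): RTE_EVENT_OP_NEW
 * injects a new event (DLB2_ENQ_NEW), RTE_EVENT_OP_FORWARD enqueues and
 * releases the held lock or reorder slot (DLB2_ENQ_FWD), and
 * RTE_EVENT_OP_RELEASE drops an inflight without re-enqueueing it
 * (DLB2_ENQ_DROP).
 */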

/* hw-specific format - do not change */

struct dlb2_event_type {
	uint16_t major:4;
	uint16_t unused:4;
	uint16_t sub:8;
};

union dlb2_opaque_data {
	uint16_t opaque_data;
	struct dlb2_event_type event_type;
};

struct dlb2_msg_info {
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
};

#define DLB2_NEW_CMD_BYTE 0x08
#define DLB2_FWD_CMD_BYTE 0x0A
#define DLB2_COMP_CMD_BYTE 0x02
#define DLB2_POP_CMD_BYTE 0x01
#define DLB2_NOOP_CMD_BYTE 0x00
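
/*
 * Each command byte above is simply the OR of the flag bits in the QE
 * structs below: 0x08 sets qe_valid (new enqueue), 0x0A sets
 * qe_valid | qe_comp (forward == enqueue plus completion), 0x02 sets
 * qe_comp alone (completion), and 0x01 sets cq_token (CQ token return).
 */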

/* hw-specific format - do not change */
struct dlb2_enqueue_qe {
	uint64_t data;
	/* Word 3 */
	union dlb2_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	/* Word 4 */
	uint16_t lock_id;
	uint8_t meas_lat:1;
	uint8_t rsvd1:2;
	uint8_t no_dec:1;
	uint8_t cmp_id:4;
	union {
		uint8_t cmd_byte;
		struct {
			uint8_t cq_token:1;
			uint8_t qe_comp:1;
			uint8_t qe_frag:1;
			uint8_t qe_valid:1;
			uint8_t rsvd3:1;
			uint8_t error:1;
			uint8_t rsvd:2;
		};
	};
};

/* hw-specific format - do not change */
struct dlb2_cq_pop_qe {
	uint64_t data;
	union dlb2_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	uint16_t tokens:10;
	uint16_t rsvd2:6;
	uint8_t meas_lat:1;
	uint8_t rsvd1:2;
	uint8_t no_dec:1;
	uint8_t cmp_id:4;
	union {
		uint8_t cmd_byte;
		struct {
			uint8_t cq_token:1;
			uint8_t qe_comp:1;
			uint8_t qe_frag:1;
			uint8_t qe_valid:1;
			uint8_t rsvd3:1;
			uint8_t error:1;
			uint8_t rsvd:2;
		};
	};
};

/* hw-specific format - do not change */
struct dlb2_dequeue_qe {
	uint64_t data;
	union dlb2_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	uint16_t flow_id:16; /* was pp_id in v1 */
	uint8_t debug;
	uint8_t cq_gen:1;
	uint8_t qid_depth:2; /* 2 bits in v2 */
	uint8_t rsvd1:2;
	uint8_t error:1;
	uint8_t rsvd2:2;
};
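
/*
 * A minimal sketch (hypothetical helper, not the driver's dequeue path) of
 * how software detects a freshly written QE: the hardware flips cq_gen on
 * each pass over the ring, so a slot is new only when its gen bit matches
 * the value the port currently expects (tracked in struct dlb2_port below).
 *
 *	static inline bool
 *	dlb2_cq_qe_is_new(const struct dlb2_dequeue_qe *qe,
 *			  const struct dlb2_port *port)
 *	{
 *		return qe->cq_gen == port->gen_bit;
 *	}
 */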

union dlb2_port_config {
	struct dlb2_create_ldb_port_args ldb;
	struct dlb2_create_dir_port_args dir;
};

enum dlb2_port_state {
	PORT_CLOSED,
	PORT_STARTED,
	PORT_STOPPED
};

enum dlb2_configuration_state {
	/* The resource has not been configured */
	DLB2_NOT_CONFIGURED,
	/* The resource was configured, but the device was stopped */
	DLB2_PREV_CONFIGURED,
	/* The resource is currently configured */
	DLB2_CONFIGURED
};

struct dlb2_port {
	uint32_t id;
	bool is_directed;
	bool gen_bit;
	uint16_t dir_credits;
	uint32_t dequeue_depth;
	enum dlb2_token_pop_mode token_pop_mode;
	union dlb2_port_config cfg;
	uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
	union {
		struct {
			uint16_t cached_ldb_credits;
			uint16_t ldb_credits;
			uint16_t cached_dir_credits;
		};
		struct {
			uint16_t cached_credits;
			uint16_t credits;
		};
	};
	bool int_armed;
	uint16_t owed_tokens;
	int16_t issued_releases;
	int16_t token_pop_thresh;
	int cq_depth;
	uint16_t cq_idx;
	uint16_t cq_idx_unmasked;
	uint16_t cq_depth_mask;
	uint16_t gen_bit_shift;
	/* rotated so the expected gen bits are always in place */
	uint64_t cq_rolling_mask;
	uint64_t cq_rolling_mask_2;
	void *cq_addr_cached; /* avoid multiple refs */
	enum dlb2_port_state state;
	enum dlb2_configuration_state config_state;
	int num_mapped_qids;
	uint8_t *qid_mappings;
	struct dlb2_enqueue_qe *qe4; /* Cache line's worth of QEs (4) */
	struct dlb2_enqueue_qe *int_arm_qe;
	struct dlb2_cq_pop_qe *consume_qe;
	struct dlb2_eventdev *dlb2; /* back ptr */
	struct dlb2_eventdev_port *ev_port; /* back ptr */
	bool use_scalar; /* force usage of scalar code */
	uint16_t hw_credit_quanta;
};
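
/*
 * Sketch (assumed helper, not the PMD's actual routine) of how a port
 * could pull a batch of hw_credit_quanta credits from the shared
 * credit_pool using the __atomic builtins the member comment above calls
 * for; a CAS retry avoids handing out more credits than remain.
 *
 *	static inline uint16_t
 *	dlb2_port_credits_get(struct dlb2_port *qm_port,
 *			      enum dlb2_hw_queue_types type)
 *	{
 *		uint32_t credits = __atomic_load_n(qm_port->credit_pool[type],
 *						   __ATOMIC_SEQ_CST);
 *		uint32_t batch = qm_port->hw_credit_quanta;
 *
 *		if (credits < batch)
 *			batch = credits;
 *
 *		if (batch && __atomic_compare_exchange_n(
 *				qm_port->credit_pool[type], &credits,
 *				credits - batch, false,
 *				__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
 *			return batch;
 *
 *		return 0;
 *	}
 */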

/* Per-process per-port mmio and memory pointers */
struct process_local_port_data {
	uint64_t *pp_addr;
	struct dlb2_dequeue_qe *cq_base;
	const struct rte_memzone *mz;
	bool mmaped;
};

struct dlb2_eventdev;

struct dlb2_port_low_level_io_functions {
	void (*pp_enqueue_four)(void *qe4, void *pp_addr);
};

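/*
 * One possible pp_enqueue_four implementation, sketched with the SSE2
 * intrinsics pulled in by <emmintrin.h> at the top of this file: copy a
 * cache line's worth of QEs (DLB2_NUM_QES_PER_CACHE_LINE == 4, 16 bytes
 * each) into the memory-mapped producer-port window. Illustrative only;
 * the PMD selects its real routine at probe time.
 *
 *	static void
 *	dlb2_pp_enqueue_four_sse(void *qe4, void *pp_addr)
 *	{
 *		int i;
 *
 *		for (i = 0; i < DLB2_NUM_QES_PER_CACHE_LINE; i++) {
 *			__m128i qe =
 *				_mm_loadu_si128((const __m128i *)qe4 + i);
 *
 *			_mm_store_si128((__m128i *)pp_addr + i, qe);
 *		}
 *	}
 */
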
struct dlb2_config {
	int configured;
	int reserved;
	union {
		struct {
			uint32_t num_ldb_credits;
			uint32_t num_dir_credits;
		};
		struct {
			uint32_t num_credits;
		};
	};
	struct dlb2_create_sched_domain_args resources;
};

enum dlb2_cos {
	DLB2_COS_DEFAULT = -1,
	DLB2_COS_0 = 0,
	DLB2_COS_1,
	DLB2_COS_2,
	DLB2_COS_3
};
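
/*
 * Selected via the "cos" devarg (DLB2_COS_ARG above); for example, a
 * hypothetical EAL device argument of the form
 *
 *	-a <bus:dev.fn>,cos=2
 *
 * would pin the domain's load-balanced ports to class-of-service 2, while
 * DLB2_COS_DEFAULT leaves the placement to the hardware.
 */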

struct dlb2_hw_dev {
	struct dlb2_config cfg;
	struct dlb2_hw_resource_info info;
	void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb2_dev) */
	uint32_t domain_id;
	enum dlb2_cos cos_id;
	rte_spinlock_t resource_lock; /* for MP support */
} __rte_cache_aligned;

/* End HW related defines and structs */

/* Begin DLB2 PMD Eventdev related defines and structs */

#define DLB2_MAX_NUM_QUEUES(ver) \
	(DLB2_MAX_NUM_DIR_QUEUES(ver) + DLB2_MAX_NUM_LDB_QUEUES)

#define DLB2_MAX_NUM_PORTS(ver) \
	(DLB2_MAX_NUM_DIR_PORTS(ver) + DLB2_MAX_NUM_LDB_PORTS)

#define DLB2_MAX_NUM_QUEUES_ALL \
	(DLB2_MAX_NUM_DIR_QUEUES_V2_5 + DLB2_MAX_NUM_LDB_QUEUES)
#define DLB2_MAX_NUM_PORTS_ALL \
	(DLB2_MAX_NUM_DIR_PORTS_V2_5 + DLB2_MAX_NUM_LDB_PORTS)
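/*
 * For reference: with the DLB 2.5 values these evaluate to 96 + 32 == 128
 * queues and 96 + 64 == 160 ports.
 */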
#define DLB2_MAX_INPUT_QUEUE_DEPTH 256

/** Structure to hold the queue to port link establishment attributes */
struct dlb2_event_queue_link {
	uint8_t queue_id;
	uint8_t priority;
	bool mapped;
	bool valid;
};

struct dlb2_traffic_stats {
	uint64_t rx_ok;
	uint64_t rx_drop;
	uint64_t rx_interrupt_wait;
	uint64_t rx_umonitor_umwait;
	uint64_t tx_ok;
	uint64_t total_polls;
	uint64_t zero_polls;
	union {
		struct {
			uint64_t tx_nospc_ldb_hw_credits;
			uint64_t tx_nospc_dir_hw_credits;
		};
		struct {
			uint64_t tx_nospc_hw_credits;
		};
	};
	uint64_t tx_nospc_inflight_max;
	uint64_t tx_nospc_new_event_limit;
	uint64_t tx_nospc_inflight_credits;
};

/* DLB2 HW sets the 2-bit qid_depth in rx QEs based on the programmable depth
 * threshold. The global default value in config/common_base (or rte_config.h)
 * can be overridden on a per-qid basis using a devargs command line parameter.
 * 3: depth > threshold
 * 2: threshold >= depth > 3/4 threshold
 * 1: 3/4 threshold >= depth > 1/2 threshold
 * 0: depth <= 1/2 threshold.
 */
#define DLB2_QID_DEPTH_LE50 0
#define DLB2_QID_DEPTH_GT50_LE75 1
#define DLB2_QID_DEPTH_GT75_LE100 2
#define DLB2_QID_DEPTH_GT100 3
#define DLB2_NUM_QID_DEPTH_STAT_VALS 4 /* 2 bits */
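
/*
 * Worked example: with the default threshold of 256
 * (DLB2_DEPTH_THRESH_DEFAULT), a measured depth of 100 reports
 * DLB2_QID_DEPTH_LE50 (<= 128), 150 reports GT50_LE75, 200 reports
 * GT75_LE100, and 300 reports GT100 (> 256).
 */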

struct dlb2_queue_stats {
	uint64_t enq_ok;
	uint64_t qid_depth[DLB2_NUM_QID_DEPTH_STAT_VALS];
};

struct dlb2_port_stats {
	struct dlb2_traffic_stats traffic;
	uint64_t tx_op_cnt[4]; /* indexed by rte_event.op */
	uint64_t tx_implicit_rel;
	uint64_t tx_sched_cnt[DLB2_NUM_HW_SCHED_TYPES];
	uint64_t tx_invalid;
	uint64_t rx_sched_cnt[DLB2_NUM_HW_SCHED_TYPES];
	uint64_t rx_sched_invalid;
	struct dlb2_queue_stats queue[DLB2_MAX_NUM_QUEUES_ALL];
};

struct dlb2_eventdev_port {
	struct dlb2_port qm_port; /* hw specific data structure */
	struct rte_event_port_conf conf; /* user-supplied configuration */
	uint16_t inflight_credits; /* num credits this port has right now */
	uint16_t credit_update_quanta;
	struct dlb2_eventdev *dlb2; /* backlink optimization */
	struct dlb2_port_stats stats __rte_cache_aligned;
	struct dlb2_event_queue_link link[DLB2_MAX_NUM_QIDS_PER_LDB_CQ];
	int num_links;
	uint32_t id; /* port id */
	/* Num releases yet to be completed on this port.
	 * Only applies to load-balanced ports.
	 */
	uint16_t outstanding_releases;
	uint16_t inflight_max; /* app requested max inflights for this port */
	/* setup_done is set when the event port is setup */
	bool setup_done;
	/* enq_configured is set when the qm port is created */
	bool enq_configured;
	uint8_t implicit_release; /* release events before dequeuing */
} __rte_cache_aligned;

struct dlb2_queue {
	uint32_t num_qid_inflights; /* User config */
	uint32_t num_atm_inflights; /* User config */
	enum dlb2_configuration_state config_state;
	int sched_type; /* LB queue only */
	uint8_t id;
	bool is_directed;
};

struct dlb2_eventdev_queue {
	struct dlb2_queue qm_queue;
	struct rte_event_queue_conf conf; /* User config */
	int depth_threshold; /* use default if 0 */
	uint32_t id;
	bool setup_done;
	uint8_t num_links;
};

enum dlb2_run_state {
	DLB2_RUN_STATE_STOPPED = 0,
	DLB2_RUN_STATE_STOPPING,
	DLB2_RUN_STATE_STARTING,
	DLB2_RUN_STATE_STARTED
};

struct dlb2_eventdev {
	struct dlb2_eventdev_port ev_ports[DLB2_MAX_NUM_PORTS_ALL];
	struct dlb2_eventdev_queue ev_queues[DLB2_MAX_NUM_QUEUES_ALL];
	uint8_t qm_ldb_to_ev_queue_id[DLB2_MAX_NUM_QUEUES_ALL];
	uint8_t qm_dir_to_ev_queue_id[DLB2_MAX_NUM_QUEUES_ALL];
	/* store num stats and offset of the stats for each queue */
	uint16_t xstats_count_per_qid[DLB2_MAX_NUM_QUEUES_ALL];
	uint16_t xstats_offset_for_qid[DLB2_MAX_NUM_QUEUES_ALL];
	/* store num stats and offset of the stats for each port */
	uint16_t xstats_count_per_port[DLB2_MAX_NUM_PORTS_ALL];
	uint16_t xstats_offset_for_port[DLB2_MAX_NUM_PORTS_ALL];
	struct dlb2_get_num_resources_args hw_rsrc_query_results;
	uint32_t xstats_count_mode_queue;
	struct dlb2_hw_dev qm_instance; /* strictly hw related */
	uint64_t global_dequeue_wait_ticks;
	struct dlb2_xstats_entry *xstats;
	struct rte_eventdev *event_dev; /* backlink to dev */
	uint32_t xstats_count_mode_dev;
	uint32_t xstats_count_mode_port;
	uint32_t xstats_count;
	uint32_t inflights; /* use __atomic builtins */
	uint32_t new_event_limit;
	int max_num_events_override;
	int num_dir_credits_override;
	bool vector_opts_enabled;
	int max_cq_depth;
	volatile enum dlb2_run_state run_state;
	uint16_t num_dir_queues; /* total num of evdev dir queues requested */
	union {
		struct {
			uint16_t num_dir_credits;
			uint16_t num_ldb_credits;
		};
		struct {
			uint16_t num_credits;
		};
	};
	uint16_t num_queues; /* total queues */
	uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */
	uint16_t num_ports; /* total num of evdev ports requested */
	uint16_t num_ldb_ports; /* total num of ldb ports requested */
	uint16_t num_dir_ports; /* total num of dir ports requested */
	bool umwait_allowed;
	bool global_dequeue_wait; /* Not using per dequeue wait if true */
	enum dlb2_cq_poll_modes poll_mode;
	int poll_interval;
	int sw_credit_quanta;
	int hw_credit_quanta;
	int default_depth_thresh;
	uint8_t revision;
	uint8_t version;
	bool configured;
	union {
		struct {
			uint16_t max_ldb_credits;
			uint16_t max_dir_credits;
			/* shared hw credits; use __atomic builtins */
			uint32_t ldb_credit_pool __rte_cache_aligned;
			/* shared hw credits; use __atomic builtins */
			uint32_t dir_credit_pool __rte_cache_aligned;
		};
		struct {
			uint16_t max_credits;
			/* shared hw credits; use __atomic builtins */
			uint32_t credit_pool __rte_cache_aligned;
		};
	};
};


/* Used for collecting and passing around the dev args */
struct dlb2_qid_depth_thresholds {
	int val[DLB2_MAX_NUM_QUEUES_ALL];
};

struct dlb2_devargs {
	int socket_id;
	int max_num_events;
	int num_dir_credits_override;
	int dev_id;
	struct dlb2_qid_depth_thresholds qid_depth_thresholds;
	enum dlb2_cos cos_id;
	int poll_interval;
	int sw_credit_quanta;
	int hw_credit_quanta;
	int default_depth_thresh;
	bool vector_opts_enabled;
	int max_cq_depth;
};
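
/*
 * Illustrative example (hypothetical PCI address) of the devargs these
 * fields are parsed from, using the argument names defined at the top of
 * this file:
 *
 *	-a <bus:dev.fn>,max_num_events=2048,cos=1,poll_interval=500
 */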

/* End Eventdev related defines and structs */

/* Forward declarations for non-inlined functions */

void dlb2_eventdev_dump(struct rte_eventdev *dev, FILE *f);

int dlb2_xstats_init(struct dlb2_eventdev *dlb2);

void dlb2_xstats_uninit(struct dlb2_eventdev *dlb2);

int dlb2_eventdev_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n);

int dlb2_eventdev_xstats_get_names(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstat_names,
		unsigned int *ids, unsigned int size);

uint64_t dlb2_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
					  const char *name, unsigned int *id);

int dlb2_eventdev_xstats_reset(struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint32_t ids[],
		uint32_t nb_ids);

int test_dlb2_eventdev(void);

int dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
				const char *name,
				struct dlb2_devargs *dlb2_args);

int dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
				  const char *name);

uint32_t dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
			      struct dlb2_eventdev_queue *queue);

int dlb2_parse_params(const char *params,
		      const char *name,
		      struct dlb2_devargs *dlb2_args,
		      uint8_t version);

/* Extern globals */
extern struct process_local_port_data dlb2_port[][DLB2_NUM_PORT_TYPES];

#endif	/* _DLB2_PRIV_H_ */