/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _DLB_PRIV_H_
#define _DLB_PRIV_H_

#include <emmintrin.h>
#include <stdbool.h>
#include <unistd.h>

#include <rte_bus_pci.h>
#include <rte_eventdev.h>
#include <rte_eventdev_pmd.h>
#include <rte_eventdev_pmd_pci.h>
#include <rte_pci.h>

#include "dlb_user.h"
#include "dlb_log.h"
#include "rte_pmd_dlb.h"

#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
#define DLB_INC_STAT(_stat, _incr_val) ((_stat) += (_incr_val))
#else
#define DLB_INC_STAT(_stat, _incr_val)
#endif
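
/* Illustrative use of DLB_INC_STAT (a sketch, not code from this
 * driver): hot-path accounting such as
 *
 *	DLB_INC_STAT(ev_port->stats.traffic.tx_ok, nb_enqueued);
 *
 * compiles to nothing when RTE_LIBRTE_PMD_DLB_QUELL_STATS is defined,
 * so quelled builds pay no per-event cost for statistics.
 */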

#define EVDEV_DLB_NAME_PMD_STR "dlb_event"

/* command line arg strings */
#define NUMA_NODE_ARG "numa_node"
#define DLB_MAX_NUM_EVENTS "max_num_events"
#define DLB_NUM_DIR_CREDITS "num_dir_credits"
#define DEV_ID_ARG "dev_id"
#define DLB_DEFER_SCHED_ARG "defer_sched"
#define DLB_NUM_ATM_INFLIGHTS_ARG "atm_inflights"

/* Begin HW related defines and structs */

#define DLB_MAX_NUM_DOMAINS 32
#define DLB_MAX_NUM_VFS 16
#define DLB_MAX_NUM_LDB_QUEUES 128
#define DLB_MAX_NUM_LDB_PORTS 64
#define DLB_MAX_NUM_DIR_PORTS 128
#define DLB_MAX_NUM_DIR_QUEUES 128
#define DLB_MAX_NUM_FLOWS (64 * 1024)
#define DLB_MAX_NUM_LDB_CREDITS 16384
#define DLB_MAX_NUM_DIR_CREDITS 4096
#define DLB_MAX_NUM_LDB_CREDIT_POOLS 64
#define DLB_MAX_NUM_DIR_CREDIT_POOLS 64
#define DLB_MAX_NUM_HIST_LIST_ENTRIES 5120
#define DLB_MAX_NUM_ATM_INFLIGHTS 2048
#define DLB_MAX_NUM_QIDS_PER_LDB_CQ 8
#define DLB_QID_PRIORITIES 8
#define DLB_MAX_DEVICE_PATH 32
#define DLB_MIN_DEQUEUE_TIMEOUT_NS 1
#define DLB_NUM_SN_GROUPS 4
#define DLB_MAX_LDB_SN_ALLOC 1024
/* Note: "- 1" here to support the timeout range check in eventdev_autotest */
#define DLB_MAX_DEQUEUE_TIMEOUT_NS (UINT32_MAX - 1)
#define DLB_DEF_UNORDERED_QID_INFLIGHTS 2048

/* There are 5120 history list entries in total and 64 load-balanced
 * ports, which makes for 5120/64 == 80 history list entries per port.
 * However, the CQ depth must be a power of 2 and must not exceed the
 * per-port history list entries, so the maximum dequeue depth is
 * limited to 64 (the largest power of 2 that fits in 80 entries).
 */
#define DLB_MIN_LDB_CQ_DEPTH 1
#define DLB_MIN_DIR_CQ_DEPTH 8
#define DLB_MIN_HARDWARE_CQ_DEPTH 8
#define DLB_MAX_CQ_DEPTH 64
#define DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
	DLB_MAX_CQ_DEPTH

/* Static per queue/port provisioning values */
#define DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE 16

#define PP_BASE(is_dir) ((is_dir) ? DLB_DIR_PP_BASE : DLB_LDB_PP_BASE)

#define PAGE_SIZE (sysconf(_SC_PAGESIZE))

#define DLB_NUM_QES_PER_CACHE_LINE 4

#define DLB_MAX_ENQUEUE_DEPTH 64
#define DLB_MIN_ENQUEUE_DEPTH 4

#define DLB_NAME_SIZE 64

/* Use the upper 3 bits of the event priority to select the DLB priority */
#define EV_TO_DLB_PRIO(x) ((x) >> 5)
#define DLB_TO_EV_PRIO(x) ((x) << 5)
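
/* Worked example (illustrative): an rte_event priority of 191 maps to
 * DLB priority 191 >> 5 == 5, and DLB priority 5 maps back to event
 * priority 5 << 5 == 160; the round trip discards the low 5 bits of
 * the original event priority.
 */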

enum dlb_hw_port_type {
	DLB_LDB,
	DLB_DIR,

	/* NUM_DLB_PORT_TYPES must be last */
	NUM_DLB_PORT_TYPES
};

#define PORT_TYPE(p) ((p)->is_directed ? DLB_DIR : DLB_LDB)

/* Do not change - must match hardware! */
enum dlb_hw_sched_type {
	DLB_SCHED_ATOMIC = 0,
	DLB_SCHED_UNORDERED,
	DLB_SCHED_ORDERED,
	DLB_SCHED_DIRECTED,

	/* DLB_NUM_HW_SCHED_TYPES must be last */
	DLB_NUM_HW_SCHED_TYPES
};

struct dlb_devargs {
	int socket_id;
	int max_num_events;
	int num_dir_credits_override;
	int dev_id;
	int defer_sched;
	int num_atm_inflights;
};
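
/* Illustrative devargs string for the argument names above (the vdev
 * name and exact argument-to-field mapping are assumptions here; this
 * sketch presumes dlb_parse_params(), declared below, fills the
 * struct):
 *
 *	--vdev 'dlb_event,numa_node=1,max_num_events=2048,num_dir_credits=1024'
 *
 * would yield socket_id == 1, max_num_events == 2048 and
 * num_dir_credits_override == 1024.
 */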

struct dlb_hw_rsrcs {
	int32_t nb_events_limit;
	uint32_t num_queues;		/* Total queues (ldb + dir) */
	uint32_t num_ldb_queues;	/* Number of available ldb queues */
	uint32_t num_ldb_ports;         /* Number of load balanced ports */
	uint32_t num_dir_ports;         /* Number of directed ports */
	uint32_t num_ldb_credits;       /* Number of load balanced credits */
	uint32_t num_dir_credits;       /* Number of directed credits */
	uint32_t reorder_window_size;   /* Size of reorder window */
};

struct dlb_hw_resource_info {
	/** Max resources that can be provided */
	struct dlb_hw_rsrcs hw_rsrc_max;
	int num_sched_domains;
	uint32_t socket_id;
	/** EAL flags passed to this DLB instance, allowing the
	 * application to identify whether the PMD backend is hardware
	 * or software.
	 */
	const char *eal_flags;
};

/* hw-specific format - do not change */

struct dlb_event_type {
	uint8_t major:4;
	uint8_t unused:4;
	uint8_t sub;
};

union dlb_opaque_data {
	uint16_t opaque_data;
	struct dlb_event_type event_type;
};

struct dlb_msg_info {
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
};

#define DLB_NEW_CMD_BYTE 0x08
#define DLB_FWD_CMD_BYTE 0x0A
#define DLB_COMP_CMD_BYTE 0x02
#define DLB_NOOP_CMD_BYTE 0x00
#define DLB_POP_CMD_BYTE 0x01

/* hw-specific format - do not change */
struct dlb_enqueue_qe {
	uint64_t data;
	/* Word 3 */
	union dlb_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	/* Word 4 */
	uint16_t lock_id;
	uint8_t meas_lat:1;
	uint8_t rsvd1:2;
	uint8_t no_dec:1;
	uint8_t cmp_id:4;
	union {
		uint8_t cmd_byte;
		struct {
			uint8_t cq_token:1;
			uint8_t qe_comp:1;
			uint8_t qe_frag:1;
			uint8_t qe_valid:1;
			uint8_t int_arm:1;
			uint8_t error:1;
			uint8_t rsvd:2;
		};
	};
};
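
/* Minimal sketch (illustrative, not the driver's actual enqueue path)
 * of encoding a NEW event into an enqueue QE with the command bytes,
 * scheduling types, and priority macros defined above; "ev" stands in
 * for a struct rte_event pointer and "queue_id" for a hardware QID:
 *
 *	struct dlb_enqueue_qe qe = {0};
 *
 *	qe.data = ev->u64;
 *	qe.qid = queue_id;
 *	qe.sched_type = DLB_SCHED_ATOMIC;
 *	qe.priority = EV_TO_DLB_PRIO(ev->priority);
 *	qe.cmd_byte = DLB_NEW_CMD_BYTE;
 */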

/* hw-specific format - do not change */
struct dlb_cq_pop_qe {
	uint64_t data;
	union dlb_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	uint16_t tokens:10;
	uint16_t rsvd2:6;
	uint8_t meas_lat:1;
	uint8_t rsvd1:2;
	uint8_t no_dec:1;
	uint8_t cmp_id:4;
	union {
		uint8_t cmd_byte;
		struct {
			uint8_t cq_token:1;
			uint8_t qe_comp:1;
			uint8_t qe_frag:1;
			uint8_t qe_valid:1;
			uint8_t int_arm:1;
			uint8_t error:1;
			uint8_t rsvd:2;
		};
	};
};

/* hw-specific format - do not change */
struct dlb_dequeue_qe {
	uint64_t data;
	union dlb_opaque_data u;
	uint8_t qid;
	uint8_t sched_type:2;
	uint8_t priority:3;
	uint8_t msg_type:3;
	uint16_t pp_id:10;
	uint16_t rsvd0:6;
	uint8_t debug;
	uint8_t cq_gen:1;
	uint8_t qid_depth:1;
	uint8_t rsvd1:3;
	uint8_t error:1;
	uint8_t rsvd2:2;
};

enum dlb_port_state {
	PORT_CLOSED,
	PORT_STARTED,
	PORT_STOPPED
};

enum dlb_configuration_state {
	/* The resource has not been configured */
	DLB_NOT_CONFIGURED,
	/* The resource was configured, but the device was stopped */
	DLB_PREV_CONFIGURED,
	/* The resource is currently configured */
	DLB_CONFIGURED
};

struct dlb_port {
	uint32_t id;
	bool is_directed;
	bool gen_bit;
	uint16_t dir_credits;
	uint32_t dequeue_depth;
	enum dlb_token_pop_mode token_pop_mode;
	int pp_mmio_base;
	uint16_t cached_ldb_credits;
	uint16_t ldb_pushcount_at_credit_expiry;
	uint16_t ldb_credits;
	uint16_t cached_dir_credits;
	uint16_t dir_pushcount_at_credit_expiry;
	bool int_armed;
	bool use_rsvd_token_scheme;
	uint8_t cq_rsvd_token_deficit;
	uint16_t owed_tokens;
	int16_t issued_releases;
	int16_t token_pop_thresh;
	int cq_depth;
	uint16_t cq_idx;
	uint16_t cq_idx_unmasked;
	uint16_t cq_depth_mask;
	uint16_t gen_bit_shift;
	enum dlb_port_state state;
	enum dlb_configuration_state config_state;
	int num_mapped_qids;
	uint8_t *qid_mappings;
	struct dlb_enqueue_qe *qe4; /* Cache line's worth of QEs (4) */
	struct dlb_cq_pop_qe *consume_qe;
	struct dlb_eventdev *dlb; /* back ptr */
	struct dlb_eventdev_port *ev_port; /* back ptr */
};
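
/* Minimal sketch of gen-bit CQ polling (illustrative, not the
 * driver's actual dequeue loop): a CQ slot holds a new QE when its
 * cq_gen bit matches the gen bit software expects, and the expected
 * bit flips each time the CQ index wraps:
 *
 *	struct dlb_dequeue_qe *qe = &cq_base[port->cq_idx];
 *
 *	if (qe->cq_gen == port->gen_bit) {
 *		(consume the QE here)
 *		port->cq_idx_unmasked++;
 *		port->cq_idx = port->cq_idx_unmasked & port->cq_depth_mask;
 *		if (port->cq_idx == 0)
 *			port->gen_bit = !port->gen_bit;
 *	}
 */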

/* Per-process per-port mmio and memory pointers */
struct process_local_port_data {
	uint64_t *pp_addr;
	uint16_t *ldb_popcount;
	uint16_t *dir_popcount;
	struct dlb_dequeue_qe *cq_base;
	const struct rte_memzone *mz;
	bool mmaped;
};

struct dlb_config {
	int configured;
	int reserved;
	uint32_t ldb_credit_pool_id;
	uint32_t dir_credit_pool_id;
	uint32_t num_ldb_credits;
	uint32_t num_dir_credits;
	struct dlb_create_sched_domain_args resources;
};

struct dlb_hw_dev {
	struct dlb_config cfg;
	struct dlb_hw_resource_info info;
	void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb_dev) */
	int device_id;
	uint32_t domain_id;
	int domain_id_valid;
	rte_spinlock_t resource_lock; /* for MP support */
} __rte_cache_aligned;

/* End HW related defines and structs */

/* Begin DLB PMD Eventdev related defines and structs */

#define DLB_MAX_NUM_QUEUES \
	(DLB_MAX_NUM_DIR_QUEUES + DLB_MAX_NUM_LDB_QUEUES)

#define DLB_MAX_NUM_PORTS (DLB_MAX_NUM_DIR_PORTS + DLB_MAX_NUM_LDB_PORTS)
#define DLB_MAX_INPUT_QUEUE_DEPTH 256

/** Structure to hold the queue to port link establishment attributes */
struct dlb_event_queue_link {
	uint8_t queue_id;
	uint8_t priority;
	bool mapped;
	bool valid;
};

struct dlb_traffic_stats {
	uint64_t rx_ok;
	uint64_t rx_drop;
	uint64_t rx_interrupt_wait;
	uint64_t rx_umonitor_umwait;
	uint64_t tx_ok;
	uint64_t total_polls;
	uint64_t zero_polls;
	uint64_t tx_nospc_ldb_hw_credits;
	uint64_t tx_nospc_dir_hw_credits;
	uint64_t tx_nospc_inflight_max;
	uint64_t tx_nospc_new_event_limit;
	uint64_t tx_nospc_inflight_credits;
};

struct dlb_port_stats {
	struct dlb_traffic_stats traffic;
	uint64_t tx_op_cnt[4]; /* indexed by rte_event.op */
	uint64_t tx_implicit_rel;
	uint64_t tx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];
	uint64_t tx_invalid;
	uint64_t rx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];
	uint64_t rx_sched_invalid;
	uint64_t enq_ok[DLB_MAX_NUM_QUEUES]; /* per-queue enq_ok */
};

struct dlb_eventdev_port {
	struct dlb_port qm_port; /* hw specific data structure */
	struct rte_event_port_conf conf; /* user-supplied configuration */
	uint16_t inflight_credits; /* num credits this port has right now */
	uint16_t credit_update_quanta;
	struct dlb_eventdev *dlb; /* backlink optimization */
	struct dlb_port_stats stats __rte_cache_aligned;
	struct dlb_event_queue_link link[DLB_MAX_NUM_QIDS_PER_LDB_CQ];
	int num_links;
	uint32_t id;
	/* Number of releases yet to be completed on this port.
	 * Only applies to load-balanced ports.
	 */
	uint16_t outstanding_releases;
	uint16_t inflight_max; /* app requested max inflights for this port */
	/* setup_done is set when the event port is set up */
	bool setup_done;
	/* enq_configured is set when the qm port is created */
	bool enq_configured;
	uint8_t implicit_release; /* release events before dequeuing */
} __rte_cache_aligned;
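
/* Example of the release bookkeeping above (illustrative): after a
 * burst dequeue of 4 events from a load-balanced port,
 * outstanding_releases is 4; it returns toward 0 as the application
 * enqueues RTE_EVENT_OP_RELEASE events, or as implicit_release
 * retires the previous batch on the next dequeue.
 */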

struct dlb_queue {
	uint32_t num_qid_inflights; /* User config */
	uint32_t num_atm_inflights; /* User config */
	enum dlb_configuration_state config_state;
	int sched_type; /* LB queue only */
	uint32_t id;
	bool is_directed;
};

struct dlb_eventdev_queue {
	struct dlb_queue qm_queue;
	struct rte_event_queue_conf conf; /* User config */
	uint64_t enq_ok;
	uint32_t id;
	bool setup_done;
	uint8_t num_links;
};

enum dlb_run_state {
	DLB_RUN_STATE_STOPPED = 0,
	DLB_RUN_STATE_STOPPING,
	DLB_RUN_STATE_STARTING,
	DLB_RUN_STATE_STARTED
};

struct dlb_eventdev {
	struct dlb_eventdev_port ev_ports[DLB_MAX_NUM_PORTS];
	struct dlb_eventdev_queue ev_queues[DLB_MAX_NUM_QUEUES];
	uint8_t qm_ldb_to_ev_queue_id[DLB_MAX_NUM_QUEUES];
	uint8_t qm_dir_to_ev_queue_id[DLB_MAX_NUM_QUEUES];

	/* Number of stats, and the stats offset, for each queue */
	uint16_t xstats_count_per_qid[DLB_MAX_NUM_QUEUES];
	uint16_t xstats_offset_for_qid[DLB_MAX_NUM_QUEUES];

	/* Number of stats, and the stats offset, for each port */
	uint16_t xstats_count_per_port[DLB_MAX_NUM_PORTS];
	uint16_t xstats_offset_for_port[DLB_MAX_NUM_PORTS];
	struct dlb_get_num_resources_args hw_rsrc_query_results;
	uint32_t xstats_count_mode_queue;
	struct dlb_hw_dev qm_instance; /* strictly hw related */
	uint64_t global_dequeue_wait_ticks;
	struct dlb_xstats_entry *xstats;
	struct rte_eventdev *event_dev; /* backlink to dev */
	uint32_t xstats_count_mode_port;
	uint32_t xstats_count_mode_dev;
	uint32_t xstats_count;
	uint32_t inflights; /* use __atomic builtins to access */
	uint32_t new_event_limit;
	int max_num_events_override;
	int num_dir_credits_override;
	volatile enum dlb_run_state run_state;
	uint16_t num_dir_queues; /* total num of evdev dir queues requested */
	uint16_t num_dir_credits;
	uint16_t num_ldb_credits;
	uint16_t num_queues; /* total queues */
	uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */
	uint16_t num_ports; /* total num of evdev ports requested */
	uint16_t num_ldb_ports; /* total num of ldb ports requested */
	uint16_t num_dir_ports; /* total num of dir ports requested */
	bool is_vdev;
	bool umwait_allowed;
	bool global_dequeue_wait; /* If true, per-dequeue wait is not used */
	bool defer_sched;
	unsigned int num_atm_inflights_per_queue;
	enum dlb_cq_poll_modes poll_mode;
	uint8_t revision;
	bool configured;
};
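
/* Hedged sketch of updating the shared inflights counter with the
 * __atomic builtins, as the field comment above requires (illustrative
 * only; a real check-and-add would need a compare-exchange loop to
 * make the limit check and the increment atomic together):
 *
 *	uint32_t cur = __atomic_load_n(&dlb->inflights, __ATOMIC_SEQ_CST);
 *
 *	if (cur + num_new > dlb->new_event_limit)
 *		return -ENOSPC;
 *	__atomic_fetch_add(&dlb->inflights, num_new, __ATOMIC_SEQ_CST);
 */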

/* End Eventdev related defines and structs */

/* externs */

extern struct process_local_port_data dlb_port[][NUM_DLB_PORT_TYPES];

/* Forward declarations for non-inlined functions */

void dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f);

int dlb_xstats_init(struct dlb_eventdev *dlb);

void dlb_xstats_uninit(struct dlb_eventdev *dlb);

int dlb_eventdev_xstats_get(const struct rte_eventdev *dev,
			    enum rte_event_dev_xstats_mode mode,
			    uint8_t queue_port_id, const unsigned int ids[],
			    uint64_t values[], unsigned int n);

int dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,
				  enum rte_event_dev_xstats_mode mode,
				  uint8_t queue_port_id,
				  struct rte_event_dev_xstats_name *xstat_names,
				  unsigned int *ids, unsigned int size);

uint64_t dlb_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
					 const char *name, unsigned int *id);

int dlb_eventdev_xstats_reset(struct rte_eventdev *dev,
			      enum rte_event_dev_xstats_mode mode,
			      int16_t queue_port_id,
			      const uint32_t ids[],
			      uint32_t nb_ids);

int test_dlb_eventdev(void);

int dlb_primary_eventdev_probe(struct rte_eventdev *dev,
			       const char *name,
			       struct dlb_devargs *dlb_args);

int dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
				 const char *name);

uint32_t dlb_get_queue_depth(struct dlb_eventdev *dlb,
			     struct dlb_eventdev_queue *queue);

int dlb_parse_params(const char *params,
		     const char *name,
		     struct dlb_devargs *dlb_args);

void dlb_entry_points_init(struct rte_eventdev *dev);

#endif	/* _DLB_PRIV_H_ */