/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <inttypes.h>

#include <rte_malloc.h>
#include <rte_eventdev.h>

#include "dlb2_priv.h"
#include "dlb2_inline_fns.h"

enum dlb2_xstats_type {
	/* common to device and port */
	rx_ok, /**< Receive an event */
	rx_drop, /**< Error bit set in received QE */
	rx_interrupt_wait, /**< Wait on an interrupt */
	rx_umonitor_umwait, /**< Block using umwait */
	tx_ok, /**< Transmit an event */
	total_polls, /**< Call dequeue_burst */
	zero_polls, /**< Call dequeue_burst and return 0 */
	tx_nospc_ldb_hw_credits, /**< Insufficient LDB h/w credits */
	tx_nospc_dir_hw_credits, /**< Insufficient DIR h/w credits */
	tx_nospc_inflight_max, /**< Reach the new_event_threshold */
	tx_nospc_new_event_limit, /**< Insufficient s/w credits */
	tx_nospc_inflight_credits, /**< Port has too few s/w credits */
	/* device specific */
	nb_events_limit,
	inflight_events,
	ldb_pool_size,
	dir_pool_size,
	/* port specific */
	tx_new, /**< Send an OP_NEW event */
	tx_fwd, /**< Send an OP_FORWARD event */
	tx_rel, /**< Send an OP_RELEASE event */
	tx_implicit_rel, /**< Issue an implicit event release */
	tx_sched_ordered, /**< Send a SCHED_TYPE_ORDERED event */
	tx_sched_unordered, /**< Send a SCHED_TYPE_PARALLEL event */
	tx_sched_atomic, /**< Send a SCHED_TYPE_ATOMIC event */
	tx_sched_directed, /**< Send a directed event */
	tx_invalid, /**< Send an event with an invalid op */
	outstanding_releases, /**< # of releases a port owes */
	max_outstanding_releases, /**< max # of releases a port can owe */
	rx_sched_ordered, /**< Dequeue an ordered event */
	rx_sched_unordered, /**< Dequeue an unordered event */
	rx_sched_atomic, /**< Dequeue an atomic event */
	rx_sched_directed, /**< Dequeue a directed event */
	rx_sched_invalid, /**< Dequeued event's sched type was invalid */
	/* common to port and queue */
	is_configured, /**< Port is configured */
	is_load_balanced, /**< Port is LDB */
	hw_id, /**< Hardware ID */
	/* queue specific */
	num_links, /**< Number of ports linked */
	sched_type, /**< Queue sched type */
	enq_ok, /**< # events enqueued to the queue */
	current_depth, /**< Current queue depth */
	depth_threshold, /**< Programmed depth threshold */
	depth_le50_threshold,
	/**< Depth LE to 50% of the configured hardware threshold */
	depth_gt50_le75_threshold,
	/**< Depth GT 50%, but LE to 75% of the configured hardware threshold */
	depth_gt75_le100_threshold,
	/**< Depth GT 75%, but LE to the configured hardware threshold */
	depth_gt100_threshold
	/**< Depth GT 100% of the configured hardware threshold */
};
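
/*
 * Each xstats type above is exported under a mode-specific name built in
 * dlb2_xstats_init() below: device stats as "dev_<name>" (e.g. "dev_rx_ok"),
 * port stats as "port_<id>_<name>" (e.g. "port_0_tx_ok"), and queue stats
 * as "qid_<id>_<name>" (e.g. "qid_3_current_depth").
 */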

typedef uint64_t (*dlb2_xstats_fn)(struct dlb2_eventdev *dlb2,
		uint16_t obj_idx, /* port or queue id */
		enum dlb2_xstats_type stat, int extra_arg);

enum dlb2_xstats_fn_type {
	DLB2_XSTATS_FN_DEV,
	DLB2_XSTATS_FN_PORT,
	DLB2_XSTATS_FN_QUEUE
};

struct dlb2_xstats_entry {
	struct rte_event_dev_xstats_name name;
	uint64_t reset_value; /* an offset to be taken away to emulate resets */
	enum dlb2_xstats_fn_type fn_id;
	enum dlb2_xstats_type stat;
	enum rte_event_dev_xstats_mode mode;
	int extra_arg;
	uint16_t obj_idx;
	uint8_t reset_allowed; /* when set, this value can be reset */
};
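
/*
 * Reads return the raw counter minus reset_value; resetting a stat simply
 * stores the current raw value in reset_value, so the underlying hardware
 * and software counters themselves are never cleared.
 */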

/* Some device stats are simply a summation of the corresponding port values */
static uint64_t
dlb2_device_traffic_stat_get(struct dlb2_eventdev *dlb2,
			     int which_stat)
{
	int i;
	uint64_t val = 0;

	for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
		struct dlb2_eventdev_port *port = &dlb2->ev_ports[i];

		if (!port->setup_done)
			continue;

		switch (which_stat) {
		case rx_ok:
			val += port->stats.traffic.rx_ok;
			break;
		case rx_drop:
			val += port->stats.traffic.rx_drop;
			break;
		case rx_interrupt_wait:
			val += port->stats.traffic.rx_interrupt_wait;
			break;
		case rx_umonitor_umwait:
			val += port->stats.traffic.rx_umonitor_umwait;
			break;
		case tx_ok:
			val += port->stats.traffic.tx_ok;
			break;
		case total_polls:
			val += port->stats.traffic.total_polls;
			break;
		case zero_polls:
			val += port->stats.traffic.zero_polls;
			break;
		case tx_nospc_ldb_hw_credits:
			val += port->stats.traffic.tx_nospc_ldb_hw_credits;
			break;
		case tx_nospc_dir_hw_credits:
			val += port->stats.traffic.tx_nospc_dir_hw_credits;
			break;
		case tx_nospc_inflight_max:
			val += port->stats.traffic.tx_nospc_inflight_max;
			break;
		case tx_nospc_new_event_limit:
			val += port->stats.traffic.tx_nospc_new_event_limit;
			break;
		case tx_nospc_inflight_credits:
			val += port->stats.traffic.tx_nospc_inflight_credits;
			break;
		default:
			return -1;
		}
	}
	return val;
}

static uint64_t
get_dev_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx __rte_unused,
	     enum dlb2_xstats_type type, int extra_arg __rte_unused)
{
	switch (type) {
	case rx_ok:
	case rx_drop:
	case rx_interrupt_wait:
	case rx_umonitor_umwait:
	case tx_ok:
	case total_polls:
	case zero_polls:
	case tx_nospc_ldb_hw_credits:
	case tx_nospc_dir_hw_credits:
	case tx_nospc_inflight_max:
	case tx_nospc_new_event_limit:
	case tx_nospc_inflight_credits:
		return dlb2_device_traffic_stat_get(dlb2, type);
	case nb_events_limit:
		return dlb2->new_event_limit;
	case inflight_events:
		return __atomic_load_n(&dlb2->inflights, __ATOMIC_SEQ_CST);
	case ldb_pool_size:
		return dlb2->num_ldb_credits;
	case dir_pool_size:
		return dlb2->num_dir_credits;
	default: return -1;
	}
}

static uint64_t
get_port_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx,
	      enum dlb2_xstats_type type, int extra_arg __rte_unused)
{
	struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[obj_idx];

	switch (type) {
	case rx_ok: return ev_port->stats.traffic.rx_ok;

	case rx_drop: return ev_port->stats.traffic.rx_drop;

	case rx_interrupt_wait: return ev_port->stats.traffic.rx_interrupt_wait;

	case rx_umonitor_umwait:
		return ev_port->stats.traffic.rx_umonitor_umwait;

	case tx_ok: return ev_port->stats.traffic.tx_ok;

	case total_polls: return ev_port->stats.traffic.total_polls;

	case zero_polls: return ev_port->stats.traffic.zero_polls;

	case tx_nospc_ldb_hw_credits:
		return ev_port->stats.traffic.tx_nospc_ldb_hw_credits;

	case tx_nospc_dir_hw_credits:
		return ev_port->stats.traffic.tx_nospc_dir_hw_credits;

	case tx_nospc_inflight_max:
		return ev_port->stats.traffic.tx_nospc_inflight_max;

	case tx_nospc_new_event_limit:
		return ev_port->stats.traffic.tx_nospc_new_event_limit;

	case tx_nospc_inflight_credits:
		return ev_port->stats.traffic.tx_nospc_inflight_credits;

	case is_configured: return ev_port->setup_done;

	case is_load_balanced: return !ev_port->qm_port.is_directed;

	case hw_id: return ev_port->qm_port.id;

	case tx_new: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_NEW];

	case tx_fwd: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD];

	case tx_rel: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE];

	case tx_implicit_rel: return ev_port->stats.tx_implicit_rel;

	case tx_sched_ordered:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_ORDERED];

	case tx_sched_unordered:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_UNORDERED];

	case tx_sched_atomic:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_ATOMIC];

	case tx_sched_directed:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_DIRECTED];

	case tx_invalid: return ev_port->stats.tx_invalid;

	case outstanding_releases: return ev_port->outstanding_releases;

	case max_outstanding_releases:
		return DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;

	case rx_sched_ordered:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_ORDERED];

	case rx_sched_unordered:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_UNORDERED];

	case rx_sched_atomic:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_ATOMIC];

	case rx_sched_directed:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_DIRECTED];

	case rx_sched_invalid: return ev_port->stats.rx_sched_invalid;

	default: return -1;
	}
}

static uint64_t
dlb2_get_threshold_stat(struct dlb2_eventdev *dlb2, int qid, int stat)
{
	int port = 0;
	uint64_t tally = 0;

	for (port = 0; port < DLB2_MAX_NUM_PORTS; port++)
		tally += dlb2->ev_ports[port].stats.queue[qid].qid_depth[stat];

	return tally;
}

static uint64_t
dlb2_get_enq_ok_stat(struct dlb2_eventdev *dlb2, int qid)
{
	int port = 0;
	uint64_t enq_ok_tally = 0;

	for (port = 0; port < DLB2_MAX_NUM_PORTS; port++)
		enq_ok_tally += dlb2->ev_ports[port].stats.queue[qid].enq_ok;

	return enq_ok_tally;
}

static uint64_t
get_queue_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx,
	       enum dlb2_xstats_type type, int extra_arg __rte_unused)
{
	struct dlb2_eventdev_queue *ev_queue =
		&dlb2->ev_queues[obj_idx];

	switch (type) {
	case is_configured: return ev_queue->setup_done;

	case is_load_balanced: return !ev_queue->qm_queue.is_directed;

	case hw_id: return ev_queue->qm_queue.id;

	case num_links: return ev_queue->num_links;

	case sched_type: return ev_queue->qm_queue.sched_type;

	case enq_ok: return dlb2_get_enq_ok_stat(dlb2, obj_idx);

	case current_depth: return dlb2_get_queue_depth(dlb2, ev_queue);

	case depth_threshold: return ev_queue->depth_threshold;

	case depth_le50_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_LE50);

	case depth_gt50_le75_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_GT50_LE75);

	case depth_gt75_le100_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_GT75_LE100);

	case depth_gt100_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_GT100);

	default: return -1;
	}
}

int
dlb2_xstats_init(struct dlb2_eventdev *dlb2)
{
	/*
	 * Define the stat names and types used to build up the device
	 * xstats array.
	 * There are multiple sets of stats:
	 * - device-level,
	 * - per-port,
	 * - per-qid,
	 *
	 * For each of these sets, we have three parallel arrays: one for the
	 * names, one for the stat type parameter to be passed in the fn call
	 * to get that stat, and a third that controls whether the stat may
	 * be reset. All three arrays must be kept in sync.
	 */
	static const char * const dev_stats[] = {
		"rx_ok",
		"rx_drop",
		"rx_interrupt_wait",
		"rx_umonitor_umwait",
		"tx_ok",
		"total_polls",
		"zero_polls",
		"tx_nospc_ldb_hw_credits",
		"tx_nospc_dir_hw_credits",
		"tx_nospc_inflight_max",
		"tx_nospc_new_event_limit",
		"tx_nospc_inflight_credits",
		"nb_events_limit",
		"inflight_events",
		"ldb_pool_size",
		"dir_pool_size",
	};
	static const enum dlb2_xstats_type dev_types[] = {
		rx_ok,
		rx_drop,
		rx_interrupt_wait,
		rx_umonitor_umwait,
		tx_ok,
		total_polls,
		zero_polls,
		tx_nospc_ldb_hw_credits,
		tx_nospc_dir_hw_credits,
		tx_nospc_inflight_max,
		tx_nospc_new_event_limit,
		tx_nospc_inflight_credits,
		nb_events_limit,
		inflight_events,
		ldb_pool_size,
		dir_pool_size,
	};
	/* Note: device-level stats are not allowed to be reset. */
	static const uint8_t dev_reset_allowed[] = {
		0, /* rx_ok */
		0, /* rx_drop */
		0, /* rx_interrupt_wait */
		0, /* rx_umonitor_umwait */
		0, /* tx_ok */
		0, /* total_polls */
		0, /* zero_polls */
		0, /* tx_nospc_ldb_hw_credits */
		0, /* tx_nospc_dir_hw_credits */
		0, /* tx_nospc_inflight_max */
		0, /* tx_nospc_new_event_limit */
		0, /* tx_nospc_inflight_credits */
		0, /* nb_events_limit */
		0, /* inflight_events */
		0, /* ldb_pool_size */
		0, /* dir_pool_size */
	};
	static const char * const port_stats[] = {
		"is_configured",
		"is_load_balanced",
		"hw_id",
		"rx_ok",
		"rx_drop",
		"rx_interrupt_wait",
		"rx_umonitor_umwait",
		"tx_ok",
		"total_polls",
		"zero_polls",
		"tx_nospc_ldb_hw_credits",
		"tx_nospc_dir_hw_credits",
		"tx_nospc_inflight_max",
		"tx_nospc_new_event_limit",
		"tx_nospc_inflight_credits",
		"tx_new",
		"tx_fwd",
		"tx_rel",
		"tx_implicit_rel",
		"tx_sched_ordered",
		"tx_sched_unordered",
		"tx_sched_atomic",
		"tx_sched_directed",
		"tx_invalid",
		"outstanding_releases",
		"max_outstanding_releases",
		"rx_sched_ordered",
		"rx_sched_unordered",
		"rx_sched_atomic",
		"rx_sched_directed",
		"rx_sched_invalid"
	};
	static const enum dlb2_xstats_type port_types[] = {
		is_configured,
		is_load_balanced,
		hw_id,
		rx_ok,
		rx_drop,
		rx_interrupt_wait,
		rx_umonitor_umwait,
		tx_ok,
		total_polls,
		zero_polls,
		tx_nospc_ldb_hw_credits,
		tx_nospc_dir_hw_credits,
		tx_nospc_inflight_max,
		tx_nospc_new_event_limit,
		tx_nospc_inflight_credits,
		tx_new,
		tx_fwd,
		tx_rel,
		tx_implicit_rel,
		tx_sched_ordered,
		tx_sched_unordered,
		tx_sched_atomic,
		tx_sched_directed,
		tx_invalid,
		outstanding_releases,
		max_outstanding_releases,
		rx_sched_ordered,
		rx_sched_unordered,
		rx_sched_atomic,
		rx_sched_directed,
		rx_sched_invalid
	};
	static const uint8_t port_reset_allowed[] = {
		0, /* is_configured */
		0, /* is_load_balanced */
		0, /* hw_id */
		1, /* rx_ok */
		1, /* rx_drop */
		1, /* rx_interrupt_wait */
		1, /* rx_umonitor_umwait */
		1, /* tx_ok */
		1, /* total_polls */
		1, /* zero_polls */
		1, /* tx_nospc_ldb_hw_credits */
		1, /* tx_nospc_dir_hw_credits */
		1, /* tx_nospc_inflight_max */
		1, /* tx_nospc_new_event_limit */
		1, /* tx_nospc_inflight_credits */
		1, /* tx_new */
		1, /* tx_fwd */
		1, /* tx_rel */
		1, /* tx_implicit_rel */
		1, /* tx_sched_ordered */
		1, /* tx_sched_unordered */
		1, /* tx_sched_atomic */
		1, /* tx_sched_directed */
		1, /* tx_invalid */
		0, /* outstanding_releases */
		0, /* max_outstanding_releases */
		1, /* rx_sched_ordered */
		1, /* rx_sched_unordered */
		1, /* rx_sched_atomic */
		1, /* rx_sched_directed */
		1  /* rx_sched_invalid */
	};

	/* QID specific stats */
	static const char * const qid_stats[] = {
		"is_configured",
		"is_load_balanced",
		"hw_id",
		"num_links",
		"sched_type",
		"enq_ok",
		"current_depth",
		"depth_threshold",
		"depth_le50_threshold",
		"depth_gt50_le75_threshold",
		"depth_gt75_le100_threshold",
		"depth_gt100_threshold",
	};
	static const enum dlb2_xstats_type qid_types[] = {
		is_configured,
		is_load_balanced,
		hw_id,
		num_links,
		sched_type,
		enq_ok,
		current_depth,
		depth_threshold,
		depth_le50_threshold,
		depth_gt50_le75_threshold,
		depth_gt75_le100_threshold,
		depth_gt100_threshold,
	};
	static const uint8_t qid_reset_allowed[] = {
		0, /* is_configured */
		0, /* is_load_balanced */
		0, /* hw_id */
		0, /* num_links */
		0, /* sched_type */
		1, /* enq_ok */
		0, /* current_depth */
		0, /* depth_threshold */
		1, /* depth_le50_threshold */
		1, /* depth_gt50_le75_threshold */
		1, /* depth_gt75_le100_threshold */
		1, /* depth_gt100_threshold */
	};

	/* ---- end of stat definitions ---- */

	/* check sizes, since a missed comma can lead to strings being
	 * joined by the compiler.
	 */
	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));

	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_reset_allowed));
	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));

	/* other vars */
	const unsigned int count = RTE_DIM(dev_stats) +
		DLB2_MAX_NUM_PORTS * RTE_DIM(port_stats) +
		DLB2_MAX_NUM_QUEUES * RTE_DIM(qid_stats);
	unsigned int i, port, qid, stat_id = 0;

	dlb2->xstats = rte_zmalloc_socket(NULL,
			sizeof(dlb2->xstats[0]) * count, 0,
			dlb2->qm_instance.info.socket_id);
	if (dlb2->xstats == NULL)
		return -ENOMEM;

#define sname dlb2->xstats[stat_id].name.name
	for (i = 0; i < RTE_DIM(dev_stats); i++, stat_id++) {
		dlb2->xstats[stat_id] = (struct dlb2_xstats_entry) {
			.fn_id = DLB2_XSTATS_FN_DEV,
			.stat = dev_types[i],
			.mode = RTE_EVENT_DEV_XSTATS_DEVICE,
			.reset_allowed = dev_reset_allowed[i],
		};
		snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
	}
	dlb2->xstats_count_mode_dev = stat_id;

	for (port = 0; port < DLB2_MAX_NUM_PORTS; port++) {
		dlb2->xstats_offset_for_port[port] = stat_id;

		uint32_t count_offset = stat_id;

		for (i = 0; i < RTE_DIM(port_stats); i++, stat_id++) {
			dlb2->xstats[stat_id] = (struct dlb2_xstats_entry){
				.fn_id = DLB2_XSTATS_FN_PORT,
				.obj_idx = port,
				.stat = port_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_PORT,
				.reset_allowed = port_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "port_%u_%s",
				 port, port_stats[i]);
		}

		dlb2->xstats_count_per_port[port] = stat_id - count_offset;
	}

	dlb2->xstats_count_mode_port = stat_id - dlb2->xstats_count_mode_dev;

	for (qid = 0; qid < DLB2_MAX_NUM_QUEUES; qid++) {
		uint32_t count_offset = stat_id;

		dlb2->xstats_offset_for_qid[qid] = stat_id;

		for (i = 0; i < RTE_DIM(qid_stats); i++, stat_id++) {
			dlb2->xstats[stat_id] = (struct dlb2_xstats_entry){
				.fn_id = DLB2_XSTATS_FN_QUEUE,
				.obj_idx = qid,
				.stat = qid_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
				.reset_allowed = qid_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "qid_%u_%s",
				 qid, qid_stats[i]);
		}

		dlb2->xstats_count_per_qid[qid] = stat_id - count_offset;
	}

	dlb2->xstats_count_mode_queue = stat_id -
		(dlb2->xstats_count_mode_dev + dlb2->xstats_count_mode_port);
#undef sname

	dlb2->xstats_count = stat_id;

	return 0;
}
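
/*
 * Illustrative (not part of the driver): applications read these stats
 * through the generic eventdev xstats API. A minimal sketch, assuming event
 * device ID 0 and a caller-chosen buffer size of 64:
 *
 *	struct rte_event_dev_xstats_name names[64];
 *	unsigned int ids[64];
 *	uint64_t values[64];
 *	int n = rte_event_dev_xstats_names_get(0, RTE_EVENT_DEV_XSTATS_PORT,
 *					       0, names, ids, 64);
 *	if (n > 0 && n <= 64) {
 *		n = rte_event_dev_xstats_get(0, RTE_EVENT_DEV_XSTATS_PORT,
 *					     0, ids, values, n);
 *		for (int i = 0; i < n; i++)
 *			printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
 *	}
 */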

void
dlb2_xstats_uninit(struct dlb2_eventdev *dlb2)
{
	rte_free(dlb2->xstats);
	dlb2->xstats_count = 0;
}

int
dlb2_eventdev_xstats_get_names(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	const struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	unsigned int i;
	unsigned int xidx = 0;
	uint32_t xstats_mode_count = 0;
	uint32_t start_offset = 0;

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = dlb2->xstats_count_mode_dev;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= DLB2_MAX_NUM_PORTS)
			break;
		xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
		start_offset = dlb2->xstats_offset_for_port[queue_port_id];
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
#if (DLB2_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
		if (queue_port_id >= DLB2_MAX_NUM_QUEUES)
			break;
#endif
		xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
		start_offset = dlb2->xstats_offset_for_qid[queue_port_id];
		break;
	default:
		return -EINVAL;
	}

	if (xstats_mode_count > size || ids == NULL || xstats_names == NULL)
		return xstats_mode_count;

	for (i = 0; i < dlb2->xstats_count && xidx < size; i++) {
		if (dlb2->xstats[i].mode != mode)
			continue;

		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
		    queue_port_id != dlb2->xstats[i].obj_idx)
			continue;

		xstats_names[xidx] = dlb2->xstats[i].name;
		if (ids)
			ids[xidx] = start_offset + xidx;
		xidx++;
	}
	return xidx;
}

static int
dlb2_xstats_update(struct dlb2_eventdev *dlb2,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n, const uint32_t reset)
{
	unsigned int i;
	unsigned int xidx = 0;
	uint32_t xstats_mode_count = 0;

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = dlb2->xstats_count_mode_dev;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= DLB2_MAX_NUM_PORTS)
			goto invalid_value;
		xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
#if (DLB2_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
		if (queue_port_id >= DLB2_MAX_NUM_QUEUES)
			goto invalid_value;
#endif
		xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
		break;
	default:
		goto invalid_value;
	}

	for (i = 0; i < n && xidx < xstats_mode_count; i++) {
		struct dlb2_xstats_entry *xs;
		dlb2_xstats_fn fn;

		/* Validate the id before indexing the xstats array */
		if (ids[i] >= dlb2->xstats_count)
			continue;

		xs = &dlb2->xstats[ids[i]];
		if (xs->mode != mode)
			continue;

		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
		    queue_port_id != xs->obj_idx)
			continue;

		switch (xs->fn_id) {
		case DLB2_XSTATS_FN_DEV:
			fn = get_dev_stat;
			break;
		case DLB2_XSTATS_FN_PORT:
			fn = get_port_stat;
			break;
		case DLB2_XSTATS_FN_QUEUE:
			fn = get_queue_stat;
			break;
		default:
			DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
			goto invalid_value;
		}

		uint64_t val = fn(dlb2, xs->obj_idx, xs->stat,
				  xs->extra_arg) - xs->reset_value;

		if (values)
			values[xidx] = val;

		if (xs->reset_allowed && reset)
			xs->reset_value += val;

		xidx++;
	}

	return xidx;

invalid_value:
	return -EINVAL;
}

int
dlb2_eventdev_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	const uint32_t reset = 0;

	return dlb2_xstats_update(dlb2, mode, queue_port_id, ids, values, n,
				  reset);
}

uint64_t
dlb2_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
				 const char *name, unsigned int *id)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	unsigned int i;
	dlb2_xstats_fn fn;

	for (i = 0; i < dlb2->xstats_count; i++) {
		struct dlb2_xstats_entry *xs = &dlb2->xstats[i];

		if (strncmp(xs->name.name, name,
			    RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0) {
			if (id != NULL)
				*id = i;

			switch (xs->fn_id) {
			case DLB2_XSTATS_FN_DEV:
				fn = get_dev_stat;
				break;
			case DLB2_XSTATS_FN_PORT:
				fn = get_port_stat;
				break;
			case DLB2_XSTATS_FN_QUEUE:
				fn = get_queue_stat;
				break;
			default:
				DLB2_LOG_ERR("Unexpected xstat fn_id %d\n",
					     xs->fn_id);
				return (uint64_t)-1;
			}

			return fn(dlb2, xs->obj_idx, xs->stat,
				  xs->extra_arg) - xs->reset_value;
		}
	}
	if (id != NULL)
		*id = (uint32_t)-1;
	return (uint64_t)-1;
}
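
/*
 * Illustrative (not part of the driver): a single stat can be fetched by its
 * full name via the generic API, e.g. for an assumed event device ID of 0:
 *
 *	unsigned int id;
 *	uint64_t v = rte_event_dev_xstats_by_name_get(0, "dev_rx_ok", &id);
 *
 * If the name is not found, the value returned is (uint64_t)-1 and the id is
 * set to (uint32_t)-1.
 */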

static void
dlb2_xstats_reset_range(struct dlb2_eventdev *dlb2, uint32_t start,
			uint32_t num)
{
	uint32_t i;
	dlb2_xstats_fn fn;

	for (i = start; i < start + num; i++) {
		struct dlb2_xstats_entry *xs = &dlb2->xstats[i];

		if (!xs->reset_allowed)
			continue;

		switch (xs->fn_id) {
		case DLB2_XSTATS_FN_DEV:
			fn = get_dev_stat;
			break;
		case DLB2_XSTATS_FN_PORT:
			fn = get_port_stat;
			break;
		case DLB2_XSTATS_FN_QUEUE:
			fn = get_queue_stat;
			break;
		default:
			DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
			return;
		}

		uint64_t val = fn(dlb2, xs->obj_idx, xs->stat, xs->extra_arg);
		xs->reset_value = val;
	}
}

static int
dlb2_xstats_reset_queue(struct dlb2_eventdev *dlb2, uint8_t queue_id,
			const uint32_t ids[], uint32_t nb_ids)
{
	const uint32_t reset = 1;

	if (ids) {
		uint32_t nb_reset = dlb2_xstats_update(dlb2,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					queue_id, ids, NULL, nb_ids,
					reset);
		return nb_reset == nb_ids ? 0 : -EINVAL;
	}

	dlb2_xstats_reset_range(dlb2, dlb2->xstats_offset_for_qid[queue_id],
				dlb2->xstats_count_per_qid[queue_id]);

	return 0;
}

static int
dlb2_xstats_reset_port(struct dlb2_eventdev *dlb2, uint8_t port_id,
		       const uint32_t ids[], uint32_t nb_ids)
{
	const uint32_t reset = 1;
	int offset = dlb2->xstats_offset_for_port[port_id];
	int nb_stat = dlb2->xstats_count_per_port[port_id];

	if (ids) {
		uint32_t nb_reset = dlb2_xstats_update(dlb2,
					RTE_EVENT_DEV_XSTATS_PORT, port_id,
					ids, NULL, nb_ids,
					reset);
		return nb_reset == nb_ids ? 0 : -EINVAL;
	}

	dlb2_xstats_reset_range(dlb2, offset, nb_stat);
	return 0;
}

static int
dlb2_xstats_reset_dev(struct dlb2_eventdev *dlb2, const uint32_t ids[],
		      uint32_t nb_ids)
{
	uint32_t i;

	if (ids) {
		for (i = 0; i < nb_ids; i++) {
			uint32_t id = ids[i];

			if (id >= dlb2->xstats_count_mode_dev)
				return -EINVAL;
			dlb2_xstats_reset_range(dlb2, id, 1);
		}
	} else {
		for (i = 0; i < dlb2->xstats_count_mode_dev; i++)
			dlb2_xstats_reset_range(dlb2, i, 1);
	}

	return 0;
}

int
dlb2_eventdev_xstats_reset(struct rte_eventdev *dev,
			   enum rte_event_dev_xstats_mode mode,
			   int16_t queue_port_id,
			   const uint32_t ids[],
			   uint32_t nb_ids)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	uint32_t i;

	/* handle -1 for queue_port_id here, looping over all ports/queues */
	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		if (dlb2_xstats_reset_dev(dlb2, ids, nb_ids))
			return -EINVAL;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id == -1) {
			for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
				if (dlb2_xstats_reset_port(dlb2, i,
							   ids, nb_ids))
					return -EINVAL;
			}
		} else if (queue_port_id < DLB2_MAX_NUM_PORTS) {
			if (dlb2_xstats_reset_port(dlb2, queue_port_id,
						   ids, nb_ids))
				return -EINVAL;
		}
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id == -1) {
			for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++) {
				if (dlb2_xstats_reset_queue(dlb2, i,
							    ids, nb_ids))
					return -EINVAL;
			}
		} else if (queue_port_id < DLB2_MAX_NUM_QUEUES) {
			if (dlb2_xstats_reset_queue(dlb2, queue_port_id,
						    ids, nb_ids))
				return -EINVAL;
		}
		break;
	}

	return 0;
}
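
/*
 * Illustrative (not part of the driver): resettable stats are cleared
 * through the generic API. A sketch, assuming event device ID 0; passing
 * queue_port_id = -1 with ids = NULL resets every resettable port stat:
 *
 *	rte_event_dev_xstats_reset(0, RTE_EVENT_DEV_XSTATS_PORT, -1,
 *				   NULL, 0);
 */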

void
dlb2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	struct dlb2_eventdev *dlb2;
	struct dlb2_hw_dev *handle;
	int i;

	dlb2 = dlb2_pmd_priv(dev);

	if (dlb2 == NULL) {
		fprintf(f, "DLB2 Event device cannot be dumped!\n");
		return;
	}

	if (!dlb2->configured)
		fprintf(f, "DLB2 Event device is not configured\n");

	handle = &dlb2->qm_instance;

	fprintf(f, "================\n");
	fprintf(f, "DLB2 Device Dump\n");
	fprintf(f, "================\n");

	fprintf(f, "Processor supports umonitor/umwait instructions = %s\n",
		dlb2->umwait_allowed ? "yes" : "no");

	/* Generic top level device information */

	fprintf(f, "device is configured and run state = ");
	if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
		fprintf(f, "STOPPED\n");
	else if (dlb2->run_state == DLB2_RUN_STATE_STOPPING)
		fprintf(f, "STOPPING\n");
	else if (dlb2->run_state == DLB2_RUN_STATE_STARTING)
		fprintf(f, "STARTING\n");
	else if (dlb2->run_state == DLB2_RUN_STATE_STARTED)
		fprintf(f, "STARTED\n");
	else
		fprintf(f, "UNEXPECTED\n");

	fprintf(f, "domain ID=%u, socket_id=%u, evdev=%p\n",
		handle->domain_id, handle->info.socket_id, dlb2->event_dev);

	fprintf(f, "num dir ports=%u, num dir queues=%u\n",
		dlb2->num_dir_ports, dlb2->num_dir_queues);

	fprintf(f, "num ldb ports=%u, num ldb queues=%u\n",
		dlb2->num_ldb_ports, dlb2->num_ldb_queues);

	fprintf(f, "num atomic inflights=%u, hist list entries=%u\n",
		handle->cfg.resources.num_atomic_inflights,
		handle->cfg.resources.num_hist_list_entries);

	fprintf(f, "results from most recent hw resource query:\n");

	fprintf(f, "\tnum_sched_domains = %u\n",
		dlb2->hw_rsrc_query_results.num_sched_domains);

	fprintf(f, "\tnum_ldb_queues = %u\n",
		dlb2->hw_rsrc_query_results.num_ldb_queues);

	fprintf(f, "\tnum_ldb_ports = %u\n",
		dlb2->hw_rsrc_query_results.num_ldb_ports);

	fprintf(f, "\tnum_dir_ports = %u\n",
		dlb2->hw_rsrc_query_results.num_dir_ports);

	fprintf(f, "\tnum_atomic_inflights = %u\n",
		dlb2->hw_rsrc_query_results.num_atomic_inflights);

	fprintf(f, "\tnum_hist_list_entries = %u\n",
		dlb2->hw_rsrc_query_results.num_hist_list_entries);

	fprintf(f, "\tmax_contiguous_hist_list_entries = %u\n",
		dlb2->hw_rsrc_query_results.max_contiguous_hist_list_entries);

	fprintf(f, "\tnum_ldb_credits = %u\n",
		dlb2->hw_rsrc_query_results.num_ldb_credits);

	fprintf(f, "\tnum_dir_credits = %u\n",
		dlb2->hw_rsrc_query_results.num_dir_credits);

	/* Port level information */

	for (i = 0; i < dlb2->num_ports; i++) {
		struct dlb2_eventdev_port *p = &dlb2->ev_ports[i];
		int j;

		if (!p->enq_configured)
			fprintf(f, "Port_%d is not configured\n", i);

		fprintf(f, "Port_%d\n", i);
		fprintf(f, "=======\n");

		fprintf(f, "\tevport_%u is configured, setup done=%d\n",
			p->id, p->setup_done);

		fprintf(f, "\tconfig state=%d, port state=%d\n",
			p->qm_port.config_state, p->qm_port.state);

		fprintf(f, "\tport is %s\n",
			p->qm_port.is_directed ? "directed" : "load balanced");

		fprintf(f, "\toutstanding releases=%u\n",
			p->outstanding_releases);

		fprintf(f, "\tinflight max=%u, inflight credits=%u\n",
			p->inflight_max, p->inflight_credits);

		fprintf(f, "\tcredit update quanta=%u, implicit release=%u\n",
			p->credit_update_quanta, p->implicit_release);

		fprintf(f, "\tnum_links=%d, queues -> ", p->num_links);

		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
			if (p->link[j].valid)
				fprintf(f, "id=%u prio=%u ",
					p->link[j].queue_id,
					p->link[j].priority);
		}
		fprintf(f, "\n");

		fprintf(f, "\thardware port id=%u\n", p->qm_port.id);

		fprintf(f, "\tcached_ldb_credits=%u\n",
			p->qm_port.cached_ldb_credits);

		fprintf(f, "\tldb_credits = %u\n",
			p->qm_port.ldb_credits);

		fprintf(f, "\tcached_dir_credits = %u\n",
			p->qm_port.cached_dir_credits);

		fprintf(f, "\tdir_credits = %u\n",
			p->qm_port.dir_credits);

		fprintf(f, "\tgenbit=%d, cq_idx=%d, cq_depth=%d\n",
			p->qm_port.gen_bit,
			p->qm_port.cq_idx,
			p->qm_port.cq_depth);

		fprintf(f, "\tinterrupt armed=%d\n",
			p->qm_port.int_armed);

		fprintf(f, "\tPort statistics\n");

		fprintf(f, "\t\trx_ok %" PRIu64 "\n",
			p->stats.traffic.rx_ok);

		fprintf(f, "\t\trx_drop %" PRIu64 "\n",
			p->stats.traffic.rx_drop);

		fprintf(f, "\t\trx_interrupt_wait %" PRIu64 "\n",
			p->stats.traffic.rx_interrupt_wait);

		fprintf(f, "\t\trx_umonitor_umwait %" PRIu64 "\n",
			p->stats.traffic.rx_umonitor_umwait);

		fprintf(f, "\t\ttx_ok %" PRIu64 "\n",
			p->stats.traffic.tx_ok);

		fprintf(f, "\t\ttotal_polls %" PRIu64 "\n",
			p->stats.traffic.total_polls);

		fprintf(f, "\t\tzero_polls %" PRIu64 "\n",
			p->stats.traffic.zero_polls);

		fprintf(f, "\t\ttx_nospc_ldb_hw_credits %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_ldb_hw_credits);

		fprintf(f, "\t\ttx_nospc_dir_hw_credits %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_dir_hw_credits);

		fprintf(f, "\t\ttx_nospc_inflight_max %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_inflight_max);

		fprintf(f, "\t\ttx_nospc_new_event_limit %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_new_event_limit);

		fprintf(f, "\t\ttx_nospc_inflight_credits %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_inflight_credits);

		fprintf(f, "\t\ttx_new %" PRIu64 "\n",
			p->stats.tx_op_cnt[RTE_EVENT_OP_NEW]);

		fprintf(f, "\t\ttx_fwd %" PRIu64 "\n",
			p->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD]);

		fprintf(f, "\t\ttx_rel %" PRIu64 "\n",
			p->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE]);

		fprintf(f, "\t\ttx_implicit_rel %" PRIu64 "\n",
			p->stats.tx_implicit_rel);

		fprintf(f, "\t\ttx_sched_ordered %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_ORDERED]);

		fprintf(f, "\t\ttx_sched_unordered %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_UNORDERED]);

		fprintf(f, "\t\ttx_sched_atomic %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_ATOMIC]);

		fprintf(f, "\t\ttx_sched_directed %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_DIRECTED]);

		fprintf(f, "\t\ttx_invalid %" PRIu64 "\n",
			p->stats.tx_invalid);

		fprintf(f, "\t\trx_sched_ordered %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_ORDERED]);

		fprintf(f, "\t\trx_sched_unordered %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_UNORDERED]);

		fprintf(f, "\t\trx_sched_atomic %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_ATOMIC]);

		fprintf(f, "\t\trx_sched_directed %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_DIRECTED]);

		fprintf(f, "\t\trx_sched_invalid %" PRIu64 "\n",
			p->stats.rx_sched_invalid);
	}

	/* Queue level information */

	for (i = 0; i < dlb2->num_queues; i++) {
		struct dlb2_eventdev_queue *q = &dlb2->ev_queues[i];
		int j, k;

		if (!q->setup_done)
			fprintf(f, "Queue_%d is not configured\n", i);

		fprintf(f, "Queue_%d\n", i);
		fprintf(f, "========\n");

		fprintf(f, "\tevqueue_%u is set up\n", q->id);

		fprintf(f, "\tqueue is %s\n",
			q->qm_queue.is_directed ? "directed" : "load balanced");

		fprintf(f, "\tnum_links=%d, ports -> ", q->num_links);

		for (j = 0; j < dlb2->num_ports; j++) {
			struct dlb2_eventdev_port *p = &dlb2->ev_ports[j];

			for (k = 0; k < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; k++) {
				if (p->link[k].valid &&
				    p->link[k].queue_id == q->id)
					fprintf(f, "id=%u prio=%u ",
						p->id, p->link[k].priority);
			}
		}
		fprintf(f, "\n");

		fprintf(f, "\tcurrent depth: %u events\n",
			dlb2_get_queue_depth(dlb2, q));

		fprintf(f, "\tnum qid inflights=%u, sched_type=%d\n",
			q->qm_queue.num_qid_inflights, q->qm_queue.sched_type);
	}
}