/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <inttypes.h>

#include <rte_malloc.h>
#include <rte_eventdev.h>

#include "dlb2_priv.h"
#include "dlb2_inline_fns.h"
#include "pf/base/dlb2_regs.h"

enum dlb2_xstats_type {
	/* common to device and port */
	rx_ok, /**< Receive an event */
	rx_drop, /**< Error bit set in received QE */
	rx_interrupt_wait, /**< Wait on an interrupt */
	rx_umonitor_umwait, /**< Block using umwait */
	tx_ok, /**< Transmit an event */
	total_polls, /**< Call dequeue_burst */
	zero_polls, /**< Call dequeue_burst and return 0 */
	tx_nospc_ldb_hw_credits, /**< Insufficient LDB h/w credits */
	tx_nospc_dir_hw_credits, /**< Insufficient DIR h/w credits */
	tx_nospc_hw_credits, /**< Insufficient h/w credits */
	tx_nospc_inflight_max, /**< Reach the new_event_threshold */
	tx_nospc_new_event_limit, /**< Insufficient s/w credits */
	tx_nospc_inflight_credits, /**< Port has too few s/w credits */
	/* device specific */
	nb_events_limit,
	inflight_events,
	ldb_pool_size,
	dir_pool_size,
	pool_size,
	/* port specific */
	tx_new, /**< Send an OP_NEW event */
	tx_fwd, /**< Send an OP_FORWARD event */
	tx_rel, /**< Send an OP_RELEASE event */
	tx_implicit_rel, /**< Issue an implicit event release */
	tx_sched_ordered, /**< Send a SCHED_TYPE_ORDERED event */
	tx_sched_unordered, /**< Send a SCHED_TYPE_PARALLEL event */
	tx_sched_atomic, /**< Send a SCHED_TYPE_ATOMIC event */
	tx_sched_directed, /**< Send a directed event */
	tx_invalid, /**< Send an event with an invalid op */
	outstanding_releases, /**< # of releases a port owes */
	max_outstanding_releases, /**< max # of releases a port can owe */
	rx_sched_ordered, /**< Dequeue an ordered event */
	rx_sched_unordered, /**< Dequeue an unordered event */
	rx_sched_atomic, /**< Dequeue an atomic event */
	rx_sched_directed, /**< Dequeue a directed event */
	rx_sched_invalid, /**< Dequeue an event with an invalid sched type */
	/* common to port and queue */
	is_configured, /**< Port is configured */
	is_load_balanced, /**< Port is LDB */
	hw_id, /**< Hardware ID */
	/* queue specific */
	num_links, /**< Number of ports linked */
	sched_type, /**< Queue sched type */
	enq_ok, /**< # events enqueued to the queue */
	current_depth, /**< Current queue depth */
	depth_threshold, /**< Programmed depth threshold */
	depth_le50_threshold,
	/**< Depth LE to 50% of the configured hardware threshold */
	depth_gt50_le75_threshold,
	/**< Depth GT 50%, but LE to 75% of the configured hardware threshold */
	depth_gt75_le100_threshold,
	/**< Depth GT 75%, but LE to the configured hardware threshold */
	depth_gt100_threshold
	/**< Depth GT 100% of the configured hw threshold */
};

typedef uint64_t (*dlb2_xstats_fn)(struct dlb2_eventdev *dlb2,
		uint16_t obj_idx, /* port or queue id */
		enum dlb2_xstats_type stat, int extra_arg);

enum dlb2_xstats_fn_type {
	DLB2_XSTATS_FN_DEV,
	DLB2_XSTATS_FN_PORT,
	DLB2_XSTATS_FN_QUEUE
};

struct dlb2_xstats_entry {
	struct rte_event_dev_xstats_name name;
	uint64_t reset_value; /* an offset to be taken away to emulate resets */
	enum dlb2_xstats_fn_type fn_id;
	enum dlb2_xstats_type stat;
	enum rte_event_dev_xstats_mode mode;
	int extra_arg;
	uint16_t obj_idx;
	uint8_t reset_allowed; /* when set, this value can be reset */
};
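
/*
 * A minimal sketch of how an entry is evaluated (see dlb2_xstats_update()
 * and dlb2_xstats_reset_range() below): the raw counter is fetched through
 * the per-type callback and the stored reset_value is subtracted, so resets
 * are emulated in software and never touch the counters themselves:
 *
 *	raw = fn(dlb2, xs->obj_idx, xs->stat, xs->extra_arg);
 *	reported = raw - xs->reset_value;
 */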

/* Some device stats are simply a summation of the corresponding port values */
static uint64_t
dlb2_device_traffic_stat_get(struct dlb2_eventdev *dlb2,
			     int which_stat)
{
	int i;
	uint64_t val = 0;

	for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++) {
		struct dlb2_eventdev_port *port = &dlb2->ev_ports[i];

		if (!port->setup_done)
			continue;

		switch (which_stat) {
		case rx_ok:
			val += port->stats.traffic.rx_ok;
			break;
		case rx_drop:
			val += port->stats.traffic.rx_drop;
			break;
		case rx_interrupt_wait:
			val += port->stats.traffic.rx_interrupt_wait;
			break;
		case rx_umonitor_umwait:
			val += port->stats.traffic.rx_umonitor_umwait;
			break;
		case tx_ok:
			val += port->stats.traffic.tx_ok;
			break;
		case total_polls:
			val += port->stats.traffic.total_polls;
			break;
		case zero_polls:
			val += port->stats.traffic.zero_polls;
			break;
		case tx_nospc_ldb_hw_credits:
			val += port->stats.traffic.tx_nospc_ldb_hw_credits;
			break;
		case tx_nospc_dir_hw_credits:
			val += port->stats.traffic.tx_nospc_dir_hw_credits;
			break;
		case tx_nospc_hw_credits:
			val += port->stats.traffic.tx_nospc_hw_credits;
			break;
		case tx_nospc_inflight_max:
			val += port->stats.traffic.tx_nospc_inflight_max;
			break;
		case tx_nospc_new_event_limit:
			val += port->stats.traffic.tx_nospc_new_event_limit;
			break;
		case tx_nospc_inflight_credits:
			val += port->stats.traffic.tx_nospc_inflight_credits;
			break;
		default:
			return -1;
		}
	}
	return val;
}

static uint64_t
get_dev_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx __rte_unused,
	     enum dlb2_xstats_type type, int extra_arg __rte_unused)
{
	switch (type) {
	case rx_ok:
	case rx_drop:
	case rx_interrupt_wait:
	case rx_umonitor_umwait:
	case tx_ok:
	case total_polls:
	case zero_polls:
	case tx_nospc_ldb_hw_credits:
	case tx_nospc_dir_hw_credits:
	case tx_nospc_hw_credits:
	case tx_nospc_inflight_max:
	case tx_nospc_new_event_limit:
	case tx_nospc_inflight_credits:
		return dlb2_device_traffic_stat_get(dlb2, type);
	case nb_events_limit:
		return dlb2->new_event_limit;
	case inflight_events:
		return __atomic_load_n(&dlb2->inflights, __ATOMIC_SEQ_CST);
	case ldb_pool_size:
		return dlb2->num_ldb_credits;
	case dir_pool_size:
		return dlb2->num_dir_credits;
	case pool_size:
		return dlb2->num_credits;
	default: return -1;
	}
}

static uint64_t
get_port_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx,
	      enum dlb2_xstats_type type, int extra_arg __rte_unused)
{
	struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[obj_idx];

	switch (type) {
	case rx_ok: return ev_port->stats.traffic.rx_ok;

	case rx_drop: return ev_port->stats.traffic.rx_drop;

	case rx_interrupt_wait: return ev_port->stats.traffic.rx_interrupt_wait;

	case rx_umonitor_umwait:
		return ev_port->stats.traffic.rx_umonitor_umwait;

	case tx_ok: return ev_port->stats.traffic.tx_ok;

	case total_polls: return ev_port->stats.traffic.total_polls;

	case zero_polls: return ev_port->stats.traffic.zero_polls;

	case tx_nospc_ldb_hw_credits:
		return ev_port->stats.traffic.tx_nospc_ldb_hw_credits;

	case tx_nospc_dir_hw_credits:
		return ev_port->stats.traffic.tx_nospc_dir_hw_credits;

	case tx_nospc_hw_credits:
		return ev_port->stats.traffic.tx_nospc_hw_credits;

	case tx_nospc_inflight_max:
		return ev_port->stats.traffic.tx_nospc_inflight_max;

	case tx_nospc_new_event_limit:
		return ev_port->stats.traffic.tx_nospc_new_event_limit;

	case tx_nospc_inflight_credits:
		return ev_port->stats.traffic.tx_nospc_inflight_credits;

	case is_configured: return ev_port->setup_done;

	case is_load_balanced: return !ev_port->qm_port.is_directed;

	case hw_id: return ev_port->qm_port.id;

	case tx_new: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_NEW];

	case tx_fwd: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD];

	case tx_rel: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE];

	case tx_implicit_rel: return ev_port->stats.tx_implicit_rel;

	case tx_sched_ordered:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_ORDERED];

	case tx_sched_unordered:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_UNORDERED];

	case tx_sched_atomic:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_ATOMIC];

	case tx_sched_directed:
		return ev_port->stats.tx_sched_cnt[DLB2_SCHED_DIRECTED];

	case tx_invalid: return ev_port->stats.tx_invalid;

	case outstanding_releases: return ev_port->outstanding_releases;

	case max_outstanding_releases:
		return DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;

	case rx_sched_ordered:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_ORDERED];

	case rx_sched_unordered:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_UNORDERED];

	case rx_sched_atomic:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_ATOMIC];

	case rx_sched_directed:
		return ev_port->stats.rx_sched_cnt[DLB2_SCHED_DIRECTED];

	case rx_sched_invalid: return ev_port->stats.rx_sched_invalid;

	default: return -1;
	}
}

static uint64_t
dlb2_get_threshold_stat(struct dlb2_eventdev *dlb2, int qid, int stat)
{
	int port = 0;
	uint64_t tally = 0;

	for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++)
		tally += dlb2->ev_ports[port].stats.queue[qid].qid_depth[stat];

	return tally;
}

static uint64_t
dlb2_get_enq_ok_stat(struct dlb2_eventdev *dlb2, int qid)
{
	int port = 0;
	uint64_t enq_ok_tally = 0;

	for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++)
		enq_ok_tally += dlb2->ev_ports[port].stats.queue[qid].enq_ok;

	return enq_ok_tally;
}

static uint64_t
get_queue_stat(struct dlb2_eventdev *dlb2, uint16_t obj_idx,
	       enum dlb2_xstats_type type, int extra_arg __rte_unused)
{
	struct dlb2_eventdev_queue *ev_queue =
		&dlb2->ev_queues[obj_idx];

	switch (type) {
	case is_configured: return ev_queue->setup_done;

	case is_load_balanced: return !ev_queue->qm_queue.is_directed;

	case hw_id: return ev_queue->qm_queue.id;

	case num_links: return ev_queue->num_links;

	case sched_type: return ev_queue->qm_queue.sched_type;

	case enq_ok: return dlb2_get_enq_ok_stat(dlb2, obj_idx);

	case current_depth: return dlb2_get_queue_depth(dlb2, ev_queue);

	case depth_threshold: return ev_queue->depth_threshold;

	case depth_le50_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_LE50);

	case depth_gt50_le75_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_GT50_LE75);

	case depth_gt75_le100_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_GT75_LE100);

	case depth_gt100_threshold:
		return dlb2_get_threshold_stat(dlb2, ev_queue->id,
					       DLB2_QID_DEPTH_GT100);

	default: return -1;
	}
}

int
dlb2_xstats_init(struct dlb2_eventdev *dlb2)
{
	/*
	 * Define the stat names and types used to build up the device
	 * xstats array.
	 *
	 * There are multiple sets of stats:
	 * - device-level,
	 * - per-port,
	 * - per-qid.
	 *
	 * For each of these sets there are three parallel arrays: one with
	 * the stat names, one with the stat type parameter passed to the
	 * function that retrieves the stat, and one indicating whether the
	 * stat may be reset. All three arrays must be kept in sync.
	 */
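	/*
	 * For reference, the names exposed to the application are built from
	 * these tables as "dev_<stat>", "port_<N>_<stat>" and "qid_<N>_<stat>",
	 * e.g. "dev_rx_ok", "port_0_tx_ok" and "qid_3_enq_ok" (see the
	 * snprintf() calls further down).
	 */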
	static const char * const dev_stats[] = {
		"rx_ok",
		"rx_drop",
		"rx_interrupt_wait",
		"rx_umonitor_umwait",
		"tx_ok",
		"total_polls",
		"zero_polls",
		"tx_nospc_ldb_hw_credits",
		"tx_nospc_dir_hw_credits",
		"tx_nospc_hw_credits",
		"tx_nospc_inflight_max",
		"tx_nospc_new_event_limit",
		"tx_nospc_inflight_credits",
		"nb_events_limit",
		"inflight_events",
		"ldb_pool_size",
		"dir_pool_size",
		"pool_size",
	};
	static const enum dlb2_xstats_type dev_types[] = {
		rx_ok,
		rx_drop,
		rx_interrupt_wait,
		rx_umonitor_umwait,
		tx_ok,
		total_polls,
		zero_polls,
		tx_nospc_ldb_hw_credits,
		tx_nospc_dir_hw_credits,
		tx_nospc_hw_credits,
		tx_nospc_inflight_max,
		tx_nospc_new_event_limit,
		tx_nospc_inflight_credits,
		nb_events_limit,
		inflight_events,
		ldb_pool_size,
		dir_pool_size,
		pool_size,
	};
	/* Note: generated device stats are not allowed to be reset. */
	static const uint8_t dev_reset_allowed[] = {
		0, /* rx_ok */
		0, /* rx_drop */
		0, /* rx_interrupt_wait */
		0, /* rx_umonitor_umwait */
		0, /* tx_ok */
		0, /* total_polls */
		0, /* zero_polls */
		0, /* tx_nospc_ldb_hw_credits */
		0, /* tx_nospc_dir_hw_credits */
		0, /* tx_nospc_hw_credits */
		0, /* tx_nospc_inflight_max */
		0, /* tx_nospc_new_event_limit */
		0, /* tx_nospc_inflight_credits */
		0, /* nb_events_limit */
		0, /* inflight_events */
		0, /* ldb_pool_size */
		0, /* dir_pool_size */
		0, /* pool_size */
	};
	static const char * const port_stats[] = {
		"is_configured",
		"is_load_balanced",
		"hw_id",
		"rx_ok",
		"rx_drop",
		"rx_interrupt_wait",
		"rx_umonitor_umwait",
		"tx_ok",
		"total_polls",
		"zero_polls",
		"tx_nospc_ldb_hw_credits",
		"tx_nospc_dir_hw_credits",
		"tx_nospc_hw_credits",
		"tx_nospc_inflight_max",
		"tx_nospc_new_event_limit",
		"tx_nospc_inflight_credits",
		"tx_new",
		"tx_fwd",
		"tx_rel",
		"tx_implicit_rel",
		"tx_sched_ordered",
		"tx_sched_unordered",
		"tx_sched_atomic",
		"tx_sched_directed",
		"tx_invalid",
		"outstanding_releases",
		"max_outstanding_releases",
		"rx_sched_ordered",
		"rx_sched_unordered",
		"rx_sched_atomic",
		"rx_sched_directed",
		"rx_sched_invalid"
	};
	static const enum dlb2_xstats_type port_types[] = {
		is_configured,
		is_load_balanced,
		hw_id,
		rx_ok,
		rx_drop,
		rx_interrupt_wait,
		rx_umonitor_umwait,
		tx_ok,
		total_polls,
		zero_polls,
		tx_nospc_ldb_hw_credits,
		tx_nospc_dir_hw_credits,
		tx_nospc_hw_credits,
		tx_nospc_inflight_max,
		tx_nospc_new_event_limit,
		tx_nospc_inflight_credits,
		tx_new,
		tx_fwd,
		tx_rel,
		tx_implicit_rel,
		tx_sched_ordered,
		tx_sched_unordered,
		tx_sched_atomic,
		tx_sched_directed,
		tx_invalid,
		outstanding_releases,
		max_outstanding_releases,
		rx_sched_ordered,
		rx_sched_unordered,
		rx_sched_atomic,
		rx_sched_directed,
		rx_sched_invalid
	};
	static const uint8_t port_reset_allowed[] = {
		0, /* is_configured */
		0, /* is_load_balanced */
		0, /* hw_id */
		1, /* rx_ok */
		1, /* rx_drop */
		1, /* rx_interrupt_wait */
		1, /* rx_umonitor_umwait */
		1, /* tx_ok */
		1, /* total_polls */
		1, /* zero_polls */
		1, /* tx_nospc_ldb_hw_credits */
		1, /* tx_nospc_dir_hw_credits */
		1, /* tx_nospc_hw_credits */
		1, /* tx_nospc_inflight_max */
		1, /* tx_nospc_new_event_limit */
		1, /* tx_nospc_inflight_credits */
		1, /* tx_new */
		1, /* tx_fwd */
		1, /* tx_rel */
		1, /* tx_implicit_rel */
		1, /* tx_sched_ordered */
		1, /* tx_sched_unordered */
		1, /* tx_sched_atomic */
		1, /* tx_sched_directed */
		1, /* tx_invalid */
		0, /* outstanding_releases */
		0, /* max_outstanding_releases */
		1, /* rx_sched_ordered */
		1, /* rx_sched_unordered */
		1, /* rx_sched_atomic */
		1, /* rx_sched_directed */
		1 /* rx_sched_invalid */
	};

	/* QID specific stats */
	static const char * const qid_stats[] = {
		"is_configured",
		"is_load_balanced",
		"hw_id",
		"num_links",
		"sched_type",
		"enq_ok",
		"current_depth",
		"depth_threshold",
		"depth_le50_threshold",
		"depth_gt50_le75_threshold",
		"depth_gt75_le100_threshold",
		"depth_gt100_threshold",
	};
	static const enum dlb2_xstats_type qid_types[] = {
		is_configured,
		is_load_balanced,
		hw_id,
		num_links,
		sched_type,
		enq_ok,
		current_depth,
		depth_threshold,
		depth_le50_threshold,
		depth_gt50_le75_threshold,
		depth_gt75_le100_threshold,
		depth_gt100_threshold,
	};
	static const uint8_t qid_reset_allowed[] = {
		0, /* is_configured */
		0, /* is_load_balanced */
		0, /* hw_id */
		0, /* num_links */
		0, /* sched_type */
		1, /* enq_ok */
		0, /* current_depth */
		0, /* depth_threshold */
		1, /* depth_le50_threshold */
		1, /* depth_gt50_le75_threshold */
		1, /* depth_gt75_le100_threshold */
		1, /* depth_gt100_threshold */
	};

	/* ---- end of stat definitions ---- */

	/* check sizes, since a missed comma can lead to strings being
	 * joined by the compiler.
	 */
	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));

	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_reset_allowed));
	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));

	/* other vars */
	const unsigned int count = RTE_DIM(dev_stats) +
		DLB2_MAX_NUM_PORTS(dlb2->version) * RTE_DIM(port_stats) +
		DLB2_MAX_NUM_QUEUES(dlb2->version) * RTE_DIM(qid_stats);
	unsigned int i, port, qid, stat_id = 0;

	dlb2->xstats = rte_zmalloc_socket(NULL,
			sizeof(dlb2->xstats[0]) * count, 0,
			dlb2->qm_instance.info.socket_id);
	if (dlb2->xstats == NULL)
		return -ENOMEM;

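/* Shorthand for the name field of the xstats entry currently being built */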
#define sname dlb2->xstats[stat_id].name.name
	for (i = 0; i < RTE_DIM(dev_stats); i++, stat_id++) {
		dlb2->xstats[stat_id] = (struct dlb2_xstats_entry) {
			.fn_id = DLB2_XSTATS_FN_DEV,
			.stat = dev_types[i],
			.mode = RTE_EVENT_DEV_XSTATS_DEVICE,
			.reset_allowed = dev_reset_allowed[i],
		};
		snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
	}
	dlb2->xstats_count_mode_dev = stat_id;

	for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++) {
		dlb2->xstats_offset_for_port[port] = stat_id;

		uint32_t count_offset = stat_id;

		for (i = 0; i < RTE_DIM(port_stats); i++, stat_id++) {
			dlb2->xstats[stat_id] = (struct dlb2_xstats_entry){
				.fn_id = DLB2_XSTATS_FN_PORT,
				.obj_idx = port,
				.stat = port_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_PORT,
				.reset_allowed = port_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "port_%u_%s",
				 port, port_stats[i]);
		}

		dlb2->xstats_count_per_port[port] = stat_id - count_offset;
	}

	dlb2->xstats_count_mode_port = stat_id - dlb2->xstats_count_mode_dev;

	for (qid = 0; qid < DLB2_MAX_NUM_QUEUES(dlb2->version); qid++) {
		uint32_t count_offset = stat_id;

		dlb2->xstats_offset_for_qid[qid] = stat_id;

		for (i = 0; i < RTE_DIM(qid_stats); i++, stat_id++) {
			dlb2->xstats[stat_id] = (struct dlb2_xstats_entry){
				.fn_id = DLB2_XSTATS_FN_QUEUE,
				.obj_idx = qid,
				.stat = qid_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
				.reset_allowed = qid_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "qid_%u_%s",
				 qid, qid_stats[i]);
		}

		dlb2->xstats_count_per_qid[qid] = stat_id - count_offset;
	}

	dlb2->xstats_count_mode_queue = stat_id -
		(dlb2->xstats_count_mode_dev + dlb2->xstats_count_mode_port);
#undef sname

	dlb2->xstats_count = stat_id;

	return 0;
}
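
/*
 * For reference, after dlb2_xstats_init() the xstats[] array is laid out as
 * one contiguous block per object:
 *
 *	[ device stats | port 0 stats | port 1 stats | ... | qid 0 stats | ... ]
 *
 * xstats_offset_for_port[]/xstats_offset_for_qid[] record where each object's
 * block starts, and xstats_count_per_port[]/xstats_count_per_qid[] its length.
 */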

void
dlb2_xstats_uninit(struct dlb2_eventdev *dlb2)
{
	rte_free(dlb2->xstats);
	dlb2->xstats_count = 0;
}

int
dlb2_eventdev_xstats_get_names(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	const struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	unsigned int i;
	unsigned int xidx = 0;
	uint32_t xstats_mode_count = 0;
	uint32_t start_offset = 0;

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = dlb2->xstats_count_mode_dev;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
			break;
		xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
		start_offset = dlb2->xstats_offset_for_port[queue_port_id];
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= DLB2_MAX_NUM_QUEUES(dlb2->version) &&
		    (DLB2_MAX_NUM_QUEUES(dlb2->version) <= 255))
			break;
		xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
		start_offset = dlb2->xstats_offset_for_qid[queue_port_id];
		break;
	default:
		return -EINVAL;
	}

	if (xstats_mode_count > size || ids == NULL || xstats_names == NULL)
		return xstats_mode_count;

	for (i = 0; i < dlb2->xstats_count && xidx < size; i++) {
		if (dlb2->xstats[i].mode != mode)
			continue;

		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
		    queue_port_id != dlb2->xstats[i].obj_idx)
			continue;

		xstats_names[xidx] = dlb2->xstats[i].name;
		if (ids)
			ids[xidx] = start_offset + xidx;
		xidx++;
	}
	return xidx;
}

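/*
 * Common worker for the get and reset paths: walk the requested ids, evaluate
 * each stat through its callback, subtract the stored reset_value, and (when
 * reset is set and the stat allows it) advance reset_value so subsequent
 * reads start again from zero.
 */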
static int
dlb2_xstats_update(struct dlb2_eventdev *dlb2,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n, const uint32_t reset)
{
	unsigned int i;
	unsigned int xidx = 0;
	uint32_t xstats_mode_count = 0;

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = dlb2->xstats_count_mode_dev;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
			goto invalid_value;
		xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id];
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
#if (DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5) <= 255) /* max 8 bit value */
		if (queue_port_id >= DLB2_MAX_NUM_QUEUES(dlb2->version))
			goto invalid_value;
#endif
		xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id];
		break;
	default:
		goto invalid_value;
	}

	for (i = 0; i < n && xidx < xstats_mode_count; i++) {
		struct dlb2_xstats_entry *xs = &dlb2->xstats[ids[i]];
		dlb2_xstats_fn fn;

		if (ids[i] >= dlb2->xstats_count || xs->mode != mode)
			continue;

		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
		    queue_port_id != xs->obj_idx)
			continue;

		switch (xs->fn_id) {
		case DLB2_XSTATS_FN_DEV:
			fn = get_dev_stat;
			break;
		case DLB2_XSTATS_FN_PORT:
			fn = get_port_stat;
			break;
		case DLB2_XSTATS_FN_QUEUE:
			fn = get_queue_stat;
			break;
		default:
			DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
			goto invalid_value;
		}

		uint64_t val = fn(dlb2, xs->obj_idx, xs->stat,
				  xs->extra_arg) - xs->reset_value;

		if (values)
			values[xidx] = val;

		if (xs->reset_allowed && reset)
			xs->reset_value += val;

		xidx++;
	}

	return xidx;

invalid_value:
	return -EINVAL;
}

int
dlb2_eventdev_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	const uint32_t reset = 0;

	return dlb2_xstats_update(dlb2, mode, queue_port_id, ids, values, n,
				  reset);
}
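
/*
 * A hedged, application-side usage sketch (not part of this PMD): the generic
 * eventdev xstats API resolves to the callbacks in this file, so reading all
 * stats of port 0 would look roughly like this:
 *
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_PORT, 0, NULL, NULL, 0);
 *	struct rte_event_dev_xstats_name *names = calloc(n, sizeof(*names));
 *	unsigned int *ids = calloc(n, sizeof(*ids));
 *	uint64_t *values = calloc(n, sizeof(*values));
 *
 *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_PORT, 0,
 *				       names, ids, n);
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_PORT, 0,
 *				 ids, values, n);
 */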

uint64_t
dlb2_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
				 const char *name, unsigned int *id)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	unsigned int i;
	dlb2_xstats_fn fn;

	for (i = 0; i < dlb2->xstats_count; i++) {
		struct dlb2_xstats_entry *xs = &dlb2->xstats[i];

		if (strncmp(xs->name.name, name,
			    RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0) {
			if (id != NULL)
				*id = i;

			switch (xs->fn_id) {
			case DLB2_XSTATS_FN_DEV:
				fn = get_dev_stat;
				break;
			case DLB2_XSTATS_FN_PORT:
				fn = get_port_stat;
				break;
			case DLB2_XSTATS_FN_QUEUE:
				fn = get_queue_stat;
				break;
			default:
				DLB2_LOG_ERR("Unexpected xstat fn_id %d\n",
					     xs->fn_id);
				return (uint64_t)-1;
			}

			return fn(dlb2, xs->obj_idx, xs->stat,
				  xs->extra_arg) - xs->reset_value;
		}
	}
	if (id != NULL)
		*id = (uint32_t)-1;
	return (uint64_t)-1;
}
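
/*
 * A hedged, application-side sketch: a single stat can also be read by name,
 * e.g. the device-level receive counter built above:
 *
 *	unsigned int id;
 *	uint64_t rx = rte_event_dev_xstats_by_name_get(dev_id, "dev_rx_ok",
 *						       &id);
 *
 * A return of (uint64_t)-1 with id set to (uint32_t)-1 means the name was
 * not found.
 */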

static void
dlb2_xstats_reset_range(struct dlb2_eventdev *dlb2, uint32_t start,
			uint32_t num)
{
	uint32_t i;
	dlb2_xstats_fn fn;

	for (i = start; i < start + num; i++) {
		struct dlb2_xstats_entry *xs = &dlb2->xstats[i];

		if (!xs->reset_allowed)
			continue;

		switch (xs->fn_id) {
		case DLB2_XSTATS_FN_DEV:
			fn = get_dev_stat;
			break;
		case DLB2_XSTATS_FN_PORT:
			fn = get_port_stat;
			break;
		case DLB2_XSTATS_FN_QUEUE:
			fn = get_queue_stat;
			break;
		default:
			DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
			return;
		}

		uint64_t val = fn(dlb2, xs->obj_idx, xs->stat, xs->extra_arg);
		xs->reset_value = val;
	}
}

static int
dlb2_xstats_reset_queue(struct dlb2_eventdev *dlb2, uint8_t queue_id,
			const uint32_t ids[], uint32_t nb_ids)
{
	const uint32_t reset = 1;

	if (ids) {
		uint32_t nb_reset = dlb2_xstats_update(dlb2,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					queue_id, ids, NULL, nb_ids,
					reset);
		return nb_reset == nb_ids ? 0 : -EINVAL;
	}

	if (ids == NULL)
		dlb2_xstats_reset_range(dlb2,
					dlb2->xstats_offset_for_qid[queue_id],
					dlb2->xstats_count_per_qid[queue_id]);

	return 0;
}

static int
dlb2_xstats_reset_port(struct dlb2_eventdev *dlb2, uint8_t port_id,
		       const uint32_t ids[], uint32_t nb_ids)
{
	const uint32_t reset = 1;
	int offset = dlb2->xstats_offset_for_port[port_id];
	int nb_stat = dlb2->xstats_count_per_port[port_id];

	if (ids) {
		uint32_t nb_reset = dlb2_xstats_update(dlb2,
					RTE_EVENT_DEV_XSTATS_PORT, port_id,
					ids, NULL, nb_ids,
					reset);
		return nb_reset == nb_ids ? 0 : -EINVAL;
	}

	dlb2_xstats_reset_range(dlb2, offset, nb_stat);
	return 0;
}

static int
dlb2_xstats_reset_dev(struct dlb2_eventdev *dlb2, const uint32_t ids[],
		      uint32_t nb_ids)
{
	uint32_t i;

	if (ids) {
		for (i = 0; i < nb_ids; i++) {
			uint32_t id = ids[i];

			if (id >= dlb2->xstats_count_mode_dev)
				return -EINVAL;
			dlb2_xstats_reset_range(dlb2, id, 1);
		}
	} else {
		for (i = 0; i < dlb2->xstats_count_mode_dev; i++)
			dlb2_xstats_reset_range(dlb2, i, 1);
	}

	return 0;
}

int
dlb2_eventdev_xstats_reset(struct rte_eventdev *dev,
			   enum rte_event_dev_xstats_mode mode,
			   int16_t queue_port_id,
			   const uint32_t ids[],
			   uint32_t nb_ids)
{
	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
	uint32_t i;

	/* handle -1 for queue_port_id here, looping over all ports/queues */
	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		if (dlb2_xstats_reset_dev(dlb2, ids, nb_ids))
			return -EINVAL;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id == -1) {
			for (i = 0;
			     i < DLB2_MAX_NUM_PORTS(dlb2->version); i++) {
				if (dlb2_xstats_reset_port(dlb2, i,
							   ids, nb_ids))
					return -EINVAL;
			}
		} else if (queue_port_id < DLB2_MAX_NUM_PORTS(dlb2->version)) {
			if (dlb2_xstats_reset_port(dlb2, queue_port_id,
						   ids, nb_ids))
				return -EINVAL;
		}
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id == -1) {
			for (i = 0;
			     i < DLB2_MAX_NUM_QUEUES(dlb2->version); i++) {
				if (dlb2_xstats_reset_queue(dlb2, i,
							    ids, nb_ids))
					return -EINVAL;
			}
		} else if (queue_port_id < DLB2_MAX_NUM_QUEUES(dlb2->version)) {
			if (dlb2_xstats_reset_queue(dlb2, queue_port_id,
						    ids, nb_ids))
				return -EINVAL;
		}
		break;
	}

	return 0;
}
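
/*
 * A hedged, application-side sketch: passing -1 as queue_port_id (handled
 * above) resets every resettable stat of every port:
 *
 *	rte_event_dev_xstats_reset(dev_id, RTE_EVENT_DEV_XSTATS_PORT, -1,
 *				   NULL, 0);
 */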

void
dlb2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	struct dlb2_eventdev *dlb2;
	struct dlb2_hw_dev *handle;
	int i;

	dlb2 = dlb2_pmd_priv(dev);

	if (dlb2 == NULL) {
		fprintf(f, "DLB2 Event device cannot be dumped!\n");
		return;
	}

	if (!dlb2->configured)
		fprintf(f, "DLB2 Event device is not configured\n");

	handle = &dlb2->qm_instance;

	fprintf(f, "================\n");
	fprintf(f, "DLB2 Device Dump\n");
	fprintf(f, "================\n");

	fprintf(f, "Processor supports umonitor/umwait instructions = %s\n",
		dlb2->umwait_allowed ? "yes" : "no");

	/* Generic top level device information */

	fprintf(f, "device is configured and run state = ");
	if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
		fprintf(f, "STOPPED\n");
	else if (dlb2->run_state == DLB2_RUN_STATE_STOPPING)
		fprintf(f, "STOPPING\n");
	else if (dlb2->run_state == DLB2_RUN_STATE_STARTING)
		fprintf(f, "STARTING\n");
	else if (dlb2->run_state == DLB2_RUN_STATE_STARTED)
		fprintf(f, "STARTED\n");
	else
		fprintf(f, "UNEXPECTED\n");

	fprintf(f, "domain ID=%u, socket_id=%u, evdev=%p\n",
		handle->domain_id, handle->info.socket_id, dlb2->event_dev);

	fprintf(f, "num dir ports=%u, num dir queues=%u\n",
		dlb2->num_dir_ports, dlb2->num_dir_queues);

	fprintf(f, "num ldb ports=%u, num ldb queues=%u\n",
		dlb2->num_ldb_ports, dlb2->num_ldb_queues);

	fprintf(f, "num atomic inflights=%u, hist list entries=%u\n",
		handle->cfg.resources.num_atomic_inflights,
		handle->cfg.resources.num_hist_list_entries);

	fprintf(f, "results from most recent hw resource query:\n");

	fprintf(f, "\tnum_sched_domains = %u\n",
		dlb2->hw_rsrc_query_results.num_sched_domains);

	fprintf(f, "\tnum_ldb_queues = %u\n",
		dlb2->hw_rsrc_query_results.num_ldb_queues);

	fprintf(f, "\tnum_ldb_ports = %u\n",
		dlb2->hw_rsrc_query_results.num_ldb_ports);

	fprintf(f, "\tnum_dir_ports = %u\n",
		dlb2->hw_rsrc_query_results.num_dir_ports);

	fprintf(f, "\tnum_atomic_inflights = %u\n",
		dlb2->hw_rsrc_query_results.num_atomic_inflights);

	fprintf(f, "\tnum_hist_list_entries = %u\n",
		dlb2->hw_rsrc_query_results.num_hist_list_entries);

	fprintf(f, "\tmax_contiguous_hist_list_entries = %u\n",
		dlb2->hw_rsrc_query_results.max_contiguous_hist_list_entries);

	fprintf(f, "\tnum_ldb_credits = %u\n",
		dlb2->hw_rsrc_query_results.num_ldb_credits);

	fprintf(f, "\tnum_dir_credits = %u\n",
		dlb2->hw_rsrc_query_results.num_dir_credits);

	fprintf(f, "\tnum_credits = %u\n",
		dlb2->hw_rsrc_query_results.num_credits);

	/* Port level information */

	for (i = 0; i < dlb2->num_ports; i++) {
		struct dlb2_eventdev_port *p = &dlb2->ev_ports[i];
		int j;

		if (!p->enq_configured)
			fprintf(f, "Port_%d is not configured\n", i);

		fprintf(f, "Port_%d\n", i);
		fprintf(f, "=======\n");

		fprintf(f, "\tevport_%u is configured, setup done=%d\n",
			p->id, p->setup_done);

		fprintf(f, "\tconfig state=%d, port state=%d\n",
			p->qm_port.config_state, p->qm_port.state);

		fprintf(f, "\tport is %s\n",
			p->qm_port.is_directed ? "directed" : "load balanced");

		fprintf(f, "\toutstanding releases=%u\n",
			p->outstanding_releases);

		fprintf(f, "\tinflight max=%u, inflight credits=%u\n",
			p->inflight_max, p->inflight_credits);

		fprintf(f, "\tcredit update quanta=%u, implicit release =%u\n",
			p->credit_update_quanta, p->implicit_release);

		fprintf(f, "\tnum_links=%d, queues -> ", p->num_links);

		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
			if (p->link[j].valid)
				fprintf(f, "id=%u prio=%u ",
					p->link[j].queue_id,
					p->link[j].priority);
		}
		fprintf(f, "\n");

		fprintf(f, "\thardware port id=%u\n", p->qm_port.id);

		fprintf(f, "\tcached_ldb_credits=%u\n",
			p->qm_port.cached_ldb_credits);

		fprintf(f, "\tldb_credits = %u\n",
			p->qm_port.ldb_credits);

		fprintf(f, "\tcached_dir_credits = %u\n",
			p->qm_port.cached_dir_credits);

		fprintf(f, "\tdir_credits = %u\n",
			p->qm_port.dir_credits);

		fprintf(f, "\tcached_credits = %u\n",
			p->qm_port.cached_credits);

		fprintf(f, "\tcredits = %u\n",
			p->qm_port.credits);

		fprintf(f, "\tgenbit=%d, cq_idx=%d, cq_depth=%d\n",
			p->qm_port.gen_bit,
			p->qm_port.cq_idx,
			p->qm_port.cq_depth);

		fprintf(f, "\tinterrupt armed=%d\n",
			p->qm_port.int_armed);

		fprintf(f, "\tPort statistics\n");

		fprintf(f, "\t\trx_ok %" PRIu64 "\n",
			p->stats.traffic.rx_ok);

		fprintf(f, "\t\trx_drop %" PRIu64 "\n",
			p->stats.traffic.rx_drop);

		fprintf(f, "\t\trx_interrupt_wait %" PRIu64 "\n",
			p->stats.traffic.rx_interrupt_wait);

		fprintf(f, "\t\trx_umonitor_umwait %" PRIu64 "\n",
			p->stats.traffic.rx_umonitor_umwait);

		fprintf(f, "\t\ttx_ok %" PRIu64 "\n",
			p->stats.traffic.tx_ok);

		fprintf(f, "\t\ttotal_polls %" PRIu64 "\n",
			p->stats.traffic.total_polls);

		fprintf(f, "\t\tzero_polls %" PRIu64 "\n",
			p->stats.traffic.zero_polls);

		fprintf(f, "\t\ttx_nospc_ldb_hw_credits %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_ldb_hw_credits);

		fprintf(f, "\t\ttx_nospc_dir_hw_credits %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_dir_hw_credits);

		fprintf(f, "\t\ttx_nospc_hw_credits %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_hw_credits);

		fprintf(f, "\t\ttx_nospc_inflight_max %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_inflight_max);

		fprintf(f, "\t\ttx_nospc_new_event_limit %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_new_event_limit);

		fprintf(f, "\t\ttx_nospc_inflight_credits %" PRIu64 "\n",
			p->stats.traffic.tx_nospc_inflight_credits);

		fprintf(f, "\t\ttx_new %" PRIu64 "\n",
			p->stats.tx_op_cnt[RTE_EVENT_OP_NEW]);

		fprintf(f, "\t\ttx_fwd %" PRIu64 "\n",
			p->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD]);

		fprintf(f, "\t\ttx_rel %" PRIu64 "\n",
			p->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE]);

		fprintf(f, "\t\ttx_implicit_rel %" PRIu64 "\n",
			p->stats.tx_implicit_rel);

		fprintf(f, "\t\ttx_sched_ordered %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_ORDERED]);

		fprintf(f, "\t\ttx_sched_unordered %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_UNORDERED]);

		fprintf(f, "\t\ttx_sched_atomic %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_ATOMIC]);

		fprintf(f, "\t\ttx_sched_directed %" PRIu64 "\n",
			p->stats.tx_sched_cnt[DLB2_SCHED_DIRECTED]);

		fprintf(f, "\t\ttx_invalid %" PRIu64 "\n",
			p->stats.tx_invalid);

		fprintf(f, "\t\trx_sched_ordered %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_ORDERED]);

		fprintf(f, "\t\trx_sched_unordered %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_UNORDERED]);

		fprintf(f, "\t\trx_sched_atomic %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_ATOMIC]);

		fprintf(f, "\t\trx_sched_directed %" PRIu64 "\n",
			p->stats.rx_sched_cnt[DLB2_SCHED_DIRECTED]);

		fprintf(f, "\t\trx_sched_invalid %" PRIu64 "\n",
			p->stats.rx_sched_invalid);
	}

	/* Queue level information */

	for (i = 0; i < dlb2->num_queues; i++) {
		struct dlb2_eventdev_queue *q = &dlb2->ev_queues[i];
		int j, k;

		if (!q->setup_done)
			fprintf(f, "Queue_%d is not configured\n", i);

		fprintf(f, "Queue_%d\n", i);
		fprintf(f, "========\n");

		fprintf(f, "\tevqueue_%u is set up\n", q->id);

		fprintf(f, "\tqueue is %s\n",
			q->qm_queue.is_directed ? "directed" : "load balanced");

		fprintf(f, "\tnum_links=%d, ports -> ", q->num_links);

		for (j = 0; j < dlb2->num_ports; j++) {
			struct dlb2_eventdev_port *p = &dlb2->ev_ports[j];

			for (k = 0; k < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; k++) {
				if (p->link[k].valid &&
				    p->link[k].queue_id == q->id)
					fprintf(f, "id=%u prio=%u ",
						p->id, p->link[k].priority);
			}
		}
		fprintf(f, "\n");

		fprintf(f, "\tcurrent depth: %u events\n",
			dlb2_get_queue_depth(dlb2, q));

		fprintf(f, "\tnum qid inflights=%u, sched_type=%d\n",
			q->qm_queue.num_qid_inflights, q->qm_queue.sched_type);
	}
}