1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
3 */
4
5 #include <stdint.h>
6 #include <inttypes.h>
7
8 #include "dlb_priv.h"
9 #include "dlb_inline_fns.h"
10
11 enum dlb_xstats_type {
12 /* common to device and port */
13 rx_ok, /**< Receive an event */
14 rx_drop, /**< Error bit set in received QE */
15 rx_interrupt_wait, /**< Wait on an interrupt */
16 rx_umonitor_umwait, /**< Block using umwait */
17 tx_ok, /**< Transmit an event */
18 total_polls, /**< Call dequeue_burst */
19 zero_polls, /**< Call dequeue_burst and return 0 */
20 tx_nospc_ldb_hw_credits, /**< Insufficient LDB h/w credits */
21 tx_nospc_dir_hw_credits, /**< Insufficient DIR h/w credits */
22 tx_nospc_inflight_max, /**< Reach the new_event_threshold */
23 tx_nospc_new_event_limit, /**< Insufficient s/w credits */
24 tx_nospc_inflight_credits, /**< Port has too few s/w credits */
25 /* device specific */
26 nb_events_limit, /**< Maximum num of events */
27 inflight_events, /**< Current num events outstanding */
28 ldb_pool_size, /**< Num load balanced credits */
29 dir_pool_size, /**< Num directed credits */
30 /* port specific */
31 tx_new, /**< Send an OP_NEW event */
32 tx_fwd, /**< Send an OP_FORWARD event */
33 tx_rel, /**< Send an OP_RELEASE event */
34 tx_implicit_rel, /**< Issue an implicit event release */
35 tx_sched_ordered, /**< Send a SCHED_TYPE_ORDERED event */
36 tx_sched_unordered, /**< Send a SCHED_TYPE_PARALLEL event */
37 tx_sched_atomic, /**< Send a SCHED_TYPE_ATOMIC event */
38 tx_sched_directed, /**< Send a directed event */
39 tx_invalid, /**< Send an event with an invalid op */
40 outstanding_releases, /**< # of releases a port owes */
41 max_outstanding_releases, /**< max # of releases a port can owe */
42 rx_sched_ordered, /**< Dequeue an ordered event */
43 rx_sched_unordered, /**< Dequeue an unordered event */
44 rx_sched_atomic, /**< Dequeue an atomic event */
45 rx_sched_directed, /**< Dequeue a directed event */
46 rx_sched_invalid, /**< Dequeue event sched type invalid */
47 /* common to port and queue */
48 is_configured, /**< Port is configured */
49 is_load_balanced, /**< Port is LDB */
50 hw_id, /**< Hardware ID */
51 /* queue specific */
52 num_links, /**< Number of ports linked */
53 sched_type, /**< Queue sched type */
54 enq_ok, /**< # events enqueued to the queue */
55 current_depth /**< Current queue depth */
56 };
57
58 typedef uint64_t (*dlb_xstats_fn)(struct dlb_eventdev *dlb,
59 uint16_t obj_idx, /* port or queue id */
60 enum dlb_xstats_type stat, int extra_arg);
61
62 enum dlb_xstats_fn_type {
63 DLB_XSTATS_FN_DEV,
64 DLB_XSTATS_FN_PORT,
65 DLB_XSTATS_FN_QUEUE
66 };
67
68 struct dlb_xstats_entry {
69 struct rte_event_dev_xstats_name name;
70 uint64_t reset_value; /* an offset to be taken away to emulate resets */
71 enum dlb_xstats_fn_type fn_id;
72 enum dlb_xstats_type stat;
73 enum rte_event_dev_xstats_mode mode;
74 int extra_arg;
75 uint16_t obj_idx;
76 uint8_t reset_allowed; /* when set, this value can be reset */
77 };
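
/*
 * Reset emulation sketch (illustrative only, not additional driver logic):
 * the underlying counters are never cleared. A read reports the raw
 * counter minus reset_value, and a reset simply latches the current raw
 * value into reset_value:
 *
 *   reported = fn(dlb, obj_idx, stat, extra_arg) - xs->reset_value;
 *   on reset: xs->reset_value += reported; (i.e. reset_value = raw value)
 *
 * See dlb_xstats_update() and dlb_xstats_reset_range() below.
 */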
78
79 /* Some device stats are simply a summation of the corresponding port values */
80 static uint64_t
81 dlb_device_traffic_stat_get(struct dlb_eventdev *dlb, int which_stat)
82 {
83 int i;
84 uint64_t val = 0;
85
86 for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
87 struct dlb_eventdev_port *port = &dlb->ev_ports[i];
88
89 if (!port->setup_done)
90 continue;
91
92 switch (which_stat) {
93 case rx_ok:
94 val += port->stats.traffic.rx_ok;
95 break;
96 case rx_drop:
97 val += port->stats.traffic.rx_drop;
98 break;
99 case rx_interrupt_wait:
100 val += port->stats.traffic.rx_interrupt_wait;
101 break;
102 case rx_umonitor_umwait:
103 val += port->stats.traffic.rx_umonitor_umwait;
104 break;
105 case tx_ok:
106 val += port->stats.traffic.tx_ok;
107 break;
108 case total_polls:
109 val += port->stats.traffic.total_polls;
110 break;
111 case zero_polls:
112 val += port->stats.traffic.zero_polls;
113 break;
114 case tx_nospc_ldb_hw_credits:
115 val += port->stats.traffic.tx_nospc_ldb_hw_credits;
116 break;
117 case tx_nospc_dir_hw_credits:
118 val += port->stats.traffic.tx_nospc_dir_hw_credits;
119 break;
120 case tx_nospc_inflight_max:
121 val += port->stats.traffic.tx_nospc_inflight_max;
122 break;
123 case tx_nospc_new_event_limit:
124 val += port->stats.traffic.tx_nospc_new_event_limit;
125 break;
126 case tx_nospc_inflight_credits:
127 val += port->stats.traffic.tx_nospc_inflight_credits;
128 break;
129 default:
130 return -1;
131 }
132 }
133 return val;
134 }
135
136 static uint64_t
137 get_dev_stat(struct dlb_eventdev *dlb, uint16_t obj_idx __rte_unused,
138 enum dlb_xstats_type type, int extra_arg __rte_unused)
139 {
140 switch (type) {
141 case rx_ok:
142 case rx_drop:
143 case rx_interrupt_wait:
144 case rx_umonitor_umwait:
145 case tx_ok:
146 case total_polls:
147 case zero_polls:
148 case tx_nospc_ldb_hw_credits:
149 case tx_nospc_dir_hw_credits:
150 case tx_nospc_inflight_max:
151 case tx_nospc_new_event_limit:
152 case tx_nospc_inflight_credits:
153 return dlb_device_traffic_stat_get(dlb, type);
154 case nb_events_limit:
155 return dlb->new_event_limit;
156 case inflight_events:
157 return __atomic_load_n(&dlb->inflights, __ATOMIC_SEQ_CST);
158 case ldb_pool_size:
159 return dlb->num_ldb_credits;
160 case dir_pool_size:
161 return dlb->num_dir_credits;
162 default: return -1;
163 }
164 }
165
166 static uint64_t
167 get_port_stat(struct dlb_eventdev *dlb, uint16_t obj_idx,
168 enum dlb_xstats_type type, int extra_arg __rte_unused)
169 {
170 struct dlb_eventdev_port *ev_port = &dlb->ev_ports[obj_idx];
171
172 switch (type) {
173 case rx_ok: return ev_port->stats.traffic.rx_ok;
174
175 case rx_drop: return ev_port->stats.traffic.rx_drop;
176
177 case rx_interrupt_wait: return ev_port->stats.traffic.rx_interrupt_wait;
178
179 case rx_umonitor_umwait:
180 return ev_port->stats.traffic.rx_umonitor_umwait;
181
182 case tx_ok: return ev_port->stats.traffic.tx_ok;
183
184 case total_polls: return ev_port->stats.traffic.total_polls;
185
186 case zero_polls: return ev_port->stats.traffic.zero_polls;
187
188 case tx_nospc_ldb_hw_credits:
189 return ev_port->stats.traffic.tx_nospc_ldb_hw_credits;
190
191 case tx_nospc_dir_hw_credits:
192 return ev_port->stats.traffic.tx_nospc_dir_hw_credits;
193
194 case tx_nospc_inflight_max:
195 return ev_port->stats.traffic.tx_nospc_inflight_max;
196
197 case tx_nospc_new_event_limit:
198 return ev_port->stats.traffic.tx_nospc_new_event_limit;
199
200 case tx_nospc_inflight_credits:
201 return ev_port->stats.traffic.tx_nospc_inflight_credits;
202
203 case is_configured: return ev_port->setup_done;
204
205 case is_load_balanced: return !ev_port->qm_port.is_directed;
206
207 case hw_id: return ev_port->qm_port.id;
208
209 case tx_new: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_NEW];
210
211 case tx_fwd: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD];
212
213 case tx_rel: return ev_port->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE];
214
215 case tx_implicit_rel: return ev_port->stats.tx_implicit_rel;
216
217 case tx_sched_ordered:
218 return ev_port->stats.tx_sched_cnt[DLB_SCHED_ORDERED];
219
220 case tx_sched_unordered:
221 return ev_port->stats.tx_sched_cnt[DLB_SCHED_UNORDERED];
222
223 case tx_sched_atomic:
224 return ev_port->stats.tx_sched_cnt[DLB_SCHED_ATOMIC];
225
226 case tx_sched_directed:
227 return ev_port->stats.tx_sched_cnt[DLB_SCHED_DIRECTED];
228
229 case tx_invalid: return ev_port->stats.tx_invalid;
230
231 case outstanding_releases: return ev_port->outstanding_releases;
232
233 case max_outstanding_releases:
234 return DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
235
236 case rx_sched_ordered:
237 return ev_port->stats.rx_sched_cnt[DLB_SCHED_ORDERED];
238
239 case rx_sched_unordered:
240 return ev_port->stats.rx_sched_cnt[DLB_SCHED_UNORDERED];
241
242 case rx_sched_atomic:
243 return ev_port->stats.rx_sched_cnt[DLB_SCHED_ATOMIC];
244
245 case rx_sched_directed:
246 return ev_port->stats.rx_sched_cnt[DLB_SCHED_DIRECTED];
247
248 case rx_sched_invalid: return ev_port->stats.rx_sched_invalid;
249
250 default: return -1;
251 }
252 }
253
254 static uint64_t
255 get_queue_stat(struct dlb_eventdev *dlb, uint16_t obj_idx,
256 enum dlb_xstats_type type, int extra_arg __rte_unused)
257 {
258 struct dlb_eventdev_queue *ev_queue = &dlb->ev_queues[obj_idx];
259
260 switch (type) {
261 case is_configured: return ev_queue->setup_done;
262
263 case is_load_balanced: return !ev_queue->qm_queue.is_directed;
264
265 case hw_id: return ev_queue->qm_queue.id;
266
267 case num_links: return ev_queue->num_links;
268
269 case sched_type: return ev_queue->qm_queue.sched_type;
270
271 case enq_ok:
272 {
273 int port_count = 0;
274 uint64_t enq_ok_tally = 0;
275
276 ev_queue->enq_ok = 0;
277 for (port_count = 0; port_count < DLB_MAX_NUM_PORTS;
278 port_count++) {
279 struct dlb_eventdev_port *ev_port =
280 &dlb->ev_ports[port_count];
281 enq_ok_tally += ev_port->stats.enq_ok[ev_queue->id];
282 }
283 ev_queue->enq_ok = enq_ok_tally;
284 return ev_queue->enq_ok;
285 }
286
287 case current_depth: return dlb_get_queue_depth(dlb, ev_queue);
288
289 default: return -1;
290 }
291 }
292
293 int
294 dlb_xstats_init(struct dlb_eventdev *dlb)
295 {
296 /*
297 * Define the stats names and types used to build up the device
298 * xstats array.
299 * There are multiple sets of stats:
300 * - device-level,
301 * - per-port,
302 * - per-qid.
303 *
304 * For each of these sets there are three parallel arrays: one for the
305 * stat names, one for the stat type parameter passed to the fn call
306 * that retrieves the stat, and a third indicating whether the stat
307 * may be reset. All three arrays must be kept in sync.
308 */
309 static const char * const dev_stats[] = {
310 "rx_ok",
311 "rx_drop",
312 "rx_interrupt_wait",
313 "rx_umonitor_umwait",
314 "tx_ok",
315 "total_polls",
316 "zero_polls",
317 "tx_nospc_ldb_hw_credits",
318 "tx_nospc_dir_hw_credits",
319 "tx_nospc_inflight_max",
320 "tx_nospc_new_event_limit",
321 "tx_nospc_inflight_credits",
322 "nb_events_limit",
323 "inflight_events",
324 "ldb_pool_size",
325 "dir_pool_size",
326 };
327 static const enum dlb_xstats_type dev_types[] = {
328 rx_ok,
329 rx_drop,
330 rx_interrupt_wait,
331 rx_umonitor_umwait,
332 tx_ok,
333 total_polls,
334 zero_polls,
335 tx_nospc_ldb_hw_credits,
336 tx_nospc_dir_hw_credits,
337 tx_nospc_inflight_max,
338 tx_nospc_new_event_limit,
339 tx_nospc_inflight_credits,
340 nb_events_limit,
341 inflight_events,
342 ldb_pool_size,
343 dir_pool_size,
344 };
345 /* Note: generated device stats are not allowed to be reset. */
346 static const uint8_t dev_reset_allowed[] = {
347 0, /* rx_ok */
348 0, /* rx_drop */
349 0, /* rx_interrupt_wait */
350 0, /* rx_umonitor_umwait */
351 0, /* tx_ok */
352 0, /* total_polls */
353 0, /* zero_polls */
354 0, /* tx_nospc_ldb_hw_credits */
355 0, /* tx_nospc_dir_hw_credits */
356 0, /* tx_nospc_inflight_max */
357 0, /* tx_nospc_new_event_limit */
358 0, /* tx_nospc_inflight_credits */
359 0, /* nb_events_limit */
360 0, /* inflight_events */
361 0, /* ldb_pool_size */
362 0, /* dir_pool_size */
363 };
364 static const char * const port_stats[] = {
365 "is_configured",
366 "is_load_balanced",
367 "hw_id",
368 "rx_ok",
369 "rx_drop",
370 "rx_interrupt_wait",
371 "rx_umonitor_umwait",
372 "tx_ok",
373 "total_polls",
374 "zero_polls",
375 "tx_nospc_ldb_hw_credits",
376 "tx_nospc_dir_hw_credits",
377 "tx_nospc_inflight_max",
378 "tx_nospc_new_event_limit",
379 "tx_nospc_inflight_credits",
380 "tx_new",
381 "tx_fwd",
382 "tx_rel",
383 "tx_implicit_rel",
384 "tx_sched_ordered",
385 "tx_sched_unordered",
386 "tx_sched_atomic",
387 "tx_sched_directed",
388 "tx_invalid",
389 "outstanding_releases",
390 "max_outstanding_releases",
391 "rx_sched_ordered",
392 "rx_sched_unordered",
393 "rx_sched_atomic",
394 "rx_sched_directed",
395 "rx_sched_invalid"
396 };
397 static const enum dlb_xstats_type port_types[] = {
398 is_configured,
399 is_load_balanced,
400 hw_id,
401 rx_ok,
402 rx_drop,
403 rx_interrupt_wait,
404 rx_umonitor_umwait,
405 tx_ok,
406 total_polls,
407 zero_polls,
408 tx_nospc_ldb_hw_credits,
409 tx_nospc_dir_hw_credits,
410 tx_nospc_inflight_max,
411 tx_nospc_new_event_limit,
412 tx_nospc_inflight_credits,
413 tx_new,
414 tx_fwd,
415 tx_rel,
416 tx_implicit_rel,
417 tx_sched_ordered,
418 tx_sched_unordered,
419 tx_sched_atomic,
420 tx_sched_directed,
421 tx_invalid,
422 outstanding_releases,
423 max_outstanding_releases,
424 rx_sched_ordered,
425 rx_sched_unordered,
426 rx_sched_atomic,
427 rx_sched_directed,
428 rx_sched_invalid
429 };
430 static const uint8_t port_reset_allowed[] = {
431 0, /* is_configured */
432 0, /* is_load_balanced */
433 0, /* hw_id */
434 1, /* rx_ok */
435 1, /* rx_drop */
436 1, /* rx_interrupt_wait */
437 1, /* rx_umonitor_umwait */
438 1, /* tx_ok */
439 1, /* total_polls */
440 1, /* zero_polls */
441 1, /* tx_nospc_ldb_hw_credits */
442 1, /* tx_nospc_dir_hw_credits */
443 1, /* tx_nospc_inflight_max */
444 1, /* tx_nospc_new_event_limit */
445 1, /* tx_nospc_inflight_credits */
446 1, /* tx_new */
447 1, /* tx_fwd */
448 1, /* tx_rel */
449 1, /* tx_implicit_rel */
450 1, /* tx_sched_ordered */
451 1, /* tx_sched_unordered */
452 1, /* tx_sched_atomic */
453 1, /* tx_sched_directed */
454 1, /* tx_invalid */
455 0, /* outstanding_releases */
456 0, /* max_outstanding_releases */
457 1, /* rx_sched_ordered */
458 1, /* rx_sched_unordered */
459 1, /* rx_sched_atomic */
460 1, /* rx_sched_directed */
461 1 /* rx_sched_invalid */
462 };
463
464 /* QID specific stats */
465 static const char * const qid_stats[] = {
466 "is_configured",
467 "is_load_balanced",
468 "hw_id",
469 "num_links",
470 "sched_type",
471 "enq_ok",
472 "current_depth",
473 };
474 static const enum dlb_xstats_type qid_types[] = {
475 is_configured,
476 is_load_balanced,
477 hw_id,
478 num_links,
479 sched_type,
480 enq_ok,
481 current_depth,
482 };
483 static const uint8_t qid_reset_allowed[] = {
484 0, /* is_configured */
485 0, /* is_load_balanced */
486 0, /* hw_id */
487 0, /* num_links */
488 0, /* sched_type */
489 1, /* enq_ok */
490 0, /* current_depth */
491 };
492
493 /* ---- end of stat definitions ---- */
494
495 /* check sizes, since a missed comma can lead to strings being
496 * joined by the compiler.
497 */
498 RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
499 RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
500 RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
501
502 RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_reset_allowed));
503 RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
504 RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));
505
506 /* other vars */
507 const unsigned int count = RTE_DIM(dev_stats) +
508 DLB_MAX_NUM_PORTS * RTE_DIM(port_stats) +
509 DLB_MAX_NUM_QUEUES * RTE_DIM(qid_stats);
510 unsigned int i, port, qid, stat_id = 0;
511
512 dlb->xstats = rte_zmalloc_socket(NULL,
513 sizeof(dlb->xstats[0]) * count, 0,
514 dlb->qm_instance.info.socket_id);
515 if (dlb->xstats == NULL)
516 return -ENOMEM;
517
518 #define sname dlb->xstats[stat_id].name.name
519 for (i = 0; i < RTE_DIM(dev_stats); i++, stat_id++) {
520 dlb->xstats[stat_id] = (struct dlb_xstats_entry) {
521 .fn_id = DLB_XSTATS_FN_DEV,
522 .stat = dev_types[i],
523 .mode = RTE_EVENT_DEV_XSTATS_DEVICE,
524 .reset_allowed = dev_reset_allowed[i],
525 };
526 snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
527 }
528 dlb->xstats_count_mode_dev = stat_id;
529
530 for (port = 0; port < DLB_MAX_NUM_PORTS; port++) {
531 uint32_t count_offset = stat_id;
532
533 dlb->xstats_offset_for_port[port] = stat_id;
534
535 for (i = 0; i < RTE_DIM(port_stats); i++, stat_id++) {
536 dlb->xstats[stat_id] = (struct dlb_xstats_entry){
537 .fn_id = DLB_XSTATS_FN_PORT,
538 .obj_idx = port,
539 .stat = port_types[i],
540 .mode = RTE_EVENT_DEV_XSTATS_PORT,
541 .reset_allowed = port_reset_allowed[i],
542 };
543 snprintf(sname, sizeof(sname), "port_%u_%s",
544 port, port_stats[i]);
545 }
546
547 dlb->xstats_count_per_port[port] = stat_id - count_offset;
548 }
549
550 dlb->xstats_count_mode_port = stat_id - dlb->xstats_count_mode_dev;
551
552 for (qid = 0; qid < DLB_MAX_NUM_QUEUES; qid++) {
553 uint32_t count_offset = stat_id;
554
555 dlb->xstats_offset_for_qid[qid] = stat_id;
556
557 for (i = 0; i < RTE_DIM(qid_stats); i++, stat_id++) {
558 dlb->xstats[stat_id] = (struct dlb_xstats_entry){
559 .fn_id = DLB_XSTATS_FN_QUEUE,
560 .obj_idx = qid,
561 .stat = qid_types[i],
562 .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
563 .reset_allowed = qid_reset_allowed[i],
564 };
565 snprintf(sname, sizeof(sname), "qid_%u_%s",
566 qid, qid_stats[i]);
567 }
568
569 dlb->xstats_count_per_qid[qid] = stat_id - count_offset;
570 }
571
572 dlb->xstats_count_mode_queue = stat_id -
573 (dlb->xstats_count_mode_dev + dlb->xstats_count_mode_port);
574 #undef sname
575
576 dlb->xstats_count = stat_id;
577
578 return 0;
579 }
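
/*
 * Illustrative examples of the names generated above, e.g. for port 3
 * and queue 0: "dev_rx_ok", "port_3_tx_nospc_ldb_hw_credits" and
 * "qid_0_current_depth". The "dev_"/"port_N_"/"qid_N_" prefixes come
 * from the snprintf() calls in dlb_xstats_init().
 */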
580
581 void
582 dlb_xstats_uninit(struct dlb_eventdev *dlb)
583 {
584 rte_free(dlb->xstats);
585 dlb->xstats_count = 0;
586 }
587
588 int
589 dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,
590 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
591 struct rte_event_dev_xstats_name *xstats_names,
592 unsigned int *ids, unsigned int size)
593 {
594 const struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
595 unsigned int i;
596 unsigned int xidx = 0;
597 uint32_t xstats_mode_count = 0;
598 uint32_t start_offset = 0;
599
600 switch (mode) {
601 case RTE_EVENT_DEV_XSTATS_DEVICE:
602 xstats_mode_count = dlb->xstats_count_mode_dev;
603 break;
604 case RTE_EVENT_DEV_XSTATS_PORT:
605 if (queue_port_id >= DLB_MAX_NUM_PORTS)
606 break;
607 xstats_mode_count = dlb->xstats_count_per_port[queue_port_id];
608 start_offset = dlb->xstats_offset_for_port[queue_port_id];
609 break;
610 case RTE_EVENT_DEV_XSTATS_QUEUE:
611 #if (DLB_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
612 if (queue_port_id >= DLB_MAX_NUM_QUEUES)
613 break;
614 #endif
615 xstats_mode_count = dlb->xstats_count_per_qid[queue_port_id];
616 start_offset = dlb->xstats_offset_for_qid[queue_port_id];
617 break;
618 default:
619 return -EINVAL;
620 };
621
622 if (xstats_mode_count > size || ids == NULL || xstats_names == NULL)
623 return xstats_mode_count;
624
625 for (i = 0; i < dlb->xstats_count && xidx < size; i++) {
626 if (dlb->xstats[i].mode != mode)
627 continue;
628
629 if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
630 queue_port_id != dlb->xstats[i].obj_idx)
631 continue;
632
633 xstats_names[xidx] = dlb->xstats[i].name;
634 if (ids)
635 ids[xidx] = start_offset + xidx;
636 xidx++;
637 }
638 return xidx;
639 }
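
/*
 * Minimal sketch of how an application reaches this callback through the
 * public eventdev API (dev_id, the port id of 0 and the array size of 64
 * are assumptions for illustration):
 *
 *   struct rte_event_dev_xstats_name names[64];
 *   unsigned int ids[64];
 *   uint64_t values[64];
 *   int n = rte_event_dev_xstats_names_get(dev_id,
 *                   RTE_EVENT_DEV_XSTATS_PORT, 0, names, ids, 64);
 *   if (n > 0 && n <= 64)
 *           n = rte_event_dev_xstats_get(dev_id,
 *                   RTE_EVENT_DEV_XSTATS_PORT, 0, ids, values, n);
 */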
640
641 static int
642 dlb_xstats_update(struct dlb_eventdev *dlb,
643 enum rte_event_dev_xstats_mode mode,
644 uint8_t queue_port_id, const unsigned int ids[],
645 uint64_t values[], unsigned int n, const uint32_t reset)
646 {
647 unsigned int i;
648 unsigned int xidx = 0;
649 uint32_t xstats_mode_count = 0;
650
651 switch (mode) {
652 case RTE_EVENT_DEV_XSTATS_DEVICE:
653 xstats_mode_count = dlb->xstats_count_mode_dev;
654 break;
655 case RTE_EVENT_DEV_XSTATS_PORT:
656 if (queue_port_id >= DLB_MAX_NUM_PORTS)
657 goto invalid_value;
658 xstats_mode_count = dlb->xstats_count_per_port[queue_port_id];
659 break;
660 case RTE_EVENT_DEV_XSTATS_QUEUE:
661 #if (DLB_MAX_NUM_QUEUES <= 255) /* max 8 bit value */
662 if (queue_port_id >= DLB_MAX_NUM_QUEUES)
663 goto invalid_value;
664 #endif
665 xstats_mode_count = dlb->xstats_count_per_qid[queue_port_id];
666 break;
667 default:
668 goto invalid_value;
669 };
670
671 for (i = 0; i < n && xidx < xstats_mode_count; i++) {
672 struct dlb_xstats_entry *xs = &dlb->xstats[ids[i]];
673 dlb_xstats_fn fn;
674
675 if (ids[i] >= dlb->xstats_count || xs->mode != mode)
676 continue;
677
678 if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
679 queue_port_id != xs->obj_idx)
680 continue;
681
682 switch (xs->fn_id) {
683 case DLB_XSTATS_FN_DEV:
684 fn = get_dev_stat;
685 break;
686 case DLB_XSTATS_FN_PORT:
687 fn = get_port_stat;
688 break;
689 case DLB_XSTATS_FN_QUEUE:
690 fn = get_queue_stat;
691 break;
692 default:
693 DLB_LOG_ERR("Unexpected xstat fn_id %d\n",
694 xs->fn_id);
695 return -EINVAL;
696 }
697
698 uint64_t val = fn(dlb, xs->obj_idx, xs->stat,
699 xs->extra_arg) - xs->reset_value;
700
701 if (values)
702 values[xidx] = val;
703
704 if (xs->reset_allowed && reset)
705 xs->reset_value += val;
706
707 xidx++;
708 }
709
710 return xidx;
711
712 invalid_value:
713 return -EINVAL;
714 }
715
716 int
717 dlb_eventdev_xstats_get(const struct rte_eventdev *dev,
718 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
719 const unsigned int ids[], uint64_t values[], unsigned int n)
720 {
721 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
722 const uint32_t reset = 0;
723
724 return dlb_xstats_update(dlb, mode, queue_port_id, ids, values, n,
725 reset);
726 }
727
728 uint64_t
729 dlb_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
730 const char *name, unsigned int *id)
731 {
732 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
733 unsigned int i;
734 dlb_xstats_fn fn;
735
736 for (i = 0; i < dlb->xstats_count; i++) {
737 struct dlb_xstats_entry *xs = &dlb->xstats[i];
738
739 if (strncmp(xs->name.name, name,
740 RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0){
741 if (id != NULL)
742 *id = i;
743
744 switch (xs->fn_id) {
745 case DLB_XSTATS_FN_DEV:
746 fn = get_dev_stat;
747 break;
748 case DLB_XSTATS_FN_PORT:
749 fn = get_port_stat;
750 break;
751 case DLB_XSTATS_FN_QUEUE:
752 fn = get_queue_stat;
753 break;
754 default:
755 DLB_LOG_ERR("Unexpected xstat fn_id %d\n",
756 xs->fn_id);
757 return (uint64_t)-1;
758 }
759
760 return fn(dlb, xs->obj_idx, xs->stat,
761 xs->extra_arg) - xs->reset_value;
762 }
763 }
764 if (id != NULL)
765 *id = (uint32_t)-1;
766 return (uint64_t)-1;
767 }
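
/*
 * Illustrative lookup of a single stat by name via the public API; the
 * "dev_rx_ok" name follows the prefixes built in dlb_xstats_init() and
 * dev_id is an assumption:
 *
 *   unsigned int id;
 *   uint64_t val = rte_event_dev_xstats_by_name_get(dev_id,
 *                                                   "dev_rx_ok", &id);
 *
 * A return value of (uint64_t)-1 together with id == (unsigned int)-1
 * indicates the name was not found.
 */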
768
769 static void
770 dlb_xstats_reset_range(struct dlb_eventdev *dlb, uint32_t start,
771 uint32_t num)
772 {
773 uint32_t i;
774 dlb_xstats_fn fn;
775
776 for (i = start; i < start + num; i++) {
777 struct dlb_xstats_entry *xs = &dlb->xstats[i];
778
779 if (!xs->reset_allowed)
780 continue;
781
782 switch (xs->fn_id) {
783 case DLB_XSTATS_FN_DEV:
784 fn = get_dev_stat;
785 break;
786 case DLB_XSTATS_FN_PORT:
787 fn = get_port_stat;
788 break;
789 case DLB_XSTATS_FN_QUEUE:
790 fn = get_queue_stat;
791 break;
792 default:
793 DLB_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id);
794 return;
795 }
796
797 uint64_t val = fn(dlb, xs->obj_idx, xs->stat, xs->extra_arg);
798 xs->reset_value = val;
799 }
800 }
801
802 static int
803 dlb_xstats_reset_queue(struct dlb_eventdev *dlb, uint8_t queue_id,
804 const uint32_t ids[], uint32_t nb_ids)
805 {
806 const uint32_t reset = 1;
807
808 if (ids) {
809 uint32_t nb_reset = dlb_xstats_update(dlb,
810 RTE_EVENT_DEV_XSTATS_QUEUE,
811 queue_id, ids, NULL, nb_ids,
812 reset);
813 return nb_reset == nb_ids ? 0 : -EINVAL;
814 }
815
817 dlb_xstats_reset_range(dlb,
818 dlb->xstats_offset_for_qid[queue_id],
819 dlb->xstats_count_per_qid[queue_id]);
820
821 return 0;
822 }
823
824 static int
825 dlb_xstats_reset_port(struct dlb_eventdev *dlb, uint8_t port_id,
826 const uint32_t ids[], uint32_t nb_ids)
827 {
828 const uint32_t reset = 1;
829 int offset = dlb->xstats_offset_for_port[port_id];
830 int nb_stat = dlb->xstats_count_per_port[port_id];
831
832 if (ids) {
833 uint32_t nb_reset = dlb_xstats_update(dlb,
834 RTE_EVENT_DEV_XSTATS_PORT, port_id,
835 ids, NULL, nb_ids,
836 reset);
837 return nb_reset == nb_ids ? 0 : -EINVAL;
838 }
839
840 dlb_xstats_reset_range(dlb, offset, nb_stat);
841 return 0;
842 }
843
844 static int
845 dlb_xstats_reset_dev(struct dlb_eventdev *dlb, const uint32_t ids[],
846 uint32_t nb_ids)
847 {
848 uint32_t i;
849
850 if (ids) {
851 for (i = 0; i < nb_ids; i++) {
852 uint32_t id = ids[i];
853
854 if (id >= dlb->xstats_count_mode_dev)
855 return -EINVAL;
856 dlb_xstats_reset_range(dlb, id, 1);
857 }
858 } else {
859 for (i = 0; i < dlb->xstats_count_mode_dev; i++)
860 dlb_xstats_reset_range(dlb, i, 1);
861 }
862
863 return 0;
864 }
865
866 int
867 dlb_eventdev_xstats_reset(struct rte_eventdev *dev,
868 enum rte_event_dev_xstats_mode mode,
869 int16_t queue_port_id,
870 const uint32_t ids[],
871 uint32_t nb_ids)
872 {
873 struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
874 uint32_t i;
875
876 /* handle -1 for queue_port_id here, looping over all ports/queues */
877 switch (mode) {
878 case RTE_EVENT_DEV_XSTATS_DEVICE:
879 if (dlb_xstats_reset_dev(dlb, ids, nb_ids))
880 return -EINVAL;
881 break;
882 case RTE_EVENT_DEV_XSTATS_PORT:
883 if (queue_port_id == -1) {
884 for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
885 if (dlb_xstats_reset_port(dlb, i, ids,
886 nb_ids))
887 return -EINVAL;
888 }
889 } else if (queue_port_id < DLB_MAX_NUM_PORTS) {
890 if (dlb_xstats_reset_port(dlb, queue_port_id, ids,
891 nb_ids))
892 return -EINVAL;
893 } else {
894 return -EINVAL;
895 }
896 break;
897 case RTE_EVENT_DEV_XSTATS_QUEUE:
898 if (queue_port_id == -1) {
899 for (i = 0; i < DLB_MAX_NUM_QUEUES; i++) {
900 if (dlb_xstats_reset_queue(dlb, i, ids,
901 nb_ids))
902 return -EINVAL;
903 }
904 } else if (queue_port_id < DLB_MAX_NUM_QUEUES) {
905 if (dlb_xstats_reset_queue(dlb, queue_port_id, ids,
906 nb_ids))
907 return -EINVAL;
908 } else {
909 return -EINVAL;
910 }
911 break;
912 };
913
914 return 0;
915 }
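
/*
 * Illustrative reset of all resettable per-port stats on every port via
 * the public API (dev_id is an assumption); a queue_port_id of -1
 * selects all objects of the requested mode, and a NULL ids array resets
 * every stat that has reset_allowed set:
 *
 *   rte_event_dev_xstats_reset(dev_id, RTE_EVENT_DEV_XSTATS_PORT,
 *                              -1, NULL, 0);
 */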
916
917 void
918 dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f)
919 {
920 struct dlb_eventdev *dlb;
921 struct dlb_hw_dev *handle;
922 int i;
923
924 dlb = dlb_pmd_priv(dev);
925
926 if (dlb == NULL) {
927 fprintf(f, "DLB Event device cannot be dumped!\n");
928 return;
929 }
930
931 if (!dlb->configured)
932 fprintf(f, "DLB Event device is not configured\n");
933
934 handle = &dlb->qm_instance;
935
936 fprintf(f, "================\n");
937 fprintf(f, "DLB Device Dump\n");
938 fprintf(f, "================\n");
939
940 fprintf(f, "Processor supports umonitor/umwait instructions = %s\n",
941 dlb->umwait_allowed ? "yes" : "no");
942
943 /* Generic top level device information */
944
945 fprintf(f, "device is configured and run state = ");
946 if (dlb->run_state == DLB_RUN_STATE_STOPPED)
947 fprintf(f, "STOPPED\n");
948 else if (dlb->run_state == DLB_RUN_STATE_STOPPING)
949 fprintf(f, "STOPPING\n");
950 else if (dlb->run_state == DLB_RUN_STATE_STARTING)
951 fprintf(f, "STARTING\n");
952 else if (dlb->run_state == DLB_RUN_STATE_STARTED)
953 fprintf(f, "STARTED\n");
954 else
955 fprintf(f, "UNEXPECTED\n");
956
957 fprintf(f,
958 "dev ID=%d, dom ID=%u, sock=%u, evdev=%p\n",
959 handle->device_id, handle->domain_id,
960 handle->info.socket_id, dlb->event_dev);
961
962 fprintf(f, "num dir ports=%u, num dir queues=%u\n",
963 dlb->num_dir_ports, dlb->num_dir_queues);
964
965 fprintf(f, "num ldb ports=%u, num ldb queues=%u\n",
966 dlb->num_ldb_ports, dlb->num_ldb_queues);
967
968 fprintf(f, "dir_credit_pool_id=%u, num_credits=%u\n",
969 handle->cfg.dir_credit_pool_id, handle->cfg.num_dir_credits);
970
971 fprintf(f, "ldb_credit_pool_id=%u, num_credits=%u\n",
972 handle->cfg.ldb_credit_pool_id, handle->cfg.num_ldb_credits);
973
974 fprintf(f, "num atomic inflights=%u, hist list entries=%u\n",
975 handle->cfg.resources.num_atomic_inflights,
976 handle->cfg.resources.num_hist_list_entries);
977
978 fprintf(f, "results from most recent hw resource query:\n");
979
980 fprintf(f, "\tnum_sched_domains = %u\n",
981 dlb->hw_rsrc_query_results.num_sched_domains);
982
983 fprintf(f, "\tnum_ldb_queues = %u\n",
984 dlb->hw_rsrc_query_results.num_ldb_queues);
985
986 fprintf(f, "\tnum_ldb_ports = %u\n",
987 dlb->hw_rsrc_query_results.num_ldb_ports);
988
989 fprintf(f, "\tnum_dir_ports = %u\n",
990 dlb->hw_rsrc_query_results.num_dir_ports);
991
992 fprintf(f, "\tnum_atomic_inflights = %u\n",
993 dlb->hw_rsrc_query_results.num_atomic_inflights);
994
995 fprintf(f, "\tmax_contiguous_atomic_inflights = %u\n",
996 dlb->hw_rsrc_query_results.max_contiguous_atomic_inflights);
997
998 fprintf(f, "\tnum_hist_list_entries = %u\n",
999 dlb->hw_rsrc_query_results.num_hist_list_entries);
1000
1001 fprintf(f, "\tmax_contiguous_hist_list_entries = %u\n",
1002 dlb->hw_rsrc_query_results.max_contiguous_hist_list_entries);
1003
1004 fprintf(f, "\tnum_ldb_credits = %u\n",
1005 dlb->hw_rsrc_query_results.num_ldb_credits);
1006
1007 fprintf(f, "\tmax_contiguous_ldb_credits = %u\n",
1008 dlb->hw_rsrc_query_results.max_contiguous_ldb_credits);
1009
1010 fprintf(f, "\tnum_dir_credits = %u\n",
1011 dlb->hw_rsrc_query_results.num_dir_credits);
1012
1013 fprintf(f, "\tmax_contiguous_dir_credits = %u\n",
1014 dlb->hw_rsrc_query_results.max_contiguous_dir_credits);
1015
1016 fprintf(f, "\tnum_ldb_credit_pools = %u\n",
1017 dlb->hw_rsrc_query_results.num_ldb_credit_pools);
1018
1019 fprintf(f, "\tnum_dir_credit_pools = %u\n",
1020 dlb->hw_rsrc_query_results.num_dir_credit_pools);
1021
1022 /* Port level information */
1023
1024 for (i = 0; i < dlb->num_ports; i++) {
1025 struct dlb_eventdev_port *p = &dlb->ev_ports[i];
1026 int j;
1027
1028 if (!p->enq_configured)
1029 fprintf(f, "Port_%d is not configured\n", i);
1030
1031 fprintf(f, "Port_%d\n", i);
1032 fprintf(f, "=======\n");
1033
1034 fprintf(f, "\tevport_%u is configured, setup done=%d\n",
1035 p->id, p->setup_done);
1036
1037 fprintf(f, "\tconfig state=%d, port state=%d\n",
1038 p->qm_port.config_state, p->qm_port.state);
1039
1040 fprintf(f, "\tport is %s\n",
1041 p->qm_port.is_directed ? "directed" : "load balanced");
1042
1043 fprintf(f, "\toutstanding releases=%u\n",
1044 p->outstanding_releases);
1045
1046 fprintf(f, "\tinflight max=%u, inflight credits=%u\n",
1047 p->inflight_max, p->inflight_credits);
1048
1049 fprintf(f, "\tcredit update quanta=%u, implicit release =%u\n",
1050 p->credit_update_quanta, p->implicit_release);
1051
1052 fprintf(f, "\tnum_links=%d, queues -> ", p->num_links);
1053
1054 for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
1055 if (p->link[j].valid)
1056 fprintf(f, "id=%u prio=%u ",
1057 p->link[j].queue_id,
1058 p->link[j].priority);
1059 }
1060 fprintf(f, "\n");
1061
1062 fprintf(f, "\thardware port id=%u\n", p->qm_port.id);
1063
1064 fprintf(f, "\tcached_ldb_credits=%u\n",
1065 p->qm_port.cached_ldb_credits);
1066
1067 fprintf(f, "\tldb_pushcount_at_credit_expiry = %u\n",
1068 p->qm_port.ldb_pushcount_at_credit_expiry);
1069
1070 fprintf(f, "\tldb_credits = %u\n",
1071 p->qm_port.ldb_credits);
1072
1073 fprintf(f, "\tcached_dir_credits = %u\n",
1074 p->qm_port.cached_dir_credits);
1075
1076 fprintf(f, "\tdir_pushcount_at_credit_expiry=%u\n",
1077 p->qm_port.dir_pushcount_at_credit_expiry);
1078
1079 fprintf(f, "\tdir_credits = %u\n",
1080 p->qm_port.dir_credits);
1081
1082 fprintf(f, "\tgenbit=%d, cq_idx=%d, cq_depth=%d\n",
1083 p->qm_port.gen_bit,
1084 p->qm_port.cq_idx,
1085 p->qm_port.cq_depth);
1086
1087 fprintf(f, "\tuse reserved token scheme=%d, cq_rsvd_token_deficit=%u\n",
1088 p->qm_port.use_rsvd_token_scheme,
1089 p->qm_port.cq_rsvd_token_deficit);
1090
1091 fprintf(f, "\tinterrupt armed=%d\n",
1092 p->qm_port.int_armed);
1093
1094 fprintf(f, "\tPort statistics\n");
1095
1096 fprintf(f, "\t\trx_ok %" PRIu64 "\n",
1097 p->stats.traffic.rx_ok);
1098
1099 fprintf(f, "\t\trx_drop %" PRIu64 "\n",
1100 p->stats.traffic.rx_drop);
1101
1102 fprintf(f, "\t\trx_interrupt_wait %" PRIu64 "\n",
1103 p->stats.traffic.rx_interrupt_wait);
1104
1105 fprintf(f, "\t\trx_umonitor_umwait %" PRIu64 "\n",
1106 p->stats.traffic.rx_umonitor_umwait);
1107
1108 fprintf(f, "\t\ttx_ok %" PRIu64 "\n",
1109 p->stats.traffic.tx_ok);
1110
1111 fprintf(f, "\t\ttotal_polls %" PRIu64 "\n",
1112 p->stats.traffic.total_polls);
1113
1114 fprintf(f, "\t\tzero_polls %" PRIu64 "\n",
1115 p->stats.traffic.zero_polls);
1116
1117 fprintf(f, "\t\ttx_nospc_ldb_hw_credits %" PRIu64 "\n",
1118 p->stats.traffic.tx_nospc_ldb_hw_credits);
1119
1120 fprintf(f, "\t\ttx_nospc_dir_hw_credits %" PRIu64 "\n",
1121 p->stats.traffic.tx_nospc_dir_hw_credits);
1122
1123 fprintf(f, "\t\ttx_nospc_inflight_max %" PRIu64 "\n",
1124 p->stats.traffic.tx_nospc_inflight_max);
1125
1126 fprintf(f, "\t\ttx_nospc_new_event_limit %" PRIu64 "\n",
1127 p->stats.traffic.tx_nospc_new_event_limit);
1128
1129 fprintf(f, "\t\ttx_nospc_inflight_credits %" PRIu64 "\n",
1130 p->stats.traffic.tx_nospc_inflight_credits);
1131
1132 fprintf(f, "\t\ttx_new %" PRIu64 "\n",
1133 p->stats.tx_op_cnt[RTE_EVENT_OP_NEW]);
1134
1135 fprintf(f, "\t\ttx_fwd %" PRIu64 "\n",
1136 p->stats.tx_op_cnt[RTE_EVENT_OP_FORWARD]);
1137
1138 fprintf(f, "\t\ttx_rel %" PRIu64 "\n",
1139 p->stats.tx_op_cnt[RTE_EVENT_OP_RELEASE]);
1140
1141 fprintf(f, "\t\ttx_implicit_rel %" PRIu64 "\n",
1142 p->stats.tx_implicit_rel);
1143
1144 fprintf(f, "\t\ttx_sched_ordered %" PRIu64 "\n",
1145 p->stats.tx_sched_cnt[DLB_SCHED_ORDERED]);
1146
1147 fprintf(f, "\t\ttx_sched_unordered %" PRIu64 "\n",
1148 p->stats.tx_sched_cnt[DLB_SCHED_UNORDERED]);
1149
1150 fprintf(f, "\t\ttx_sched_atomic %" PRIu64 "\n",
1151 p->stats.tx_sched_cnt[DLB_SCHED_ATOMIC]);
1152
1153 fprintf(f, "\t\ttx_sched_directed %" PRIu64 "\n",
1154 p->stats.tx_sched_cnt[DLB_SCHED_DIRECTED]);
1155
1156 fprintf(f, "\t\ttx_invalid %" PRIu64 "\n",
1157 p->stats.tx_invalid);
1158
1159 fprintf(f, "\t\trx_sched_ordered %" PRIu64 "\n",
1160 p->stats.rx_sched_cnt[DLB_SCHED_ORDERED]);
1161
1162 fprintf(f, "\t\trx_sched_unordered %" PRIu64 "\n",
1163 p->stats.rx_sched_cnt[DLB_SCHED_UNORDERED]);
1164
1165 fprintf(f, "\t\trx_sched_atomic %" PRIu64 "\n",
1166 p->stats.rx_sched_cnt[DLB_SCHED_ATOMIC]);
1167
1168 fprintf(f, "\t\trx_sched_directed %" PRIu64 "\n",
1169 p->stats.rx_sched_cnt[DLB_SCHED_DIRECTED]);
1170
1171 fprintf(f, "\t\trx_sched_invalid %" PRIu64 "\n",
1172 p->stats.rx_sched_invalid);
1173 }
1174
1175 /* Queue level information */
1176
1177 for (i = 0; i < dlb->num_queues; i++) {
1178 struct dlb_eventdev_queue *q = &dlb->ev_queues[i];
1179 int j, k;
1180
1181 if (!q->setup_done)
1182 fprintf(f, "Queue_%d is not configured\n", i);
1183
1184 fprintf(f, "Queue_%d\n", i);
1185 fprintf(f, "========\n");
1186
1187 fprintf(f, "\tevqueue_%u is set up\n", q->id);
1188
1189 fprintf(f, "\tqueue is %s\n",
1190 q->qm_queue.is_directed ? "directed" : "load balanced");
1191
1192 fprintf(f, "\tnum_links=%d, ports -> ", q->num_links);
1193
1194 for (j = 0; j < dlb->num_ports; j++) {
1195 struct dlb_eventdev_port *p = &dlb->ev_ports[j];
1196
1197 for (k = 0; k < DLB_MAX_NUM_QIDS_PER_LDB_CQ; k++) {
1198 if (p->link[k].valid &&
1199 p->link[k].queue_id == q->id)
1200 fprintf(f, "id=%u prio=%u ",
1201 p->id, p->link[k].priority);
1202 }
1203 }
1204 fprintf(f, "\n");
1205
1206 fprintf(f, "\tcurrent depth: %u events\n",
1207 dlb_get_queue_depth(dlb, q));
1208
1209 fprintf(f, "\tnum qid inflights=%u, sched_type=%d\n",
1210 q->qm_queue.num_qid_inflights, q->qm_queue.sched_type);
1211 }
1212 }
1213