xref: /dpdk/drivers/event/sw/sw_evdev_xstats.c (revision 324b37e6)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <rte_event_ring.h>
#include "sw_evdev.h"
#include "iq_chunk.h"

enum xstats_type {
	/* common stats */
	rx,
	tx,
	dropped,
	inflight,
	calls,
	credits,
	/* device instance specific */
	no_iq_enq,
	no_cq_enq,
	sched_last_iter_bitmask,
	sched_progress_last_iter,
	/* port specific */
	rx_used,
	rx_free,
	tx_used,
	tx_free,
	pkt_cycles,
	poll_return, /* zero-event dequeues; also reused for the port bucket stats */
	/* qid specific */
	iq_used,
	/* qid port mapping specific */
	pinned,
	pkts, /* note: qid-to-port pkts */
};

typedef uint64_t (*xstats_fn)(const struct sw_evdev *dev,
		uint16_t obj_idx, /* port or queue id */
		enum xstats_type stat, int extra_arg);

struct sw_xstats_entry {
	struct rte_event_dev_xstats_name name;
	xstats_fn fn;
	uint16_t obj_idx;
	enum xstats_type stat;
	enum rte_event_dev_xstats_mode mode;
	int extra_arg;
	uint8_t reset_allowed; /* when set, this value can be reset */
	uint64_t reset_value; /* offset subtracted from the raw value to emulate resets */
};

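/* Fetch a device-level stat; obj_idx and extra_arg are unused here. */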
static uint64_t
get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
		enum xstats_type type, int extra_arg __rte_unused)
{
	switch (type) {
	case rx: return sw->stats.rx_pkts;
	case tx: return sw->stats.tx_pkts;
	case dropped: return sw->stats.rx_dropped;
	case calls: return sw->sched_called;
	case no_iq_enq: return sw->sched_no_iq_enqueues;
	case no_cq_enq: return sw->sched_no_cq_enqueues;
	case sched_last_iter_bitmask: return sw->sched_last_iter_bitmask;
	case sched_progress_last_iter: return sw->sched_progress_last_iter;
	default: return -1;
	}
}

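/* Fetch a per-port stat, including the current occupancy of the port's
 * rx worker ring and cq (consumer queue) worker ring.
 */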
static uint64_t
get_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg __rte_unused)
{
	const struct sw_port *p = &sw->ports[obj_idx];

	switch (type) {
	case rx: return p->stats.rx_pkts;
	case tx: return p->stats.tx_pkts;
	case dropped: return p->stats.rx_dropped;
	case inflight: return p->inflights;
	case pkt_cycles: return p->avg_pkt_ticks;
	case calls: return p->total_polls;
	case credits: return p->inflight_credits;
	case poll_return: return p->zero_polls;
	case rx_used: return rte_event_ring_count(p->rx_worker_ring);
	case rx_free: return rte_event_ring_free_count(p->rx_worker_ring);
	case tx_used: return rte_event_ring_count(p->cq_worker_ring);
	case tx_free: return rte_event_ring_free_count(p->cq_worker_ring);
	default: return -1;
	}
}

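/* Fetch one dequeue-burst-size bucket counter for a port; extra_arg
 * selects the bucket.
 */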
static uint64_t
get_port_bucket_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg)
{
	const struct sw_port *p = &sw->ports[obj_idx];

	switch (type) {
	case poll_return: return p->poll_buckets[extra_arg];
	default: return -1;
	}
}

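/* Fetch a per-queue stat; inflight is computed on demand by summing the
 * per-flow packet counts of every fid on the queue.
 */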
static uint64_t
get_qid_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg __rte_unused)
{
	const struct sw_qid *qid = &sw->qids[obj_idx];

	switch (type) {
	case rx: return qid->stats.rx_pkts;
	case tx: return qid->stats.tx_pkts;
	case dropped: return qid->stats.rx_dropped;
	case inflight: {
		uint64_t infl = 0;
		unsigned int i;

		for (i = 0; i < RTE_DIM(qid->fids); i++)
			infl += qid->fids[i].pcount;
		return infl;
	}
	default: return -1;
	}
}

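/* Fetch the occupancy of one internal queue (IQ) of a qid; extra_arg
 * selects which of the SW_IQS_MAX priority IQs to report.
 */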
static uint64_t
get_qid_iq_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg)
{
	const struct sw_qid *qid = &sw->qids[obj_idx];
	const int iq_idx = extra_arg;

	switch (type) {
	case iq_used: return iq_count(&qid->iq[iq_idx]);
	default: return -1;
	}
}

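/* Fetch a qid-to-port mapping stat; extra_arg selects the port. "pinned"
 * counts the flows of this qid currently affinitized to that port.
 */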
static uint64_t
get_qid_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
		enum xstats_type type, int extra_arg)
{
	const struct sw_qid *qid = &sw->qids[obj_idx];
	uint16_t port = extra_arg;

	switch (type) {
	case pinned: {
		uint64_t pin = 0;
		unsigned int i;

		for (i = 0; i < RTE_DIM(qid->fids); i++)
			if (qid->fids[i].cq == port)
				pin++;
		return pin;
	}
	case pkts:
		return qid->to_port[port];
	default: return -1;
	}
}

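/* Build the flat array of xstats entries for this device instance, and
 * record the per-mode counts and offsets used by the lookup and reset
 * functions below. Returns the total number of stats, or -ENOMEM.
 */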
int
sw_xstats_init(struct sw_evdev *sw)
{
	/*
	 * Define the stat names and types used to build up the device
	 * xstats array. There are multiple sets of stats:
	 *   - device-level
	 *   - per-port
	 *   - per-port-dequeue-burst-size
	 *   - per-qid
	 *   - per-iq
	 *   - per-qid-per-port
	 *
	 * For each set there are up to three parallel arrays: one for the
	 * names, one for the stat type parameter to pass to the fn call
	 * that fetches the stat, and one flagging whether the stat may be
	 * reset. All these arrays must be kept in sync.
	 */
	static const char * const dev_stats[] = { "rx", "tx", "drop",
			"sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
			"sched_last_iter_bitmask", "sched_progress_last_iter",
	};
	static const enum xstats_type dev_types[] = { rx, tx, dropped,
			calls, no_iq_enq, no_cq_enq, sched_last_iter_bitmask,
			sched_progress_last_iter,
	};
	/* all device stats are allowed to be reset */

	static const char * const port_stats[] = {"rx", "tx", "drop",
			"inflight", "avg_pkt_cycles", "credits",
			"rx_ring_used", "rx_ring_free",
			"cq_ring_used", "cq_ring_free",
			"dequeue_calls", "dequeues_returning_0",
	};
	static const enum xstats_type port_types[] = { rx, tx, dropped,
			inflight, pkt_cycles, credits,
			rx_used, rx_free, tx_used, tx_free,
			calls, poll_return,
	};
	static const uint8_t port_reset_allowed[] = {1, 1, 1,
			0, 1, 0,
			0, 0, 0, 0,
			1, 1,
	};

	static const char * const port_bucket_stats[] = {
			"dequeues_returning" };
	static const enum xstats_type port_bucket_types[] = { poll_return };
	/* all bucket dequeues are allowed to be reset, handled in loop below */

	static const char * const qid_stats[] = {"rx", "tx", "drop",
			"inflight"
	};
	static const enum xstats_type qid_types[] = { rx, tx, dropped,
			inflight
	};
	static const uint8_t qid_reset_allowed[] = {1, 1, 1,
			0
	};

	static const char * const qid_iq_stats[] = { "used" };
	static const enum xstats_type qid_iq_types[] = { iq_used };
	/* iq stats are not reset-allowed, set in the loop below */

	static const char * const qid_port_stats[] = { "pinned_flows",
		"packets"
	};
	static const enum xstats_type qid_port_types[] = { pinned, pkts };
	static const uint8_t qid_port_reset_allowed[] = {0, 1};
	/* ---- end of stat definitions ---- */

	/* check sizes, since a missed comma can lead to strings being
	 * joined by the compiler.
	 */
	RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_iq_stats) != RTE_DIM(qid_iq_types));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_port_stats) != RTE_DIM(qid_port_types));
	RTE_BUILD_BUG_ON(RTE_DIM(port_bucket_stats) !=
			RTE_DIM(port_bucket_types));

	RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
	RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));

	/* other vars */
	const uint32_t cons_bkt_shift =
		(MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT);
	const unsigned int count = RTE_DIM(dev_stats) +
			sw->port_count * RTE_DIM(port_stats) +
			sw->port_count * RTE_DIM(port_bucket_stats) *
				(cons_bkt_shift + 1) +
			sw->qid_count * RTE_DIM(qid_stats) +
			sw->qid_count * SW_IQS_MAX * RTE_DIM(qid_iq_stats) +
			sw->qid_count * sw->port_count *
				RTE_DIM(qid_port_stats);
	unsigned int i, port, qid, iq, bkt, stat = 0;

	sw->xstats = rte_zmalloc_socket(NULL, sizeof(sw->xstats[0]) * count, 0,
			sw->data->socket_id);
	if (sw->xstats == NULL)
		return -ENOMEM;

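	/*
	 * The snprintf() formats used below produce names such as "dev_rx",
	 * "port_2_tx", "qid_0_inflight", "qid_0_iq_1_used" and
	 * "qid_0_port_3_packets"; the bucket stats become, e.g.,
	 * "port_2_dequeues_returning_1-4", with the bucket bounds set by
	 * SW_DEQ_STAT_BUCKET_SHIFT.
	 */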
#define sname sw->xstats[stat].name.name
	for (i = 0; i < RTE_DIM(dev_stats); i++, stat++) {
		sw->xstats[stat] = (struct sw_xstats_entry){
			.fn = get_dev_stat,
			.stat = dev_types[i],
			.mode = RTE_EVENT_DEV_XSTATS_DEVICE,
			.reset_allowed = 1,
		};
		snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
	}
	sw->xstats_count_mode_dev = stat;

	for (port = 0; port < sw->port_count; port++) {
		sw->xstats_offset_for_port[port] = stat;

		uint32_t count_offset = stat;

		for (i = 0; i < RTE_DIM(port_stats); i++, stat++) {
			sw->xstats[stat] = (struct sw_xstats_entry){
				.fn = get_port_stat,
				.obj_idx = port,
				.stat = port_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_PORT,
				.reset_allowed = port_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "port_%u_%s",
					port, port_stats[i]);
		}

		for (bkt = 0; bkt < (rte_event_ring_get_capacity(
				sw->ports[port].cq_worker_ring) >>
					SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
			for (i = 0; i < RTE_DIM(port_bucket_stats); i++) {
				sw->xstats[stat] = (struct sw_xstats_entry){
					.fn = get_port_bucket_stat,
					.obj_idx = port,
					.stat = port_bucket_types[i],
					.mode = RTE_EVENT_DEV_XSTATS_PORT,
					.extra_arg = bkt,
					.reset_allowed = 1,
				};
				snprintf(sname, sizeof(sname),
					"port_%u_%s_%u-%u",
					port, port_bucket_stats[i],
					(bkt << SW_DEQ_STAT_BUCKET_SHIFT) + 1,
					(bkt + 1) << SW_DEQ_STAT_BUCKET_SHIFT);
				stat++;
			}
		}

		sw->xstats_count_per_port[port] = stat - count_offset;
	}

	sw->xstats_count_mode_port = stat - sw->xstats_count_mode_dev;

	for (qid = 0; qid < sw->qid_count; qid++) {
		uint32_t count_offset = stat;
		sw->xstats_offset_for_qid[qid] = stat;

		for (i = 0; i < RTE_DIM(qid_stats); i++, stat++) {
			sw->xstats[stat] = (struct sw_xstats_entry){
				.fn = get_qid_stat,
				.obj_idx = qid,
				.stat = qid_types[i],
				.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
				.reset_allowed = qid_reset_allowed[i],
			};
			snprintf(sname, sizeof(sname), "qid_%u_%s",
					qid, qid_stats[i]);
		}
		for (iq = 0; iq < SW_IQS_MAX; iq++)
			for (i = 0; i < RTE_DIM(qid_iq_stats); i++, stat++) {
				sw->xstats[stat] = (struct sw_xstats_entry){
					.fn = get_qid_iq_stat,
					.obj_idx = qid,
					.stat = qid_iq_types[i],
					.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
					.extra_arg = iq,
					.reset_allowed = 0,
				};
				snprintf(sname, sizeof(sname),
						"qid_%u_iq_%u_%s",
						qid, iq,
						qid_iq_stats[i]);
			}

		for (port = 0; port < sw->port_count; port++)
			for (i = 0; i < RTE_DIM(qid_port_stats); i++, stat++) {
				sw->xstats[stat] = (struct sw_xstats_entry){
					.fn = get_qid_port_stat,
					.obj_idx = qid,
					.stat = qid_port_types[i],
					.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
					.extra_arg = port,
					.reset_allowed =
						qid_port_reset_allowed[i],
				};
				snprintf(sname, sizeof(sname),
						"qid_%u_port_%u_%s",
						qid, port,
						qid_port_stats[i]);
			}

		sw->xstats_count_per_qid[qid] = stat - count_offset;
	}

	sw->xstats_count_mode_queue = stat -
		(sw->xstats_count_mode_dev + sw->xstats_count_mode_port);
#undef sname

	sw->xstats_count = stat;

	return stat;
}

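/* Free the xstats array allocated in sw_xstats_init(). */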
int
sw_xstats_uninit(struct sw_evdev *sw)
{
	rte_free(sw->xstats);
	sw->xstats_count = 0;
	return 0;
}

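/* Fill in the names and ids of the stats visible in the given mode (and,
 * for the port/queue modes, belonging to the given port or queue). If the
 * caller's arrays are too small or NULL, only the required count is
 * returned.
 */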
int
sw_xstats_get_names(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	const struct sw_evdev *sw = sw_pmd_priv_const(dev);
	unsigned int i;
	unsigned int xidx = 0;

	uint32_t xstats_mode_count = 0;
	uint32_t start_offset = 0;

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = sw->xstats_count_mode_dev;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= (signed int)sw->port_count)
			break;
		xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
		start_offset = sw->xstats_offset_for_port[queue_port_id];
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= (signed int)sw->qid_count)
			break;
		xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
		start_offset = sw->xstats_offset_for_qid[queue_port_id];
		break;
	default:
		SW_LOG_ERR("Invalid mode received in sw_xstats_get_names()\n");
		return -EINVAL;
	}

	if (xstats_mode_count > size || !ids || !xstats_names)
		return xstats_mode_count;

	for (i = 0; i < sw->xstats_count && xidx < size; i++) {
		if (sw->xstats[i].mode != mode)
			continue;

		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
				queue_port_id != sw->xstats[i].obj_idx)
			continue;

		xstats_names[xidx] = sw->xstats[i].name;
		if (ids)
			ids[xidx] = start_offset + xidx;
		xidx++;
	}
	return xidx;
}

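/* Common worker for the get and reset paths: fetch each requested stat
 * through its fn callback, subtract the stored reset_value, and, when
 * resetting, fold the current value back into reset_value.
 */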
static int
sw_xstats_update(struct sw_evdev *sw, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n, const uint32_t reset,
		const uint32_t ret_if_n_lt_nstats)
{
	unsigned int i;
	unsigned int xidx = 0;

	uint32_t xstats_mode_count = 0;

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		xstats_mode_count = sw->xstats_count_mode_dev;
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= (signed int)sw->port_count)
			goto invalid_value;
		xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= (signed int)sw->qid_count)
			goto invalid_value;
		xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
		break;
	default:
		SW_LOG_ERR("Invalid mode received in sw_xstats_update()\n");
		goto invalid_value;
	}

	/* This function either checks the stat count and returns it
	 * (xstats_get() style behaviour), or ignores n entirely
	 * (single-stat reset() style behaviour), depending on
	 * ret_if_n_lt_nstats.
	 */
	if (ret_if_n_lt_nstats && xstats_mode_count > n)
		return xstats_mode_count;

	for (i = 0; i < n && xidx < xstats_mode_count; i++) {
		struct sw_xstats_entry *xs = &sw->xstats[ids[i]];
		if (ids[i] >= sw->xstats_count || xs->mode != mode)
			continue;

		if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
				queue_port_id != xs->obj_idx)
			continue;

		uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
					- xs->reset_value;

		if (values)
			values[xidx] = val;

		if (xs->reset_allowed && reset)
			xs->reset_value += val;

		xidx++;
	}

	return xidx;
invalid_value:
	return -EINVAL;
}

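/* eventdev xstats_get callback; returned values are net of any previous
 * reset. For illustration only (hypothetical dev_id and ids, error
 * handling omitted), an application reaches this through the public API
 * roughly as:
 *
 *	uint64_t values[3];
 *	const unsigned int ids[] = { 0, 1, 2 };
 *	int ret = rte_event_dev_xstats_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *			ids, values, RTE_DIM(ids));
 */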
int
sw_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	const uint32_t reset = 0;
	const uint32_t ret_n_lt_stats = 0;
	return sw_xstats_update(sw, mode, queue_port_id, ids, values, n,
				reset, ret_n_lt_stats);
}

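/* Look up a single stat by name and return its current value, storing its
 * id through *id if requested; returns (uint64_t)-1 if not found.
 */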
uint64_t
sw_xstats_get_by_name(const struct rte_eventdev *dev,
		const char *name, unsigned int *id)
{
	const struct sw_evdev *sw = sw_pmd_priv_const(dev);
	unsigned int i;

	for (i = 0; i < sw->xstats_count; i++) {
		struct sw_xstats_entry *xs = &sw->xstats[i];
		if (strncmp(xs->name.name, name,
				RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0) {
			if (id != NULL)
				*id = i;
			return xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
					- xs->reset_value;
		}
	}
	if (id != NULL)
		*id = (uint32_t)-1;
	return (uint64_t)-1;
}

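/* Emulate a reset of stats [start, start + num) by snapshotting each
 * stat's current value into its reset_value; stats with reset_allowed
 * clear are skipped.
 */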
static void
sw_xstats_reset_range(struct sw_evdev *sw, uint32_t start, uint32_t num)
{
	uint32_t i;
	for (i = start; i < start + num; i++) {
		struct sw_xstats_entry *xs = &sw->xstats[i];
		if (!xs->reset_allowed)
			continue;

		uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg);
		xs->reset_value = val;
	}
}

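/* Reset the selected stats of one queue, or all of them if ids is NULL. */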
static int
sw_xstats_reset_queue(struct sw_evdev *sw, uint8_t queue_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	const uint32_t reset = 1;
	const uint32_t ret_n_lt_stats = 0;
	if (ids) {
		uint32_t nb_reset = sw_xstats_update(sw,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					queue_id, ids, NULL, nb_ids,
					reset, ret_n_lt_stats);
		return nb_reset == nb_ids ? 0 : -EINVAL;
	}

	sw_xstats_reset_range(sw, sw->xstats_offset_for_qid[queue_id],
			      sw->xstats_count_per_qid[queue_id]);

	return 0;
}

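/* Reset the selected stats of one port, or all of them if ids is NULL. */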
static int
sw_xstats_reset_port(struct sw_evdev *sw, uint8_t port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	const uint32_t reset = 1;
	const uint32_t ret_n_lt_stats = 0;
	int offset = sw->xstats_offset_for_port[port_id];
	int nb_stat = sw->xstats_count_per_port[port_id];

	if (ids) {
		uint32_t nb_reset = sw_xstats_update(sw,
					RTE_EVENT_DEV_XSTATS_PORT, port_id,
					ids, NULL, nb_ids,
					reset, ret_n_lt_stats);
		return nb_reset == nb_ids ? 0 : -EINVAL;
	}

	sw_xstats_reset_range(sw, offset, nb_stat);
	return 0;
}

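/* Reset device-level stats; ids index the device-mode range at the start
 * of the xstats array.
 */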
static int
sw_xstats_reset_dev(struct sw_evdev *sw, const uint32_t ids[], uint32_t nb_ids)
{
	uint32_t i;
	if (ids) {
		for (i = 0; i < nb_ids; i++) {
			uint32_t id = ids[i];
			if (id >= sw->xstats_count_mode_dev)
				return -EINVAL;
			sw_xstats_reset_range(sw, id, 1);
		}
	} else {
		for (i = 0; i < sw->xstats_count_mode_dev; i++)
			sw_xstats_reset_range(sw, i, 1);
	}

	return 0;
}

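/* eventdev xstats_reset callback; a queue_port_id of -1 resets all ports
 * or all queues in the given mode.
 */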
int
sw_xstats_reset(struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint32_t ids[],
		uint32_t nb_ids)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	uint32_t i, err;

	/* handle -1 for queue_port_id here, looping over all ports/queues */
	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		sw_xstats_reset_dev(sw, ids, nb_ids);
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id == -1) {
			for (i = 0; i < sw->port_count; i++) {
				err = sw_xstats_reset_port(sw, i, ids, nb_ids);
				if (err)
					return -EINVAL;
			}
		} else if (queue_port_id < (int16_t)sw->port_count)
			sw_xstats_reset_port(sw, queue_port_id, ids, nb_ids);
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id == -1) {
			for (i = 0; i < sw->qid_count; i++) {
				err = sw_xstats_reset_queue(sw, i, ids, nb_ids);
				if (err)
					return -EINVAL;
			}
		} else if (queue_port_id < (int16_t)sw->qid_count)
			sw_xstats_reset_queue(sw, queue_port_id, ids, nb_ids);
		break;
	}

	return 0;
}
657