1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
3 */
4
5 #ifndef __OTX2_EVDEV_STATS_H__
6 #define __OTX2_EVDEV_STATS_H__
7
8 #include "otx2_evdev.h"
9
/* Descriptor for one SSO extended statistic: identifies where the raw
 * 64-bit counter word lives in the mailbox response and how to extract
 * the field of interest from it.
 */
struct otx2_sso_xstats_name {
	/* Stat name reported to the application. */
	const char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
	/* Byte offset of the raw 64-bit word in the mbox response. */
	const size_t offset;
	/* Mask applied after shifting to isolate the field. */
	const uint64_t mask;
	/* Right-shift applied to the raw word before masking. */
	const uint8_t shift;
	/* Per-group/per-port snapshot taken at xstats reset time;
	 * subtracted on read so stats are relative to the last reset.
	 */
	uint64_t reset_snap[OTX2_SSO_MAX_VHGRP];
};
17
/* Per-HWS (workslot/port) xstats. Both entries are bitfields carved out
 * of the single "arbitration" word in struct sso_hws_stats.
 */
static struct otx2_sso_xstats_name sso_hws_xstats[] = {
	{"last_grp_serviced", offsetof(struct sso_hws_stats, arbitration),
			0x3FF, 0, {0} },
	{"affinity_arbitration_credits",
			offsetof(struct sso_hws_stats, arbitration),
			0xF, 16, {0} },
};
25
/* Per-group (event queue) xstats, read from struct sso_grp_stats.
 * Plain counters use mask ~0x0/shift 0; "xaq_cached" and
 * "work_inflight" are bitfields of the same aw_status word.
 */
static struct otx2_sso_xstats_name sso_grp_xstats[] = {
	{"wrk_sched", offsetof(struct sso_grp_stats, ws_pc), ~0x0, 0,
			{0} },
	{"xaq_dram", offsetof(struct sso_grp_stats, ext_pc), ~0x0,
			0, {0} },
	{"add_wrk", offsetof(struct sso_grp_stats, wa_pc), ~0x0, 0,
			{0} },
	{"tag_switch_req", offsetof(struct sso_grp_stats, ts_pc), ~0x0, 0,
			{0} },
	{"desched_req", offsetof(struct sso_grp_stats, ds_pc), ~0x0, 0,
			{0} },
	{"desched_wrk", offsetof(struct sso_grp_stats, dq_pc), ~0x0, 0,
			{0} },
	{"xaq_cached", offsetof(struct sso_grp_stats, aw_status), 0x3,
			0, {0} },
	{"work_inflight", offsetof(struct sso_grp_stats, aw_status), 0x3F,
			16, {0} },
	{"inuse_pages", offsetof(struct sso_grp_stats, page_cnt),
			0xFFFFFFFF, 0, {0} },
};
46
/* Number of per-port (HWS) and per-queue (group) xstats. */
#define OTX2_SSO_NUM_HWS_XSTATS RTE_DIM(sso_hws_xstats)
#define OTX2_SSO_NUM_GRP_XSTATS RTE_DIM(sso_grp_xstats)

/* Total xstats; also sizes the scratch name array in get_names(). */
#define OTX2_SSO_NUM_XSTATS (OTX2_SSO_NUM_HWS_XSTATS + OTX2_SSO_NUM_GRP_XSTATS)
51
/*
 * Read extended stats from the SSO via the AF mailbox.
 *
 * @mode selects the stat set: DEVICE has no stats (returns 0), PORT
 * reads per-HWS counters, QUEUE reads per-group counters.
 * @queue_port_id is the port or queue index, validated against the
 * device's configured count.
 * @ids[] holds global xstat ids; queue ids are offset by the number of
 * HWS xstats, hence start_offset below.
 * @values[] receives up to @n masked/shifted counter values, reported
 * relative to the snapshot taken by the last xstats reset.
 *
 * Returns the number of stats written, or -EINVAL on a bad mode/id or
 * mailbox failure.
 */
static int
otx2_sso_xstats_get(const struct rte_eventdev *event_dev,
		    enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		    const unsigned int ids[], uint64_t values[], unsigned int n)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_sso_xstats_name *xstats;
	struct otx2_sso_xstats_name *xstat;
	struct otx2_mbox *mbox = dev->mbox;
	uint32_t xstats_mode_count = 0;
	uint32_t start_offset = 0;
	unsigned int i;
	uint64_t value;
	void *req_rsp;
	int rc;

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		/* No device-level xstats are implemented. */
		return 0;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= (signed int)dev->nb_event_ports)
			goto invalid_value;

		xstats_mode_count = OTX2_SSO_NUM_HWS_XSTATS;
		xstats = sso_hws_xstats;

		/* req_rsp first points at the request; after
		 * otx2_mbox_process_msg() it is rewritten to point at
		 * the response carrying the raw counter words.
		 */
		req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
		((struct sso_info_req *)req_rsp)->hws = dev->dual_ws ?
			 2 * queue_port_id : queue_port_id;
		rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
		if (rc < 0)
			goto invalid_value;

		if (dev->dual_ws) {
			/* Dual workslot: a port maps to HWS pair
			 * (2*id, 2*id+1). Store the first workslot's
			 * values now, then fetch the second; the loop
			 * after the switch accumulates it on top.
			 */
			for (i = 0; i < n && i < xstats_mode_count; i++) {
				xstat = &xstats[ids[i] - start_offset];
				values[i] = *(uint64_t *)
					((char *)req_rsp + xstat->offset);
				values[i] = (values[i] >> xstat->shift) &
					xstat->mask;
			}

			req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
			((struct sso_info_req *)req_rsp)->hws =
				(2 * queue_port_id) + 1;
			rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
			if (rc < 0)
				goto invalid_value;
		}

		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= (signed int)dev->nb_event_queues)
			goto invalid_value;

		xstats_mode_count = OTX2_SSO_NUM_GRP_XSTATS;
		/* Queue xstat ids start after the HWS xstat ids. */
		start_offset = OTX2_SSO_NUM_HWS_XSTATS;
		xstats = sso_grp_xstats;

		req_rsp = otx2_mbox_alloc_msg_sso_grp_get_stats(mbox);
		((struct sso_info_req *)req_rsp)->grp = queue_port_id;
		rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
		if (rc < 0)
			goto invalid_value;

		break;
	default:
		otx2_err("Invalid mode received");
		goto invalid_value;
	};

	for (i = 0; i < n && i < xstats_mode_count; i++) {
		xstat = &xstats[ids[i] - start_offset];
		value = *(uint64_t *)((char *)req_rsp + xstat->offset);
		value = (value >> xstat->shift) & xstat->mask;

		/* Dual-ws ports sum both workslots (first half was
		 * stored above); otherwise take the value directly.
		 */
		if ((mode == RTE_EVENT_DEV_XSTATS_PORT) && dev->dual_ws)
			values[i] += value;
		else
			values[i] = value;

		/* Report relative to the last reset snapshot. */
		values[i] -= xstat->reset_snap[queue_port_id];
	}

	return i;
invalid_value:
	return -EINVAL;
}
140
141 static int
otx2_sso_xstats_reset(struct rte_eventdev * event_dev,enum rte_event_dev_xstats_mode mode,int16_t queue_port_id,const uint32_t ids[],uint32_t n)142 otx2_sso_xstats_reset(struct rte_eventdev *event_dev,
143 enum rte_event_dev_xstats_mode mode,
144 int16_t queue_port_id, const uint32_t ids[], uint32_t n)
145 {
146 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
147 struct otx2_sso_xstats_name *xstats;
148 struct otx2_sso_xstats_name *xstat;
149 struct otx2_mbox *mbox = dev->mbox;
150 uint32_t xstats_mode_count = 0;
151 uint32_t start_offset = 0;
152 unsigned int i;
153 uint64_t value;
154 void *req_rsp;
155 int rc;
156
157 switch (mode) {
158 case RTE_EVENT_DEV_XSTATS_DEVICE:
159 return 0;
160 case RTE_EVENT_DEV_XSTATS_PORT:
161 if (queue_port_id >= (signed int)dev->nb_event_ports)
162 goto invalid_value;
163
164 xstats_mode_count = OTX2_SSO_NUM_HWS_XSTATS;
165 xstats = sso_hws_xstats;
166
167 req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
168 ((struct sso_info_req *)req_rsp)->hws = dev->dual_ws ?
169 2 * queue_port_id : queue_port_id;
170 rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
171 if (rc < 0)
172 goto invalid_value;
173
174 if (dev->dual_ws) {
175 for (i = 0; i < n && i < xstats_mode_count; i++) {
176 xstat = &xstats[ids[i] - start_offset];
177 xstat->reset_snap[queue_port_id] = *(uint64_t *)
178 ((char *)req_rsp + xstat->offset);
179 xstat->reset_snap[queue_port_id] =
180 (xstat->reset_snap[queue_port_id] >>
181 xstat->shift) & xstat->mask;
182 }
183
184 req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
185 ((struct sso_info_req *)req_rsp)->hws =
186 (2 * queue_port_id) + 1;
187 rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
188 if (rc < 0)
189 goto invalid_value;
190 }
191
192 break;
193 case RTE_EVENT_DEV_XSTATS_QUEUE:
194 if (queue_port_id >= (signed int)dev->nb_event_queues)
195 goto invalid_value;
196
197 xstats_mode_count = OTX2_SSO_NUM_GRP_XSTATS;
198 start_offset = OTX2_SSO_NUM_HWS_XSTATS;
199 xstats = sso_grp_xstats;
200
201 req_rsp = otx2_mbox_alloc_msg_sso_grp_get_stats(mbox);
202 ((struct sso_info_req *)req_rsp)->grp = queue_port_id;
203 rc = otx2_mbox_process_msg(mbox, (void *)&req_rsp);
204 if (rc < 0)
205 goto invalid_value;
206
207 break;
208 default:
209 otx2_err("Invalid mode received");
210 goto invalid_value;
211 };
212
213 for (i = 0; i < n && i < xstats_mode_count; i++) {
214 xstat = &xstats[ids[i] - start_offset];
215 value = *(uint64_t *)((char *)req_rsp + xstat->offset);
216 value = (value >> xstat->shift) & xstat->mask;
217
218 if ((mode == RTE_EVENT_DEV_XSTATS_PORT) && dev->dual_ws)
219 xstat->reset_snap[queue_port_id] += value;
220 else
221 xstat->reset_snap[queue_port_id] = value;
222 }
223 return i;
224 invalid_value:
225 return -EINVAL;
226 }
227
228 static int
/*
 * Report the names and global ids of the xstats available in @mode.
 *
 * If @xstats_names/@ids are NULL or @size is too small, only the count
 * for the mode is returned (standard eventdev probing contract).
 * Otherwise the names are copied out and ids[] is filled with global
 * xstat ids (queue ids are offset past the HWS ids).
 *
 * Returns the number of entries written (or the required count), or
 * -EINVAL on an invalid mode.
 */
static int
otx2_sso_xstats_get_names(const struct rte_eventdev *event_dev,
			  enum rte_event_dev_xstats_mode mode,
			  uint8_t queue_port_id,
			  struct rte_event_dev_xstats_name *xstats_names,
			  unsigned int *ids, unsigned int size)
{
	struct rte_event_dev_xstats_name xstats_names_copy[OTX2_SSO_NUM_XSTATS];
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint32_t xstats_mode_count = 0;
	uint32_t start_offset = 0;
	unsigned int xidx = 0;
	unsigned int i;

	/* Build the full name table: HWS names first, then group names,
	 * matching the global id numbering.
	 */
	for (i = 0; i < OTX2_SSO_NUM_HWS_XSTATS; i++) {
		snprintf(xstats_names_copy[i].name,
			 sizeof(xstats_names_copy[i].name), "%s",
			 sso_hws_xstats[i].name);
	}

	for (; i < OTX2_SSO_NUM_XSTATS; i++) {
		snprintf(xstats_names_copy[i].name,
			 sizeof(xstats_names_copy[i].name), "%s",
			 sso_grp_xstats[i - OTX2_SSO_NUM_HWS_XSTATS].name);
	}

	switch (mode) {
	case RTE_EVENT_DEV_XSTATS_DEVICE:
		/* No device-level xstats; count stays 0. */
		break;
	case RTE_EVENT_DEV_XSTATS_PORT:
		if (queue_port_id >= (signed int)dev->nb_event_ports)
			break;
		xstats_mode_count = OTX2_SSO_NUM_HWS_XSTATS;
		break;
	case RTE_EVENT_DEV_XSTATS_QUEUE:
		if (queue_port_id >= (signed int)dev->nb_event_queues)
			break;
		xstats_mode_count = OTX2_SSO_NUM_GRP_XSTATS;
		start_offset = OTX2_SSO_NUM_HWS_XSTATS;
		break;
	default:
		otx2_err("Invalid mode received");
		return -EINVAL;
	};

	/* Probing call, or insufficient room: report required count. */
	if (xstats_mode_count > size || !ids || !xstats_names)
		return xstats_mode_count;

	for (i = 0; i < xstats_mode_count; i++) {
		xidx = i + start_offset;
		/* snprintf (not strncpy) guarantees NUL termination and
		 * matches the copy style used above.
		 */
		snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
			 "%s", xstats_names_copy[xidx].name);
		ids[i] = xidx;
	}

	return i;
}
285
286 #endif
287