1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
3 * All rights reserved.
4 */
5
#include <inttypes.h>
#include <stdio.h>

#include <rte_string_fns.h>
#include <rte_byteorder.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
20
21 static const struct bnxt_xstats_name_off bnxt_rx_stats_strings[] = {
22 {"rx_64b_frames", offsetof(struct rx_port_stats,
23 rx_64b_frames)},
24 {"rx_65b_127b_frames", offsetof(struct rx_port_stats,
25 rx_65b_127b_frames)},
26 {"rx_128b_255b_frames", offsetof(struct rx_port_stats,
27 rx_128b_255b_frames)},
28 {"rx_256b_511b_frames", offsetof(struct rx_port_stats,
29 rx_256b_511b_frames)},
30 {"rx_512b_1023b_frames", offsetof(struct rx_port_stats,
31 rx_512b_1023b_frames)},
32 {"rx_1024b_1518b_frames", offsetof(struct rx_port_stats,
33 rx_1024b_1518b_frames)},
34 {"rx_good_vlan_frames", offsetof(struct rx_port_stats,
35 rx_good_vlan_frames)},
36 {"rx_1519b_2047b_frames", offsetof(struct rx_port_stats,
37 rx_1519b_2047b_frames)},
38 {"rx_2048b_4095b_frames", offsetof(struct rx_port_stats,
39 rx_2048b_4095b_frames)},
40 {"rx_4096b_9216b_frames", offsetof(struct rx_port_stats,
41 rx_4096b_9216b_frames)},
42 {"rx_9217b_16383b_frames", offsetof(struct rx_port_stats,
43 rx_9217b_16383b_frames)},
44 {"rx_total_frames", offsetof(struct rx_port_stats,
45 rx_total_frames)},
46 {"rx_ucast_frames", offsetof(struct rx_port_stats,
47 rx_ucast_frames)},
48 {"rx_mcast_frames", offsetof(struct rx_port_stats,
49 rx_mcast_frames)},
50 {"rx_bcast_frames", offsetof(struct rx_port_stats,
51 rx_bcast_frames)},
52 {"rx_fcs_err_frames", offsetof(struct rx_port_stats,
53 rx_fcs_err_frames)},
54 {"rx_ctrl_frames", offsetof(struct rx_port_stats,
55 rx_ctrl_frames)},
56 {"rx_pause_frames", offsetof(struct rx_port_stats,
57 rx_pause_frames)},
58 {"rx_pfc_frames", offsetof(struct rx_port_stats,
59 rx_pfc_frames)},
60 {"rx_unsupported_opcode_frames", offsetof(struct rx_port_stats,
61 rx_unsupported_opcode_frames)},
62 {"rx_unsupported_da_pausepfc_frames", offsetof(struct rx_port_stats,
63 rx_unsupported_da_pausepfc_frames)},
64 {"rx_wrong_sa_frames", offsetof(struct rx_port_stats,
65 rx_wrong_sa_frames)},
66 {"rx_align_err_frames", offsetof(struct rx_port_stats,
67 rx_align_err_frames)},
68 {"rx_oor_len_frames", offsetof(struct rx_port_stats,
69 rx_oor_len_frames)},
70 {"rx_code_err_frames", offsetof(struct rx_port_stats,
71 rx_code_err_frames)},
72 {"rx_false_carrier_frames", offsetof(struct rx_port_stats,
73 rx_false_carrier_frames)},
74 {"rx_ovrsz_frames", offsetof(struct rx_port_stats,
75 rx_ovrsz_frames)},
76 {"rx_jbr_frames", offsetof(struct rx_port_stats,
77 rx_jbr_frames)},
78 {"rx_mtu_err_frames", offsetof(struct rx_port_stats,
79 rx_mtu_err_frames)},
80 {"rx_match_crc_frames", offsetof(struct rx_port_stats,
81 rx_match_crc_frames)},
82 {"rx_promiscuous_frames", offsetof(struct rx_port_stats,
83 rx_promiscuous_frames)},
84 {"rx_tagged_frames", offsetof(struct rx_port_stats,
85 rx_tagged_frames)},
86 {"rx_double_tagged_frames", offsetof(struct rx_port_stats,
87 rx_double_tagged_frames)},
88 {"rx_trunc_frames", offsetof(struct rx_port_stats,
89 rx_trunc_frames)},
90 {"rx_good_frames", offsetof(struct rx_port_stats,
91 rx_good_frames)},
92 {"rx_sch_crc_err_frames", offsetof(struct rx_port_stats,
93 rx_sch_crc_err_frames)},
94 {"rx_undrsz_frames", offsetof(struct rx_port_stats,
95 rx_undrsz_frames)},
96 {"rx_frag_frames", offsetof(struct rx_port_stats,
97 rx_frag_frames)},
98 {"rx_eee_lpi_events", offsetof(struct rx_port_stats,
99 rx_eee_lpi_events)},
100 {"rx_eee_lpi_duration", offsetof(struct rx_port_stats,
101 rx_eee_lpi_duration)},
102 {"rx_llfc_physical_msgs", offsetof(struct rx_port_stats,
103 rx_llfc_physical_msgs)},
104 {"rx_llfc_logical_msgs", offsetof(struct rx_port_stats,
105 rx_llfc_logical_msgs)},
106 {"rx_llfc_msgs_with_crc_err", offsetof(struct rx_port_stats,
107 rx_llfc_msgs_with_crc_err)},
108 {"rx_hcfc_msgs", offsetof(struct rx_port_stats,
109 rx_hcfc_msgs)},
110 {"rx_hcfc_msgs_with_crc_err", offsetof(struct rx_port_stats,
111 rx_hcfc_msgs_with_crc_err)},
112 {"rx_bytes", offsetof(struct rx_port_stats,
113 rx_bytes)},
114 {"rx_runt_bytes", offsetof(struct rx_port_stats,
115 rx_runt_bytes)},
116 {"rx_runt_frames", offsetof(struct rx_port_stats,
117 rx_runt_frames)},
118 {"rx_pfc_xon2xoff_frames_pri0", offsetof(struct rx_port_stats,
119 rx_pfc_xon2xoff_frames_pri0)},
120 {"rx_pfc_xon2xoff_frames_pri1", offsetof(struct rx_port_stats,
121 rx_pfc_xon2xoff_frames_pri1)},
122 {"rx_pfc_xon2xoff_frames_pri2", offsetof(struct rx_port_stats,
123 rx_pfc_xon2xoff_frames_pri2)},
124 {"rx_pfc_xon2xoff_frames_pri3", offsetof(struct rx_port_stats,
125 rx_pfc_xon2xoff_frames_pri3)},
126 {"rx_pfc_xon2xoff_frames_pri4", offsetof(struct rx_port_stats,
127 rx_pfc_xon2xoff_frames_pri4)},
128 {"rx_pfc_xon2xoff_frames_pri5", offsetof(struct rx_port_stats,
129 rx_pfc_xon2xoff_frames_pri5)},
130 {"rx_pfc_xon2xoff_frames_pri6", offsetof(struct rx_port_stats,
131 rx_pfc_xon2xoff_frames_pri6)},
132 {"rx_pfc_xon2xoff_frames_pri7", offsetof(struct rx_port_stats,
133 rx_pfc_xon2xoff_frames_pri7)},
134 {"rx_pfc_ena_frames_pri0", offsetof(struct rx_port_stats,
135 rx_pfc_ena_frames_pri0)},
136 {"rx_pfc_ena_frames_pri1", offsetof(struct rx_port_stats,
137 rx_pfc_ena_frames_pri1)},
138 {"rx_pfc_ena_frames_pri2", offsetof(struct rx_port_stats,
139 rx_pfc_ena_frames_pri2)},
140 {"rx_pfc_ena_frames_pri3", offsetof(struct rx_port_stats,
141 rx_pfc_ena_frames_pri3)},
142 {"rx_pfc_ena_frames_pri4", offsetof(struct rx_port_stats,
143 rx_pfc_ena_frames_pri4)},
144 {"rx_pfc_ena_frames_pri5", offsetof(struct rx_port_stats,
145 rx_pfc_ena_frames_pri5)},
146 {"rx_pfc_ena_frames_pri6", offsetof(struct rx_port_stats,
147 rx_pfc_ena_frames_pri6)},
148 {"rx_pfc_ena_frames_pri7", offsetof(struct rx_port_stats,
149 rx_pfc_ena_frames_pri7)},
150 {"rx_stat_discard", offsetof(struct rx_port_stats,
151 rx_stat_discard)},
152 {"rx_stat_err", offsetof(struct rx_port_stats,
153 rx_stat_err)},
154 };
155
156 static const struct bnxt_xstats_name_off bnxt_tx_stats_strings[] = {
157 {"tx_64b_frames", offsetof(struct tx_port_stats,
158 tx_64b_frames)},
159 {"tx_65b_127b_frames", offsetof(struct tx_port_stats,
160 tx_65b_127b_frames)},
161 {"tx_128b_255b_frames", offsetof(struct tx_port_stats,
162 tx_128b_255b_frames)},
163 {"tx_256b_511b_frames", offsetof(struct tx_port_stats,
164 tx_256b_511b_frames)},
165 {"tx_512b_1023b_frames", offsetof(struct tx_port_stats,
166 tx_512b_1023b_frames)},
167 {"tx_1024b_1518b_frames", offsetof(struct tx_port_stats,
168 tx_1024b_1518b_frames)},
169 {"tx_good_vlan_frames", offsetof(struct tx_port_stats,
170 tx_good_vlan_frames)},
171 {"tx_1519b_2047b_frames", offsetof(struct tx_port_stats,
172 tx_1519b_2047b_frames)},
173 {"tx_2048b_4095b_frames", offsetof(struct tx_port_stats,
174 tx_2048b_4095b_frames)},
175 {"tx_4096b_9216b_frames", offsetof(struct tx_port_stats,
176 tx_4096b_9216b_frames)},
177 {"tx_9217b_16383b_frames", offsetof(struct tx_port_stats,
178 tx_9217b_16383b_frames)},
179 {"tx_good_frames", offsetof(struct tx_port_stats,
180 tx_good_frames)},
181 {"tx_total_frames", offsetof(struct tx_port_stats,
182 tx_total_frames)},
183 {"tx_ucast_frames", offsetof(struct tx_port_stats,
184 tx_ucast_frames)},
185 {"tx_mcast_frames", offsetof(struct tx_port_stats,
186 tx_mcast_frames)},
187 {"tx_bcast_frames", offsetof(struct tx_port_stats,
188 tx_bcast_frames)},
189 {"tx_pause_frames", offsetof(struct tx_port_stats,
190 tx_pause_frames)},
191 {"tx_pfc_frames", offsetof(struct tx_port_stats,
192 tx_pfc_frames)},
193 {"tx_jabber_frames", offsetof(struct tx_port_stats,
194 tx_jabber_frames)},
195 {"tx_fcs_err_frames", offsetof(struct tx_port_stats,
196 tx_fcs_err_frames)},
197 {"tx_control_frames", offsetof(struct tx_port_stats,
198 tx_control_frames)},
199 {"tx_oversz_frames", offsetof(struct tx_port_stats,
200 tx_oversz_frames)},
201 {"tx_single_dfrl_frames", offsetof(struct tx_port_stats,
202 tx_single_dfrl_frames)},
203 {"tx_multi_dfrl_frames", offsetof(struct tx_port_stats,
204 tx_multi_dfrl_frames)},
205 {"tx_single_coll_frames", offsetof(struct tx_port_stats,
206 tx_single_coll_frames)},
207 {"tx_multi_coll_frames", offsetof(struct tx_port_stats,
208 tx_multi_coll_frames)},
209 {"tx_late_coll_frames", offsetof(struct tx_port_stats,
210 tx_late_coll_frames)},
211 {"tx_excessive_coll_frames", offsetof(struct tx_port_stats,
212 tx_excessive_coll_frames)},
213 {"tx_frag_frames", offsetof(struct tx_port_stats,
214 tx_frag_frames)},
215 {"tx_err", offsetof(struct tx_port_stats,
216 tx_err)},
217 {"tx_tagged_frames", offsetof(struct tx_port_stats,
218 tx_tagged_frames)},
219 {"tx_dbl_tagged_frames", offsetof(struct tx_port_stats,
220 tx_dbl_tagged_frames)},
221 {"tx_runt_frames", offsetof(struct tx_port_stats,
222 tx_runt_frames)},
223 {"tx_fifo_underruns", offsetof(struct tx_port_stats,
224 tx_fifo_underruns)},
225 {"tx_eee_lpi_events", offsetof(struct tx_port_stats,
226 tx_eee_lpi_events)},
227 {"tx_eee_lpi_duration", offsetof(struct tx_port_stats,
228 tx_eee_lpi_duration)},
229 {"tx_total_collisions", offsetof(struct tx_port_stats,
230 tx_total_collisions)},
231 {"tx_bytes", offsetof(struct tx_port_stats,
232 tx_bytes)},
233 {"tx_pfc_ena_frames_pri0", offsetof(struct tx_port_stats,
234 tx_pfc_ena_frames_pri0)},
235 {"tx_pfc_ena_frames_pri1", offsetof(struct tx_port_stats,
236 tx_pfc_ena_frames_pri1)},
237 {"tx_pfc_ena_frames_pri2", offsetof(struct tx_port_stats,
238 tx_pfc_ena_frames_pri2)},
239 {"tx_pfc_ena_frames_pri3", offsetof(struct tx_port_stats,
240 tx_pfc_ena_frames_pri3)},
241 {"tx_pfc_ena_frames_pri4", offsetof(struct tx_port_stats,
242 tx_pfc_ena_frames_pri4)},
243 {"tx_pfc_ena_frames_pri5", offsetof(struct tx_port_stats,
244 tx_pfc_ena_frames_pri5)},
245 {"tx_pfc_ena_frames_pri6", offsetof(struct tx_port_stats,
246 tx_pfc_ena_frames_pri6)},
247 {"tx_pfc_ena_frames_pri7", offsetof(struct tx_port_stats,
248 tx_pfc_ena_frames_pri7)},
249 {"tx_llfc_logical_msgs", offsetof(struct tx_port_stats,
250 tx_llfc_logical_msgs)},
251 {"tx_hcfc_msgs", offsetof(struct tx_port_stats,
252 tx_hcfc_msgs)},
253 {"tx_xthol_frames", offsetof(struct tx_port_stats,
254 tx_xthol_frames)},
255 {"tx_stat_discard", offsetof(struct tx_port_stats,
256 tx_stat_discard)},
257 {"tx_stat_error", offsetof(struct tx_port_stats,
258 tx_stat_error)},
259 };
260
261 static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = {
262 {"tx_ucast_pkts", offsetof(struct hwrm_func_qstats_output,
263 tx_ucast_pkts)},
264 {"tx_mcast_pkts", offsetof(struct hwrm_func_qstats_output,
265 tx_mcast_pkts)},
266 {"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
267 tx_bcast_pkts)},
268 {"tx_discard_pkts", offsetof(struct hwrm_func_qstats_output,
269 tx_discard_pkts)},
270 {"tx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
271 tx_drop_pkts)},
272 {"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
273 tx_ucast_bytes)},
274 {"tx_mcast_bytes", offsetof(struct hwrm_func_qstats_output,
275 tx_mcast_bytes)},
276 {"tx_bcast_bytes", offsetof(struct hwrm_func_qstats_output,
277 tx_bcast_bytes)},
278 {"rx_ucast_pkts", offsetof(struct hwrm_func_qstats_output,
279 rx_ucast_pkts)},
280 {"rx_mcast_pkts", offsetof(struct hwrm_func_qstats_output,
281 rx_mcast_pkts)},
282 {"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
283 rx_bcast_pkts)},
284 {"rx_discard_pkts", offsetof(struct hwrm_func_qstats_output,
285 rx_discard_pkts)},
286 {"rx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
287 rx_drop_pkts)},
288 {"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
289 rx_ucast_bytes)},
290 {"rx_mcast_bytes", offsetof(struct hwrm_func_qstats_output,
291 rx_mcast_bytes)},
292 {"rx_bcast_bytes", offsetof(struct hwrm_func_qstats_output,
293 rx_bcast_bytes)},
294 {"rx_agg_pkts", offsetof(struct hwrm_func_qstats_output,
295 rx_agg_pkts)},
296 {"rx_agg_bytes", offsetof(struct hwrm_func_qstats_output,
297 rx_agg_bytes)},
298 {"rx_agg_events", offsetof(struct hwrm_func_qstats_output,
299 rx_agg_events)},
300 {"rx_agg_aborts", offsetof(struct hwrm_func_qstats_output,
301 rx_agg_aborts)},
302 };
303
304
305 static const struct bnxt_xstats_name_off bnxt_rx_ext_stats_strings[] = {
306 {"link_down_events", offsetof(struct rx_port_stats_ext,
307 link_down_events)},
308 {"continuous_pause_events", offsetof(struct rx_port_stats_ext,
309 continuous_pause_events)},
310 {"resume_pause_events", offsetof(struct rx_port_stats_ext,
311 resume_pause_events)},
312 {"continuous_roce_pause_events", offsetof(struct rx_port_stats_ext,
313 continuous_roce_pause_events)},
314 {"resume_roce_pause_events", offsetof(struct rx_port_stats_ext,
315 resume_roce_pause_events)},
316 {"rx_bytes_cos0", offsetof(struct rx_port_stats_ext,
317 rx_bytes_cos0)},
318 {"rx_bytes_cos1", offsetof(struct rx_port_stats_ext,
319 rx_bytes_cos1)},
320 {"rx_bytes_cos2", offsetof(struct rx_port_stats_ext,
321 rx_bytes_cos2)},
322 {"rx_bytes_cos3", offsetof(struct rx_port_stats_ext,
323 rx_bytes_cos3)},
324 {"rx_bytes_cos4", offsetof(struct rx_port_stats_ext,
325 rx_bytes_cos4)},
326 {"rx_bytes_cos5", offsetof(struct rx_port_stats_ext,
327 rx_bytes_cos5)},
328 {"rx_bytes_cos6", offsetof(struct rx_port_stats_ext,
329 rx_bytes_cos6)},
330 {"rx_bytes_cos7", offsetof(struct rx_port_stats_ext,
331 rx_bytes_cos7)},
332 {"rx_packets_cos0", offsetof(struct rx_port_stats_ext,
333 rx_packets_cos0)},
334 {"rx_packets_cos1", offsetof(struct rx_port_stats_ext,
335 rx_packets_cos1)},
336 {"rx_packets_cos2", offsetof(struct rx_port_stats_ext,
337 rx_packets_cos2)},
338 {"rx_packets_cos3", offsetof(struct rx_port_stats_ext,
339 rx_packets_cos3)},
340 {"rx_packets_cos4", offsetof(struct rx_port_stats_ext,
341 rx_packets_cos4)},
342 {"rx_packets_cos5", offsetof(struct rx_port_stats_ext,
343 rx_packets_cos5)},
344 {"rx_packets_cos6", offsetof(struct rx_port_stats_ext,
345 rx_packets_cos6)},
346 {"rx_packets_cos7", offsetof(struct rx_port_stats_ext,
347 rx_packets_cos7)},
348 {"pfc_pri0_rx_duration_us", offsetof(struct rx_port_stats_ext,
349 pfc_pri0_rx_duration_us)},
350 {"pfc_pri0_rx_transitions", offsetof(struct rx_port_stats_ext,
351 pfc_pri0_rx_transitions)},
352 {"pfc_pri1_rx_duration_us", offsetof(struct rx_port_stats_ext,
353 pfc_pri1_rx_duration_us)},
354 {"pfc_pri1_rx_transitions", offsetof(struct rx_port_stats_ext,
355 pfc_pri1_rx_transitions)},
356 {"pfc_pri2_rx_duration_us", offsetof(struct rx_port_stats_ext,
357 pfc_pri2_rx_duration_us)},
358 {"pfc_pri2_rx_transitions", offsetof(struct rx_port_stats_ext,
359 pfc_pri2_rx_transitions)},
360 {"pfc_pri3_rx_duration_us", offsetof(struct rx_port_stats_ext,
361 pfc_pri3_rx_duration_us)},
362 {"pfc_pri3_rx_transitions", offsetof(struct rx_port_stats_ext,
363 pfc_pri3_rx_transitions)},
364 {"pfc_pri4_rx_duration_us", offsetof(struct rx_port_stats_ext,
365 pfc_pri4_rx_duration_us)},
366 {"pfc_pri4_rx_transitions", offsetof(struct rx_port_stats_ext,
367 pfc_pri4_rx_transitions)},
368 {"pfc_pri5_rx_duration_us", offsetof(struct rx_port_stats_ext,
369 pfc_pri5_rx_duration_us)},
370 {"pfc_pri5_rx_transitions", offsetof(struct rx_port_stats_ext,
371 pfc_pri5_rx_transitions)},
372 {"pfc_pri6_rx_duration_us", offsetof(struct rx_port_stats_ext,
373 pfc_pri6_rx_duration_us)},
374 {"pfc_pri6_rx_transitions", offsetof(struct rx_port_stats_ext,
375 pfc_pri6_rx_transitions)},
376 {"pfc_pri7_rx_duration_us", offsetof(struct rx_port_stats_ext,
377 pfc_pri7_rx_duration_us)},
378 {"pfc_pri7_rx_transitions", offsetof(struct rx_port_stats_ext,
379 pfc_pri7_rx_transitions)},
380 {"rx_bits", offsetof(struct rx_port_stats_ext,
381 rx_bits)},
382 {"rx_buffer_passed_threshold", offsetof(struct rx_port_stats_ext,
383 rx_buffer_passed_threshold)},
384 {"rx_pcs_symbol_err", offsetof(struct rx_port_stats_ext,
385 rx_pcs_symbol_err)},
386 {"rx_corrected_bits", offsetof(struct rx_port_stats_ext,
387 rx_corrected_bits)},
388 {"rx_discard_bytes_cos0", offsetof(struct rx_port_stats_ext,
389 rx_discard_bytes_cos0)},
390 {"rx_discard_bytes_cos1", offsetof(struct rx_port_stats_ext,
391 rx_discard_bytes_cos1)},
392 {"rx_discard_bytes_cos2", offsetof(struct rx_port_stats_ext,
393 rx_discard_bytes_cos2)},
394 {"rx_discard_bytes_cos3", offsetof(struct rx_port_stats_ext,
395 rx_discard_bytes_cos3)},
396 {"rx_discard_bytes_cos4", offsetof(struct rx_port_stats_ext,
397 rx_discard_bytes_cos4)},
398 {"rx_discard_bytes_cos5", offsetof(struct rx_port_stats_ext,
399 rx_discard_bytes_cos5)},
400 {"rx_discard_bytes_cos6", offsetof(struct rx_port_stats_ext,
401 rx_discard_bytes_cos6)},
402 {"rx_discard_bytes_cos7", offsetof(struct rx_port_stats_ext,
403 rx_discard_bytes_cos7)},
404 {"rx_discard_packets_cos0", offsetof(struct rx_port_stats_ext,
405 rx_discard_packets_cos0)},
406 {"rx_discard_packets_cos1", offsetof(struct rx_port_stats_ext,
407 rx_discard_packets_cos1)},
408 {"rx_discard_packets_cos2", offsetof(struct rx_port_stats_ext,
409 rx_discard_packets_cos2)},
410 {"rx_discard_packets_cos3", offsetof(struct rx_port_stats_ext,
411 rx_discard_packets_cos3)},
412 {"rx_discard_packets_cos4", offsetof(struct rx_port_stats_ext,
413 rx_discard_packets_cos4)},
414 {"rx_discard_packets_cos5", offsetof(struct rx_port_stats_ext,
415 rx_discard_packets_cos5)},
416 {"rx_discard_packets_cos6", offsetof(struct rx_port_stats_ext,
417 rx_discard_packets_cos6)},
418 {"rx_discard_packets_cos7", offsetof(struct rx_port_stats_ext,
419 rx_discard_packets_cos7)},
420 };
421
422 static const struct bnxt_xstats_name_off bnxt_tx_ext_stats_strings[] = {
423 {"tx_bytes_cos0", offsetof(struct tx_port_stats_ext,
424 tx_bytes_cos0)},
425 {"tx_bytes_cos1", offsetof(struct tx_port_stats_ext,
426 tx_bytes_cos1)},
427 {"tx_bytes_cos2", offsetof(struct tx_port_stats_ext,
428 tx_bytes_cos2)},
429 {"tx_bytes_cos3", offsetof(struct tx_port_stats_ext,
430 tx_bytes_cos3)},
431 {"tx_bytes_cos4", offsetof(struct tx_port_stats_ext,
432 tx_bytes_cos4)},
433 {"tx_bytes_cos5", offsetof(struct tx_port_stats_ext,
434 tx_bytes_cos5)},
435 {"tx_bytes_cos6", offsetof(struct tx_port_stats_ext,
436 tx_bytes_cos6)},
437 {"tx_bytes_cos7", offsetof(struct tx_port_stats_ext,
438 tx_bytes_cos7)},
439 {"tx_packets_cos0", offsetof(struct tx_port_stats_ext,
440 tx_packets_cos0)},
441 {"tx_packets_cos1", offsetof(struct tx_port_stats_ext,
442 tx_packets_cos1)},
443 {"tx_packets_cos2", offsetof(struct tx_port_stats_ext,
444 tx_packets_cos2)},
445 {"tx_packets_cos3", offsetof(struct tx_port_stats_ext,
446 tx_packets_cos3)},
447 {"tx_packets_cos4", offsetof(struct tx_port_stats_ext,
448 tx_packets_cos4)},
449 {"tx_packets_cos5", offsetof(struct tx_port_stats_ext,
450 tx_packets_cos5)},
451 {"tx_packets_cos6", offsetof(struct tx_port_stats_ext,
452 tx_packets_cos6)},
453 {"tx_packets_cos7", offsetof(struct tx_port_stats_ext,
454 tx_packets_cos7)},
455 {"pfc_pri0_tx_duration_us", offsetof(struct tx_port_stats_ext,
456 pfc_pri0_tx_duration_us)},
457 {"pfc_pri0_tx_transitions", offsetof(struct tx_port_stats_ext,
458 pfc_pri0_tx_transitions)},
459 {"pfc_pri1_tx_duration_us", offsetof(struct tx_port_stats_ext,
460 pfc_pri1_tx_duration_us)},
461 {"pfc_pri1_tx_transitions", offsetof(struct tx_port_stats_ext,
462 pfc_pri1_tx_transitions)},
463 {"pfc_pri2_tx_duration_us", offsetof(struct tx_port_stats_ext,
464 pfc_pri2_tx_duration_us)},
465 {"pfc_pri2_tx_transitions", offsetof(struct tx_port_stats_ext,
466 pfc_pri2_tx_transitions)},
467 {"pfc_pri3_tx_duration_us", offsetof(struct tx_port_stats_ext,
468 pfc_pri3_tx_duration_us)},
469 {"pfc_pri3_tx_transitions", offsetof(struct tx_port_stats_ext,
470 pfc_pri3_tx_transitions)},
471 {"pfc_pri4_tx_duration_us", offsetof(struct tx_port_stats_ext,
472 pfc_pri4_tx_duration_us)},
473 {"pfc_pri4_tx_transitions", offsetof(struct tx_port_stats_ext,
474 pfc_pri4_tx_transitions)},
475 {"pfc_pri5_tx_duration_us", offsetof(struct tx_port_stats_ext,
476 pfc_pri5_tx_duration_us)},
477 {"pfc_pri5_tx_transitions", offsetof(struct tx_port_stats_ext,
478 pfc_pri5_tx_transitions)},
479 {"pfc_pri6_tx_duration_us", offsetof(struct tx_port_stats_ext,
480 pfc_pri6_tx_duration_us)},
481 {"pfc_pri6_tx_transitions", offsetof(struct tx_port_stats_ext,
482 pfc_pri6_tx_transitions)},
483 {"pfc_pri7_tx_duration_us", offsetof(struct tx_port_stats_ext,
484 pfc_pri7_tx_duration_us)},
485 {"pfc_pri7_tx_transitions", offsetof(struct tx_port_stats_ext,
486 pfc_pri7_tx_transitions)},
487 };
488
489 /*
490 * Statistics functions
491 */
492
bnxt_free_stats(struct bnxt * bp)493 void bnxt_free_stats(struct bnxt *bp)
494 {
495 int i;
496
497 for (i = 0; i < (int)bp->tx_cp_nr_rings; i++) {
498 struct bnxt_tx_queue *txq = bp->tx_queues[i];
499
500 bnxt_free_txq_stats(txq);
501 }
502 for (i = 0; i < (int)bp->rx_cp_nr_rings; i++) {
503 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
504
505 bnxt_free_rxq_stats(rxq);
506 }
507 }
508
bnxt_fill_rte_eth_stats(struct rte_eth_stats * stats,struct bnxt_ring_stats * ring_stats,unsigned int i,bool rx)509 static void bnxt_fill_rte_eth_stats(struct rte_eth_stats *stats,
510 struct bnxt_ring_stats *ring_stats,
511 unsigned int i, bool rx)
512 {
513 if (rx) {
514 stats->q_ipackets[i] = ring_stats->rx_ucast_pkts;
515 stats->q_ipackets[i] += ring_stats->rx_mcast_pkts;
516 stats->q_ipackets[i] += ring_stats->rx_bcast_pkts;
517
518 stats->ipackets += stats->q_ipackets[i];
519
520 stats->q_ibytes[i] = ring_stats->rx_ucast_bytes;
521 stats->q_ibytes[i] += ring_stats->rx_mcast_bytes;
522 stats->q_ibytes[i] += ring_stats->rx_bcast_bytes;
523
524 stats->ibytes += stats->q_ibytes[i];
525
526 stats->q_errors[i] = ring_stats->rx_discard_pkts;
527 stats->q_errors[i] += ring_stats->rx_error_pkts;
528
529 stats->imissed += ring_stats->rx_discard_pkts;
530 stats->ierrors += ring_stats->rx_error_pkts;
531 } else {
532 stats->q_opackets[i] = ring_stats->tx_ucast_pkts;
533 stats->q_opackets[i] += ring_stats->tx_mcast_pkts;
534 stats->q_opackets[i] += ring_stats->tx_bcast_pkts;
535
536 stats->opackets += stats->q_opackets[i];
537
538 stats->q_obytes[i] = ring_stats->tx_ucast_bytes;
539 stats->q_obytes[i] += ring_stats->tx_mcast_bytes;
540 stats->q_obytes[i] += ring_stats->tx_bcast_bytes;
541
542 stats->obytes += stats->q_obytes[i];
543
544 stats->oerrors += ring_stats->tx_discard_pkts;
545 }
546 }
547
/*
 * bnxt_stats_get_op() - ethdev .stats_get handler.
 *
 * Queries per-ring counters from firmware via HWRM for every started
 * RX/TX queue (capped at RTE_ETHDEV_QUEUE_STAT_CNTRS per direction) and
 * accumulates them into @bnxt_stats.
 *
 * Returns 0 on success, a negative errno on device error, firmware
 * error, or if the device has not been started.
 */
int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
		      struct rte_eth_stats *bnxt_stats)
{
	int rc = 0;
	unsigned int i;
	struct bnxt *bp = eth_dev->data->dev_private;
	unsigned int num_q_stats;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* HW stats contexts only exist once the port is started. */
	if (!eth_dev->data->dev_started)
		return -EIO;

	/* Only the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues can be
	 * reported per-queue by the ethdev API.
	 */
	num_q_stats = RTE_MIN(bp->rx_cp_nr_rings,
			      (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);

	for (i = 0; i < num_q_stats; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		struct bnxt_ring_stats ring_stats = {0};

		/* Skip queues that have been stopped individually. */
		if (!rxq->rx_started)
			continue;

		rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
					  &ring_stats, true);
		if (unlikely(rc))
			return rc;

		bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
		/* mbuf allocation failures are tracked in SW, not by HW. */
		bnxt_stats->rx_nombuf +=
				rte_atomic64_read(&rxq->rx_mbuf_alloc_fail);
	}

	num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
			      (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);

	for (i = 0; i < num_q_stats; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		struct bnxt_ring_stats ring_stats = {0};

		if (!txq->tx_started)
			continue;

		rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
					  &ring_stats, false);
		if (unlikely(rc))
			return rc;

		bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, false);
	}

	return rc;
}
605
bnxt_clear_prev_stat(struct bnxt * bp)606 static void bnxt_clear_prev_stat(struct bnxt *bp)
607 {
608 /*
609 * Clear the cached values of stats returned by HW in the previous
610 * get operation.
611 */
612 memset(bp->prev_rx_ring_stats, 0, sizeof(struct bnxt_ring_stats) * bp->rx_cp_nr_rings);
613 memset(bp->prev_tx_ring_stats, 0, sizeof(struct bnxt_ring_stats) * bp->tx_cp_nr_rings);
614 }
615
bnxt_stats_reset_op(struct rte_eth_dev * eth_dev)616 int bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
617 {
618 struct bnxt *bp = eth_dev->data->dev_private;
619 unsigned int i;
620 int ret;
621
622 ret = is_bnxt_in_error(bp);
623 if (ret)
624 return ret;
625
626 if (!eth_dev->data->dev_started) {
627 PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
628 return -EINVAL;
629 }
630
631 ret = bnxt_clear_all_hwrm_stat_ctxs(bp);
632 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
633 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
634
635 rte_atomic64_clear(&rxq->rx_mbuf_alloc_fail);
636 }
637
638 bnxt_clear_prev_stat(bp);
639
640 return ret;
641 }
642
bnxt_fill_func_qstats(struct hwrm_func_qstats_output * func_qstats,struct bnxt_ring_stats * ring_stats,bool rx)643 static void bnxt_fill_func_qstats(struct hwrm_func_qstats_output *func_qstats,
644 struct bnxt_ring_stats *ring_stats,
645 bool rx)
646 {
647 if (rx) {
648 func_qstats->rx_ucast_pkts += ring_stats->rx_ucast_pkts;
649 func_qstats->rx_mcast_pkts += ring_stats->rx_mcast_pkts;
650 func_qstats->rx_bcast_pkts += ring_stats->rx_bcast_pkts;
651
652 func_qstats->rx_ucast_bytes += ring_stats->rx_ucast_bytes;
653 func_qstats->rx_mcast_bytes += ring_stats->rx_mcast_bytes;
654 func_qstats->rx_bcast_bytes += ring_stats->rx_bcast_bytes;
655
656 func_qstats->rx_discard_pkts += ring_stats->rx_discard_pkts;
657 func_qstats->rx_drop_pkts += ring_stats->rx_error_pkts;
658
659 func_qstats->rx_agg_pkts += ring_stats->rx_agg_pkts;
660 func_qstats->rx_agg_bytes += ring_stats->rx_agg_bytes;
661 func_qstats->rx_agg_events += ring_stats->rx_agg_events;
662 func_qstats->rx_agg_aborts += ring_stats->rx_agg_aborts;
663 } else {
664 func_qstats->tx_ucast_pkts += ring_stats->tx_ucast_pkts;
665 func_qstats->tx_mcast_pkts += ring_stats->tx_mcast_pkts;
666 func_qstats->tx_bcast_pkts += ring_stats->tx_bcast_pkts;
667
668 func_qstats->tx_ucast_bytes += ring_stats->tx_ucast_bytes;
669 func_qstats->tx_mcast_bytes += ring_stats->tx_mcast_bytes;
670 func_qstats->tx_bcast_bytes += ring_stats->tx_bcast_bytes;
671
672 func_qstats->tx_drop_pkts += ring_stats->tx_error_pkts;
673 func_qstats->tx_discard_pkts += ring_stats->tx_discard_pkts;
674 }
675 }
676
bnxt_dev_xstats_get_op(struct rte_eth_dev * eth_dev,struct rte_eth_xstat * xstats,unsigned int n)677 int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
678 struct rte_eth_xstat *xstats, unsigned int n)
679 {
680 struct bnxt *bp = eth_dev->data->dev_private;
681 unsigned int count, i;
682 unsigned int rx_port_stats_ext_cnt;
683 unsigned int tx_port_stats_ext_cnt;
684 unsigned int stat_size = sizeof(uint64_t);
685 struct hwrm_func_qstats_output func_qstats = {0};
686 unsigned int stat_count;
687 int rc;
688
689 rc = is_bnxt_in_error(bp);
690 if (rc)
691 return rc;
692
693 stat_count = RTE_DIM(bnxt_rx_stats_strings) +
694 RTE_DIM(bnxt_tx_stats_strings) +
695 RTE_DIM(bnxt_func_stats_strings) +
696 RTE_DIM(bnxt_rx_ext_stats_strings) +
697 RTE_DIM(bnxt_tx_ext_stats_strings) +
698 bnxt_flow_stats_cnt(bp);
699
700 if (n < stat_count || xstats == NULL)
701 return stat_count;
702
703 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
704 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
705 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
706 struct bnxt_ring_stats ring_stats = {0};
707
708 if (!rxq->rx_started)
709 continue;
710
711 rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
712 &ring_stats, true);
713 if (unlikely(rc))
714 return rc;
715
716 bnxt_fill_func_qstats(&func_qstats, &ring_stats, true);
717 }
718
719 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
720 struct bnxt_tx_queue *txq = bp->tx_queues[i];
721 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
722 struct bnxt_ring_stats ring_stats = {0};
723
724 if (!txq->tx_started)
725 continue;
726
727 rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
728 &ring_stats, false);
729 if (unlikely(rc))
730 return rc;
731
732 bnxt_fill_func_qstats(&func_qstats, &ring_stats, false);
733 }
734
735 bnxt_hwrm_port_qstats(bp);
736 bnxt_hwrm_ext_port_qstats(bp);
737 rx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_rx_ext_stats_strings),
738 (bp->fw_rx_port_stats_ext_size /
739 stat_size));
740 tx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_tx_ext_stats_strings),
741 (bp->fw_tx_port_stats_ext_size /
742 stat_size));
743
744 memset(xstats, 0, sizeof(*xstats) * n);
745
746 count = 0;
747 for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
748 uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats;
749 xstats[count].id = count;
750 xstats[count].value = rte_le_to_cpu_64(
751 *(uint64_t *)((char *)rx_stats +
752 bnxt_rx_stats_strings[i].offset));
753 count++;
754 }
755
756 for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
757 uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats;
758 xstats[count].id = count;
759 xstats[count].value = rte_le_to_cpu_64(
760 *(uint64_t *)((char *)tx_stats +
761 bnxt_tx_stats_strings[i].offset));
762 count++;
763 }
764
765 for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) {
766 xstats[count].id = count;
767 xstats[count].value = *(uint64_t *)((char *)&func_qstats +
768 bnxt_func_stats_strings[i].offset);
769 count++;
770 }
771
772 for (i = 0; i < rx_port_stats_ext_cnt; i++) {
773 uint64_t *rx_stats_ext = (uint64_t *)bp->hw_rx_port_stats_ext;
774
775 xstats[count].value = rte_le_to_cpu_64
776 (*(uint64_t *)((char *)rx_stats_ext +
777 bnxt_rx_ext_stats_strings[i].offset));
778
779 count++;
780 }
781
782 for (i = 0; i < tx_port_stats_ext_cnt; i++) {
783 uint64_t *tx_stats_ext = (uint64_t *)bp->hw_tx_port_stats_ext;
784
785 xstats[count].value = rte_le_to_cpu_64
786 (*(uint64_t *)((char *)tx_stats_ext +
787 bnxt_tx_ext_stats_strings[i].offset));
788 count++;
789 }
790
791 if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
792 bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT &&
793 BNXT_FLOW_XSTATS_EN(bp)) {
794 int j;
795
796 i = 0;
797 for (j = 0; j < bp->max_vnics; j++) {
798 struct bnxt_filter_info *filter;
799 struct bnxt_vnic_info *vnic;
800 struct rte_flow *flow;
801
802 vnic = &bp->vnic_info[j];
803 if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
804 continue;
805
806 if (STAILQ_EMPTY(&vnic->flow_list))
807 continue;
808
809 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
810 if (!flow || !flow->filter)
811 continue;
812
813 filter = flow->filter;
814 xstats[count].id = count;
815 xstats[count].value =
816 filter->hw_stats.bytes;
817 count++;
818 xstats[count].id = count;
819 xstats[count].value =
820 filter->hw_stats.packets;
821 count++;
822 if (++i > bp->max_l2_ctx)
823 break;
824 }
825 if (i > bp->max_l2_ctx)
826 break;
827 }
828 }
829
830 return stat_count;
831 }
832
bnxt_flow_stats_cnt(struct bnxt * bp)833 int bnxt_flow_stats_cnt(struct bnxt *bp)
834 {
835 if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
836 bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT &&
837 BNXT_FLOW_XSTATS_EN(bp)) {
838 struct bnxt_xstats_name_off flow_bytes[bp->max_l2_ctx];
839 struct bnxt_xstats_name_off flow_pkts[bp->max_l2_ctx];
840
841 return RTE_DIM(flow_bytes) + RTE_DIM(flow_pkts);
842 }
843
844 return 0;
845 }
846
bnxt_dev_xstats_get_names_op(struct rte_eth_dev * eth_dev,struct rte_eth_xstat_name * xstats_names,unsigned int size)847 int bnxt_dev_xstats_get_names_op(struct rte_eth_dev *eth_dev,
848 struct rte_eth_xstat_name *xstats_names,
849 unsigned int size)
850 {
851 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
852 const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
853 RTE_DIM(bnxt_tx_stats_strings) +
854 RTE_DIM(bnxt_func_stats_strings) +
855 RTE_DIM(bnxt_rx_ext_stats_strings) +
856 RTE_DIM(bnxt_tx_ext_stats_strings) +
857 bnxt_flow_stats_cnt(bp);
858 unsigned int i, count = 0;
859 int rc;
860
861 rc = is_bnxt_in_error(bp);
862 if (rc)
863 return rc;
864
865 if (xstats_names == NULL || size < stat_cnt)
866 return stat_cnt;
867
868 for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
869 strlcpy(xstats_names[count].name,
870 bnxt_rx_stats_strings[i].name,
871 sizeof(xstats_names[count].name));
872 count++;
873 }
874
875 for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
876 strlcpy(xstats_names[count].name,
877 bnxt_tx_stats_strings[i].name,
878 sizeof(xstats_names[count].name));
879 count++;
880 }
881
882 for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) {
883 strlcpy(xstats_names[count].name,
884 bnxt_func_stats_strings[i].name,
885 sizeof(xstats_names[count].name));
886 count++;
887 }
888
889 for (i = 0; i < RTE_DIM(bnxt_rx_ext_stats_strings); i++) {
890 strlcpy(xstats_names[count].name,
891 bnxt_rx_ext_stats_strings[i].name,
892 sizeof(xstats_names[count].name));
893
894 count++;
895 }
896
897 for (i = 0; i < RTE_DIM(bnxt_tx_ext_stats_strings); i++) {
898 strlcpy(xstats_names[count].name,
899 bnxt_tx_ext_stats_strings[i].name,
900 sizeof(xstats_names[count].name));
901
902 count++;
903 }
904
905 if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
906 bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT &&
907 BNXT_FLOW_XSTATS_EN(bp)) {
908 for (i = 0; i < bp->max_l2_ctx; i++) {
909 char buf[RTE_ETH_XSTATS_NAME_SIZE];
910
911 sprintf(buf, "flow_%d_bytes", i);
912 strlcpy(xstats_names[count].name, buf,
913 sizeof(xstats_names[count].name));
914 count++;
915
916 sprintf(buf, "flow_%d_packets", i);
917 strlcpy(xstats_names[count].name, buf,
918 sizeof(xstats_names[count].name));
919
920 count++;
921 }
922 }
923
924 return stat_cnt;
925 }
926
bnxt_dev_xstats_reset_op(struct rte_eth_dev * eth_dev)927 int bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
928 {
929 struct bnxt *bp = eth_dev->data->dev_private;
930 int ret;
931
932 ret = is_bnxt_in_error(bp);
933 if (ret)
934 return ret;
935
936 if (BNXT_VF(bp) || !BNXT_SINGLE_PF(bp) ||
937 !(bp->flags & BNXT_FLAG_PORT_STATS)) {
938 PMD_DRV_LOG(ERR, "Operation not supported\n");
939 return -ENOTSUP;
940 }
941
942 ret = bnxt_hwrm_port_clr_stats(bp);
943 if (ret != 0)
944 PMD_DRV_LOG(ERR, "Failed to reset xstats: %s\n",
945 strerror(-ret));
946
947 bnxt_clear_prev_stat(bp);
948
949 return ret;
950 }
951
952 /* Update the input context memory with the flow counter IDs
953 * of the flows that we are interested in.
954 * Also, update the output tables with the current local values
955 * since that is what will be used by FW to accumulate
956 */
bnxt_update_fc_pre_qstat(uint32_t * in_tbl,uint64_t * out_tbl,struct bnxt_filter_info * filter,uint32_t * ptbl_cnt)957 static void bnxt_update_fc_pre_qstat(uint32_t *in_tbl,
958 uint64_t *out_tbl,
959 struct bnxt_filter_info *filter,
960 uint32_t *ptbl_cnt)
961 {
962 uint32_t in_tbl_cnt = *ptbl_cnt;
963
964 in_tbl[in_tbl_cnt] = filter->flow_id;
965 out_tbl[2 * in_tbl_cnt] = filter->hw_stats.packets;
966 out_tbl[2 * in_tbl_cnt + 1] = filter->hw_stats.bytes;
967 in_tbl_cnt++;
968 *ptbl_cnt = in_tbl_cnt;
969 }
970
971 /* Post issuing counter_qstats cmd, update the driver's local stat
972 * entries with the values DMA-ed by FW in the output table
973 */
bnxt_update_fc_post_qstat(struct bnxt_filter_info * filter,uint64_t * out_tbl,uint32_t out_tbl_idx)974 static void bnxt_update_fc_post_qstat(struct bnxt_filter_info *filter,
975 uint64_t *out_tbl,
976 uint32_t out_tbl_idx)
977 {
978 filter->hw_stats.packets = out_tbl[2 * out_tbl_idx];
979 filter->hw_stats.bytes = out_tbl[(2 * out_tbl_idx) + 1];
980 }
981
bnxt_update_fc_tbl(struct bnxt * bp,uint16_t ctr,struct bnxt_filter_info * en_tbl[],uint16_t in_flow_cnt)982 static int bnxt_update_fc_tbl(struct bnxt *bp, uint16_t ctr,
983 struct bnxt_filter_info *en_tbl[],
984 uint16_t in_flow_cnt)
985 {
986 uint32_t *in_rx_tbl;
987 uint64_t *out_rx_tbl;
988 uint32_t in_rx_tbl_cnt = 0;
989 uint32_t out_rx_tbl_cnt = 0;
990 int i, rc = 0;
991
992 in_rx_tbl = (uint32_t *)bp->flow_stat->rx_fc_in_tbl.va;
993 out_rx_tbl = (uint64_t *)bp->flow_stat->rx_fc_out_tbl.va;
994
995 for (i = 0; i < in_flow_cnt; i++) {
996 if (!en_tbl[i])
997 continue;
998
999 /* Currently only ingress/Rx flows are supported anyway. */
1000 bnxt_update_fc_pre_qstat(in_rx_tbl, out_rx_tbl,
1001 en_tbl[i], &in_rx_tbl_cnt);
1002 }
1003
1004 /* Currently only ingress/Rx flows are supported */
1005 if (in_rx_tbl_cnt) {
1006 rc = bnxt_hwrm_cfa_counter_qstats(bp, BNXT_DIR_RX, ctr,
1007 in_rx_tbl_cnt);
1008 if (rc)
1009 return rc;
1010 }
1011
1012 for (i = 0; i < in_flow_cnt; i++) {
1013 if (!en_tbl[i])
1014 continue;
1015
1016 /* Currently only ingress/Rx flows are supported */
1017 bnxt_update_fc_post_qstat(en_tbl[i], out_rx_tbl,
1018 out_rx_tbl_cnt);
1019 out_rx_tbl_cnt++;
1020 }
1021
1022 return rc;
1023 }
1024
/* Walks through the list which has all the flows
 * requesting for explicit flow counters.
 *
 * Collects matching filters in batches of up to bp->flow_stat->max_fc
 * and issues one counter_qstats refresh per batch via
 * bnxt_update_fc_tbl(). On a command failure the flow-counter thread
 * is cancelled so the query is not retried every second.
 * Returns 0 on success or the HWRM error code on failure.
 */
int bnxt_flow_stats_req(struct bnxt *bp)
{
	int i;
	int rc = 0;
	struct rte_flow *flow;
	uint16_t in_flow_tbl_cnt = 0;
	struct bnxt_vnic_info *vnic = NULL;
	/* VLA sized to the max flow counters FW can query in one batch */
	struct bnxt_filter_info *valid_en_tbl[bp->flow_stat->max_fc];
	uint16_t counter_type = CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC;

	/* Hold the flow lock so flows cannot be created/destroyed while
	 * the per-VNIC flow lists are being walked.
	 */
	bnxt_acquire_flow_lock(bp);
	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		/* Skip VNICs not instantiated in FW */
		if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		if (STAILQ_EMPTY(&vnic->flow_list))
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			if (!flow || !flow->filter)
				continue;

			valid_en_tbl[in_flow_tbl_cnt++] = flow->filter;
			/* Batch is full: flush it to FW before staging
			 * any more filters.
			 */
			if (in_flow_tbl_cnt >= bp->flow_stat->max_fc) {
				rc = bnxt_update_fc_tbl(bp, counter_type,
							valid_en_tbl,
							in_flow_tbl_cnt);
				if (rc)
					goto err;
				in_flow_tbl_cnt = 0;
				continue;
			}
		}
	}

	/* Nothing left in a partial batch; done. */
	if (!in_flow_tbl_cnt) {
		bnxt_release_flow_lock(bp);
		goto out;
	}

	/* Flush the final partial batch. */
	rc = bnxt_update_fc_tbl(bp, counter_type, valid_en_tbl,
				in_flow_tbl_cnt);
	if (!rc) {
		bnxt_release_flow_lock(bp);
		return 0;
	}

err:
	/* If cmd fails once, no need of
	 * invoking again every second
	 */
	bnxt_release_flow_lock(bp);
	bnxt_cancel_fc_thread(bp);
out:
	return rc;
}
1085