xref: /f-stack/dpdk/drivers/net/hns3/hns3_stats.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4 
5 #include <rte_ethdev.h>
6 #include <rte_io.h>
7 #include <rte_malloc.h>
8 
9 #include "hns3_ethdev.h"
10 #include "hns3_rxtx.h"
11 #include "hns3_logs.h"
12 #include "hns3_regs.h"
13 
14 /* MAC statistics */
15 static const struct hns3_xstats_name_offset hns3_mac_strings[] = {
16 	{"mac_tx_mac_pause_num",
17 		HNS3_MAC_STATS_OFFSET(mac_tx_mac_pause_num)},
18 	{"mac_rx_mac_pause_num",
19 		HNS3_MAC_STATS_OFFSET(mac_rx_mac_pause_num)},
20 	{"mac_tx_control_pkt_num",
21 		HNS3_MAC_STATS_OFFSET(mac_tx_ctrl_pkt_num)},
22 	{"mac_rx_control_pkt_num",
23 		HNS3_MAC_STATS_OFFSET(mac_rx_ctrl_pkt_num)},
24 	{"mac_tx_pfc_pkt_num",
25 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pause_pkt_num)},
26 	{"mac_tx_pfc_pri0_pkt_num",
27 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri0_pkt_num)},
28 	{"mac_tx_pfc_pri1_pkt_num",
29 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri1_pkt_num)},
30 	{"mac_tx_pfc_pri2_pkt_num",
31 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri2_pkt_num)},
32 	{"mac_tx_pfc_pri3_pkt_num",
33 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri3_pkt_num)},
34 	{"mac_tx_pfc_pri4_pkt_num",
35 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri4_pkt_num)},
36 	{"mac_tx_pfc_pri5_pkt_num",
37 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri5_pkt_num)},
38 	{"mac_tx_pfc_pri6_pkt_num",
39 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri6_pkt_num)},
40 	{"mac_tx_pfc_pri7_pkt_num",
41 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri7_pkt_num)},
42 	{"mac_rx_pfc_pkt_num",
43 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pause_pkt_num)},
44 	{"mac_rx_pfc_pri0_pkt_num",
45 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri0_pkt_num)},
46 	{"mac_rx_pfc_pri1_pkt_num",
47 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri1_pkt_num)},
48 	{"mac_rx_pfc_pri2_pkt_num",
49 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri2_pkt_num)},
50 	{"mac_rx_pfc_pri3_pkt_num",
51 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri3_pkt_num)},
52 	{"mac_rx_pfc_pri4_pkt_num",
53 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri4_pkt_num)},
54 	{"mac_rx_pfc_pri5_pkt_num",
55 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri5_pkt_num)},
56 	{"mac_rx_pfc_pri6_pkt_num",
57 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri6_pkt_num)},
58 	{"mac_rx_pfc_pri7_pkt_num",
59 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri7_pkt_num)},
60 	{"mac_tx_total_pkt_num",
61 		HNS3_MAC_STATS_OFFSET(mac_tx_total_pkt_num)},
62 	{"mac_tx_total_oct_num",
63 		HNS3_MAC_STATS_OFFSET(mac_tx_total_oct_num)},
64 	{"mac_tx_good_pkt_num",
65 		HNS3_MAC_STATS_OFFSET(mac_tx_good_pkt_num)},
66 	{"mac_tx_bad_pkt_num",
67 		HNS3_MAC_STATS_OFFSET(mac_tx_bad_pkt_num)},
68 	{"mac_tx_good_oct_num",
69 		HNS3_MAC_STATS_OFFSET(mac_tx_good_oct_num)},
70 	{"mac_tx_bad_oct_num",
71 		HNS3_MAC_STATS_OFFSET(mac_tx_bad_oct_num)},
72 	{"mac_tx_uni_pkt_num",
73 		HNS3_MAC_STATS_OFFSET(mac_tx_uni_pkt_num)},
74 	{"mac_tx_multi_pkt_num",
75 		HNS3_MAC_STATS_OFFSET(mac_tx_multi_pkt_num)},
76 	{"mac_tx_broad_pkt_num",
77 		HNS3_MAC_STATS_OFFSET(mac_tx_broad_pkt_num)},
78 	{"mac_tx_undersize_pkt_num",
79 		HNS3_MAC_STATS_OFFSET(mac_tx_undersize_pkt_num)},
80 	{"mac_tx_oversize_pkt_num",
81 		HNS3_MAC_STATS_OFFSET(mac_tx_oversize_pkt_num)},
82 	{"mac_tx_64_oct_pkt_num",
83 		HNS3_MAC_STATS_OFFSET(mac_tx_64_oct_pkt_num)},
84 	{"mac_tx_65_127_oct_pkt_num",
85 		HNS3_MAC_STATS_OFFSET(mac_tx_65_127_oct_pkt_num)},
86 	{"mac_tx_128_255_oct_pkt_num",
87 		HNS3_MAC_STATS_OFFSET(mac_tx_128_255_oct_pkt_num)},
88 	{"mac_tx_256_511_oct_pkt_num",
89 		HNS3_MAC_STATS_OFFSET(mac_tx_256_511_oct_pkt_num)},
90 	{"mac_tx_512_1023_oct_pkt_num",
91 		HNS3_MAC_STATS_OFFSET(mac_tx_512_1023_oct_pkt_num)},
92 	{"mac_tx_1024_1518_oct_pkt_num",
93 		HNS3_MAC_STATS_OFFSET(mac_tx_1024_1518_oct_pkt_num)},
94 	{"mac_tx_1519_2047_oct_pkt_num",
95 		HNS3_MAC_STATS_OFFSET(mac_tx_1519_2047_oct_pkt_num)},
96 	{"mac_tx_2048_4095_oct_pkt_num",
97 		HNS3_MAC_STATS_OFFSET(mac_tx_2048_4095_oct_pkt_num)},
98 	{"mac_tx_4096_8191_oct_pkt_num",
99 		HNS3_MAC_STATS_OFFSET(mac_tx_4096_8191_oct_pkt_num)},
100 	{"mac_tx_8192_9216_oct_pkt_num",
101 		HNS3_MAC_STATS_OFFSET(mac_tx_8192_9216_oct_pkt_num)},
102 	{"mac_tx_9217_12287_oct_pkt_num",
103 		HNS3_MAC_STATS_OFFSET(mac_tx_9217_12287_oct_pkt_num)},
104 	{"mac_tx_12288_16383_oct_pkt_num",
105 		HNS3_MAC_STATS_OFFSET(mac_tx_12288_16383_oct_pkt_num)},
106 	{"mac_tx_1519_max_good_pkt_num",
107 		HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_good_oct_pkt_num)},
108 	{"mac_tx_1519_max_bad_pkt_num",
109 		HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_bad_oct_pkt_num)},
110 	{"mac_rx_total_pkt_num",
111 		HNS3_MAC_STATS_OFFSET(mac_rx_total_pkt_num)},
112 	{"mac_rx_total_oct_num",
113 		HNS3_MAC_STATS_OFFSET(mac_rx_total_oct_num)},
114 	{"mac_rx_good_pkt_num",
115 		HNS3_MAC_STATS_OFFSET(mac_rx_good_pkt_num)},
116 	{"mac_rx_bad_pkt_num",
117 		HNS3_MAC_STATS_OFFSET(mac_rx_bad_pkt_num)},
118 	{"mac_rx_good_oct_num",
119 		HNS3_MAC_STATS_OFFSET(mac_rx_good_oct_num)},
120 	{"mac_rx_bad_oct_num",
121 		HNS3_MAC_STATS_OFFSET(mac_rx_bad_oct_num)},
122 	{"mac_rx_uni_pkt_num",
123 		HNS3_MAC_STATS_OFFSET(mac_rx_uni_pkt_num)},
124 	{"mac_rx_multi_pkt_num",
125 		HNS3_MAC_STATS_OFFSET(mac_rx_multi_pkt_num)},
126 	{"mac_rx_broad_pkt_num",
127 		HNS3_MAC_STATS_OFFSET(mac_rx_broad_pkt_num)},
128 	{"mac_rx_undersize_pkt_num",
129 		HNS3_MAC_STATS_OFFSET(mac_rx_undersize_pkt_num)},
130 	{"mac_rx_oversize_pkt_num",
131 		HNS3_MAC_STATS_OFFSET(mac_rx_oversize_pkt_num)},
132 	{"mac_rx_64_oct_pkt_num",
133 		HNS3_MAC_STATS_OFFSET(mac_rx_64_oct_pkt_num)},
134 	{"mac_rx_65_127_oct_pkt_num",
135 		HNS3_MAC_STATS_OFFSET(mac_rx_65_127_oct_pkt_num)},
136 	{"mac_rx_128_255_oct_pkt_num",
137 		HNS3_MAC_STATS_OFFSET(mac_rx_128_255_oct_pkt_num)},
138 	{"mac_rx_256_511_oct_pkt_num",
139 		HNS3_MAC_STATS_OFFSET(mac_rx_256_511_oct_pkt_num)},
140 	{"mac_rx_512_1023_oct_pkt_num",
141 		HNS3_MAC_STATS_OFFSET(mac_rx_512_1023_oct_pkt_num)},
142 	{"mac_rx_1024_1518_oct_pkt_num",
143 		HNS3_MAC_STATS_OFFSET(mac_rx_1024_1518_oct_pkt_num)},
144 	{"mac_rx_1519_2047_oct_pkt_num",
145 		HNS3_MAC_STATS_OFFSET(mac_rx_1519_2047_oct_pkt_num)},
146 	{"mac_rx_2048_4095_oct_pkt_num",
147 		HNS3_MAC_STATS_OFFSET(mac_rx_2048_4095_oct_pkt_num)},
148 	{"mac_rx_4096_8191_oct_pkt_num",
149 		HNS3_MAC_STATS_OFFSET(mac_rx_4096_8191_oct_pkt_num)},
150 	{"mac_rx_8192_9216_oct_pkt_num",
151 		HNS3_MAC_STATS_OFFSET(mac_rx_8192_9216_oct_pkt_num)},
152 	{"mac_rx_9217_12287_oct_pkt_num",
153 		HNS3_MAC_STATS_OFFSET(mac_rx_9217_12287_oct_pkt_num)},
154 	{"mac_rx_12288_16383_oct_pkt_num",
155 		HNS3_MAC_STATS_OFFSET(mac_rx_12288_16383_oct_pkt_num)},
156 	{"mac_rx_1519_max_good_pkt_num",
157 		HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_good_oct_pkt_num)},
158 	{"mac_rx_1519_max_bad_pkt_num",
159 		HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_bad_oct_pkt_num)},
160 	{"mac_tx_fragment_pkt_num",
161 		HNS3_MAC_STATS_OFFSET(mac_tx_fragment_pkt_num)},
162 	{"mac_tx_undermin_pkt_num",
163 		HNS3_MAC_STATS_OFFSET(mac_tx_undermin_pkt_num)},
164 	{"mac_tx_jabber_pkt_num",
165 		HNS3_MAC_STATS_OFFSET(mac_tx_jabber_pkt_num)},
166 	{"mac_tx_err_all_pkt_num",
167 		HNS3_MAC_STATS_OFFSET(mac_tx_err_all_pkt_num)},
168 	{"mac_tx_from_app_good_pkt_num",
169 		HNS3_MAC_STATS_OFFSET(mac_tx_from_app_good_pkt_num)},
170 	{"mac_tx_from_app_bad_pkt_num",
171 		HNS3_MAC_STATS_OFFSET(mac_tx_from_app_bad_pkt_num)},
172 	{"mac_rx_fragment_pkt_num",
173 		HNS3_MAC_STATS_OFFSET(mac_rx_fragment_pkt_num)},
174 	{"mac_rx_undermin_pkt_num",
175 		HNS3_MAC_STATS_OFFSET(mac_rx_undermin_pkt_num)},
176 	{"mac_rx_jabber_pkt_num",
177 		HNS3_MAC_STATS_OFFSET(mac_rx_jabber_pkt_num)},
178 	{"mac_rx_fcs_err_pkt_num",
179 		HNS3_MAC_STATS_OFFSET(mac_rx_fcs_err_pkt_num)},
180 	{"mac_rx_send_app_good_pkt_num",
181 		HNS3_MAC_STATS_OFFSET(mac_rx_send_app_good_pkt_num)},
182 	{"mac_rx_send_app_bad_pkt_num",
183 		HNS3_MAC_STATS_OFFSET(mac_rx_send_app_bad_pkt_num)}
184 };
185 
186 static const struct hns3_xstats_name_offset hns3_error_int_stats_strings[] = {
187 	{"MAC_AFIFO_TNL_INT_R",
188 		HNS3_ERR_INT_STATS_FIELD_OFFSET(mac_afifo_tnl_int_cnt)},
189 	{"PPU_MPF_ABNORMAL_INT_ST2_MSIX",
190 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abn_int_st2_msix_cnt)},
191 	{"SSU_PORT_BASED_ERR_INT_MSIX",
192 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_port_based_pf_int_cnt)},
193 	{"PPP_PF_ABNORMAL_INT_ST0",
194 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_pf_abnormal_int_cnt)},
195 	{"PPU_PF_ABNORMAL_INT_ST_MSIX",
196 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_pf_abnormal_int_msix_cnt)},
197 	{"IMP_TCM_ECC_INT_STS",
198 		HNS3_ERR_INT_STATS_FIELD_OFFSET(imp_tcm_ecc_int_cnt)},
199 	{"CMDQ_MEM_ECC_INT_STS",
200 		HNS3_ERR_INT_STATS_FIELD_OFFSET(cmdq_mem_ecc_int_cnt)},
201 	{"IMP_RD_POISON_INT_STS",
202 		HNS3_ERR_INT_STATS_FIELD_OFFSET(imp_rd_poison_int_cnt)},
203 	{"TQP_INT_ECC_INT_STS",
204 		HNS3_ERR_INT_STATS_FIELD_OFFSET(tqp_int_ecc_int_cnt)},
205 	{"MSIX_ECC_INT_STS",
206 		HNS3_ERR_INT_STATS_FIELD_OFFSET(msix_ecc_int_cnt)},
207 	{"SSU_ECC_MULTI_BIT_INT_0",
208 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ecc_multi_bit_int_0_cnt)},
209 	{"SSU_ECC_MULTI_BIT_INT_1",
210 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ecc_multi_bit_int_1_cnt)},
211 	{"SSU_COMMON_ERR_INT",
212 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_common_ecc_int_cnt)},
213 	{"IGU_INT_STS",
214 		HNS3_ERR_INT_STATS_FIELD_OFFSET(igu_int_cnt)},
215 	{"PPP_MPF_ABNORMAL_INT_ST1",
216 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_mpf_abnormal_int_st1_cnt)},
217 	{"PPP_MPF_ABNORMAL_INT_ST3",
218 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_mpf_abnormal_int_st3_cnt)},
219 	{"PPU_MPF_ABNORMAL_INT_ST1",
220 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abnormal_int_st1_cnt)},
221 	{"PPU_MPF_ABNORMAL_INT_ST2_RAS",
222 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abn_int_st2_ras_cnt)},
223 	{"PPU_MPF_ABNORMAL_INT_ST3",
224 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abnormal_int_st3_cnt)},
225 	{"TM_SCH_RINT",
226 		HNS3_ERR_INT_STATS_FIELD_OFFSET(tm_sch_int_cnt)},
227 	{"QCN_FIFO_RINT",
228 		HNS3_ERR_INT_STATS_FIELD_OFFSET(qcn_fifo_int_cnt)},
229 	{"QCN_ECC_RINT",
230 		HNS3_ERR_INT_STATS_FIELD_OFFSET(qcn_ecc_int_cnt)},
231 	{"NCSI_ECC_INT_RPT",
232 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ncsi_ecc_int_cnt)},
233 	{"SSU_PORT_BASED_ERR_INT_RAS",
234 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_port_based_err_int_cnt)},
235 	{"SSU_FIFO_OVERFLOW_INT",
236 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_fifo_overflow_int_cnt)},
237 	{"SSU_ETS_TCG_INT",
238 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ets_tcg_int_cnt)},
239 	{"IGU_EGU_TNL_INT_STS",
240 		HNS3_ERR_INT_STATS_FIELD_OFFSET(igu_egu_tnl_int_cnt)},
241 	{"PPU_PF_ABNORMAL_INT_ST_RAS",
242 		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_pf_abnormal_int_ras_cnt)},
243 };
244 
245 /* Reset statistics */
246 static const struct hns3_xstats_name_offset hns3_reset_stats_strings[] = {
247 	{"REQ_RESET_CNT",
248 		HNS3_RESET_STATS_FIELD_OFFSET(request_cnt)},
249 	{"GLOBAL_RESET_CNT",
250 		HNS3_RESET_STATS_FIELD_OFFSET(global_cnt)},
251 	{"IMP_RESET_CNT",
252 		HNS3_RESET_STATS_FIELD_OFFSET(imp_cnt)},
253 	{"RESET_EXEC_CNT",
254 		HNS3_RESET_STATS_FIELD_OFFSET(exec_cnt)},
255 	{"RESET_SUCCESS_CNT",
256 		HNS3_RESET_STATS_FIELD_OFFSET(success_cnt)},
257 	{"RESET_FAIL_CNT",
258 		HNS3_RESET_STATS_FIELD_OFFSET(fail_cnt)},
259 	{"RESET_MERGE_CNT",
260 		HNS3_RESET_STATS_FIELD_OFFSET(merge_cnt)}
261 };
262 
263 /* Statistics of errors in the Rx BD */
264 static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = {
265 	{"RX_PKT_LEN_ERRORS",
266 		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)},
267 	{"L2_RX_ERRORS",
268 		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)},
269 	{"RX_L3_CHECKSUM_ERRORS",
270 		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l3_csum_errors)},
271 	{"RX_L4_CHECKSUM_ERRORS",
272 		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l4_csum_errors)},
273 	{"RX_OL3_CHECKSUM_ERRORS",
274 		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol3_csum_errors)},
275 	{"RX_OL4_CHECKSUM_ERRORS",
276 		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol4_csum_errors)}
277 };
278 
279 /* Tx error statistics */
280 static const struct hns3_xstats_name_offset hns3_tx_errors_strings[] = {
281 	{"TX_OVER_LENGTH_PKT_CNT",
282 		HNS3_TX_ERROR_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
283 	{"TX_EXCEED_LIMITED_BD_PKT_CNT",
284 		HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
285 	{"TX_EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
286 		HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
287 	{"TX_UNSUPPORTED_TUNNEL_PKT_CNT",
288 		HNS3_TX_ERROR_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
289 	{"TX_QUEUE_FULL_CNT",
290 		HNS3_TX_ERROR_STATS_FIELD_OFFSET(queue_full_cnt)},
291 	{"TX_SHORT_PKT_PAD_FAIL_CNT",
292 		HNS3_TX_ERROR_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
293 };
294 
295 /* Rx queue statistics */
296 static const struct hns3_xstats_name_offset hns3_rx_queue_strings[] = {
297 	{"RX_QUEUE_FBD", HNS3_RING_RX_FBDNUM_REG}
298 };
299 
300 /* Tx queue statistics */
301 static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
302 	{"TX_QUEUE_FBD", HNS3_RING_TX_FBDNUM_REG}
303 };
304 
305 #define HNS3_NUM_MAC_STATS (sizeof(hns3_mac_strings) / \
306 	sizeof(hns3_mac_strings[0]))
307 
308 #define HNS3_NUM_ERROR_INT_XSTATS (sizeof(hns3_error_int_stats_strings) / \
309 	sizeof(hns3_error_int_stats_strings[0]))
310 
311 #define HNS3_NUM_RESET_XSTATS (sizeof(hns3_reset_stats_strings) / \
312 	sizeof(hns3_reset_stats_strings[0]))
313 
314 #define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \
315 	sizeof(hns3_rx_bd_error_strings[0]))
316 
317 #define HNS3_NUM_TX_ERRORS_XSTATS (sizeof(hns3_tx_errors_strings) / \
318 	sizeof(hns3_tx_errors_strings[0]))
319 
320 #define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \
321 	sizeof(hns3_rx_queue_strings[0]))
322 
323 #define HNS3_NUM_TX_QUEUE_STATS (sizeof(hns3_tx_queue_strings) / \
324 	sizeof(hns3_tx_queue_strings[0]))
325 
326 #define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_ERROR_INT_XSTATS + \
327 			    HNS3_NUM_RESET_XSTATS)
328 
329 static void hns3_tqp_stats_clear(struct hns3_hw *hw);
330 
331 /*
332  * Query all the MAC statistics data of the Network ICL command, opcode id:
333  * 0x0034. The descriptor number of this command must match the reg_num
334  * returned by the 'query mac reg num' command (opcode id: 0x0033).
335  * @param hw
336  *   Pointer to structure hns3_hw.
337  * @return
338  *   0 on success.
339  */
340 static int
341 hns3_update_mac_stats(struct hns3_hw *hw, const uint32_t desc_num)
342 {
343 	uint64_t *data = (uint64_t *)(&hw->mac_stats);
344 	struct hns3_cmd_desc *desc;
345 	uint64_t *desc_data;
346 	uint16_t i, k, n;
347 	int ret;
348 
349 	desc = rte_malloc("hns3_mac_desc",
350 			  desc_num * sizeof(struct hns3_cmd_desc), 0);
351 	if (desc == NULL) {
352 		hns3_err(hw, "Mac_update_stats alloced desc malloc fail");
353 		return -ENOMEM;
354 	}
355 
356 	hns3_cmd_setup_basic_desc(desc, HNS3_OPC_STATS_MAC_ALL, true);
357 	ret = hns3_cmd_send(hw, desc, desc_num);
358 	if (ret) {
359 		hns3_err(hw, "Update complete MAC pkt stats fail : %d", ret);
360 		rte_free(desc);
361 		return ret;
362 	}
363 
364 	for (i = 0; i < desc_num; i++) {
365 		/* For special opcode 0034, only the first desc has the head */
366 		if (i == 0) {
367 			desc_data = (uint64_t *)(&desc[i].data[0]);
368 			n = HNS3_RD_FIRST_STATS_NUM;
369 		} else {
370 			desc_data = (uint64_t *)(&desc[i]);
371 			n = HNS3_RD_OTHER_STATS_NUM;
372 		}
373 
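		/*
		 * Accumulate each 64-bit counter into hw->mac_stats in order;
		 * this relies on the field order of struct hns3_mac_stats
		 * matching the order in which the firmware reports counters.
		 */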
374 		for (k = 0; k < n; k++) {
375 			*data += rte_le_to_cpu_64(*desc_data);
376 			data++;
377 			desc_data++;
378 		}
379 	}
380 	rte_free(desc);
381 
382 	return 0;
383 }
384 
385 /*
386  * Query MAC stat reg num command, opcode id: 0x0033.
387  * This command is used before sending the 'query_mac_stat' command; the
388  * descriptor number of 'query_mac_stat' must match the reg_num it returns.
389  * @param desc_num
390  *   Pointer used to return the required descriptor number.
391  * @return
392  *   0 on success.
393  */
394 static int
395 hns3_mac_query_reg_num(struct rte_eth_dev *dev, uint32_t *desc_num)
396 {
397 	struct hns3_adapter *hns = dev->data->dev_private;
398 	struct hns3_hw *hw = &hns->hw;
399 	struct hns3_cmd_desc desc;
400 	uint32_t *desc_data;
401 	uint32_t reg_num;
402 	int ret;
403 
404 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_REG_NUM, true);
405 	ret = hns3_cmd_send(hw, &desc, 1);
406 	if (ret)
407 		return ret;
408 
409 	/*
410 	 * The number of MAC statistics registers provided by the IMP in this
411 	 * version.
412 	 */
413 	desc_data = (uint32_t *)(&desc.data[0]);
414 	reg_num = rte_le_to_cpu_32(*desc_data);
415 
416 	/*
417 	 * The descriptor number of 'query_additional_mac_stat command' is
418 	 * '1 + (reg_num-3)/4 + ((reg_num-3)%4 !=0)';
419 	 * This value is 83 in this version
420 	 */
421 	*desc_num = 1 + ((reg_num - 3) >> 2) +
422 		    (uint32_t)(((reg_num - 3) & 0x3) ? 1 : 0);
423 
424 	return 0;
425 }
426 
427 static int
428 hns3_query_update_mac_stats(struct rte_eth_dev *dev)
429 {
430 	struct hns3_adapter *hns = dev->data->dev_private;
431 	struct hns3_hw *hw = &hns->hw;
432 	uint32_t desc_num;
433 	int ret;
434 
435 	ret = hns3_mac_query_reg_num(dev, &desc_num);
436 	if (ret == 0)
437 		ret = hns3_update_mac_stats(hw, desc_num);
438 	else
439 		hns3_err(hw, "Query mac reg num fail : %d", ret);
440 	return ret;
441 }
442 
443 /* Get tqp stats from register */
444 static int
445 hns3_update_tqp_stats(struct hns3_hw *hw)
446 {
447 	struct hns3_tqp_stats *stats = &hw->tqp_stats;
448 	struct hns3_cmd_desc desc;
449 	uint64_t cnt;
450 	uint16_t i;
451 	int ret;
452 
453 	for (i = 0; i < hw->tqps_num; i++) {
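		/*
		 * Query the Rx packet counter of queue i; the hardware counter
		 * is cleared on read, so it is accumulated in software.
		 */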
454 		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_RX_STATUS,
455 					  true);
456 
457 		desc.data[0] = rte_cpu_to_le_32((uint32_t)i);
458 		ret = hns3_cmd_send(hw, &desc, 1);
459 		if (ret) {
460 			hns3_err(hw, "Failed to query RX No.%u queue stat: %d",
461 				 i, ret);
462 			return ret;
463 		}
464 		cnt = rte_le_to_cpu_32(desc.data[1]);
465 		stats->rcb_rx_ring_pktnum_rcd += cnt;
466 		stats->rcb_rx_ring_pktnum[i] += cnt;
467 
468 		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_TX_STATUS,
469 					  true);
470 
471 		desc.data[0] = rte_cpu_to_le_32((uint32_t)i);
472 		ret = hns3_cmd_send(hw, &desc, 1);
473 		if (ret) {
474 			hns3_err(hw, "Failed to query TX No.%u queue stat: %d",
475 				 i, ret);
476 			return ret;
477 		}
478 		cnt = rte_le_to_cpu_32(desc.data[1]);
479 		stats->rcb_tx_ring_pktnum_rcd += cnt;
480 		stats->rcb_tx_ring_pktnum[i] += cnt;
481 	}
482 
483 	return 0;
484 }
485 
486 /*
487  * Query tqp tx queue statistics, opcode id: 0x0B03.
488  * Query tqp rx queue statistics, opcode id: 0x0B13.
489  * Get all statistics of a port.
490  * @param eth_dev
491  *   Pointer to Ethernet device.
492  * @param rte_stats
493  *   Pointer to structure rte_eth_stats.
494  * @return
495  *   0 on success.
496  */
497 int
498 hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
499 {
500 	struct hns3_adapter *hns = eth_dev->data->dev_private;
501 	struct hns3_hw *hw = &hns->hw;
502 	struct hns3_tqp_stats *stats = &hw->tqp_stats;
503 	struct hns3_rx_queue *rxq;
504 	struct hns3_tx_queue *txq;
505 	uint64_t cnt;
506 	uint64_t num;
507 	uint16_t i;
508 	int ret;
509 
510 	/* Update tqp stats by reading registers */
511 	ret = hns3_update_tqp_stats(hw);
512 	if (ret) {
513 		hns3_err(hw, "Update tqp stats fail : %d", ret);
514 		return ret;
515 	}
516 
517 	/* Get the error stats of received packets */
518 	num = RTE_MIN(RTE_ETHDEV_QUEUE_STAT_CNTRS, eth_dev->data->nb_rx_queues);
519 	for (i = 0; i != num; ++i) {
520 		rxq = eth_dev->data->rx_queues[i];
521 		if (rxq) {
522 			cnt = rxq->l2_errors + rxq->pkt_len_errors;
523 			rte_stats->q_errors[i] = cnt;
524 			rte_stats->q_ipackets[i] =
525 				stats->rcb_rx_ring_pktnum[i] - cnt;
526 			rte_stats->ierrors += cnt;
527 		}
528 	}
529 	/* Get the error stats of transmitted packets */
530 	num = RTE_MIN(RTE_ETHDEV_QUEUE_STAT_CNTRS, eth_dev->data->nb_tx_queues);
531 	for (i = 0; i < num; i++) {
532 		txq = eth_dev->data->tx_queues[i];
533 		if (txq)
534 			rte_stats->q_opackets[i] = stats->rcb_tx_ring_pktnum[i];
535 	}
536 
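	/*
	 * Port totals come from the accumulated per-queue hardware counters;
	 * packets dropped due to Rx BD errors are excluded from ipackets.
	 */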
537 	rte_stats->oerrors = 0;
538 	rte_stats->ipackets  = stats->rcb_rx_ring_pktnum_rcd -
539 		rte_stats->ierrors;
540 	rte_stats->opackets  = stats->rcb_tx_ring_pktnum_rcd -
541 		rte_stats->oerrors;
542 	rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed;
543 
544 	return 0;
545 }
546 
547 int
548 hns3_stats_reset(struct rte_eth_dev *eth_dev)
549 {
550 	struct hns3_adapter *hns = eth_dev->data->dev_private;
551 	struct hns3_hw *hw = &hns->hw;
552 	struct hns3_cmd_desc desc_reset;
553 	struct hns3_rx_queue *rxq;
554 	struct hns3_tx_queue *txq;
555 	uint16_t i;
556 	int ret;
557 
558 	/*
559 	 * Note: Reading hardware statistics of rx/tx queue packet number
560 	 * will clear them.
561 	 */
562 	for (i = 0; i < hw->tqps_num; i++) {
563 		hns3_cmd_setup_basic_desc(&desc_reset, HNS3_OPC_QUERY_RX_STATUS,
564 					  true);
565 		desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i);
566 		ret = hns3_cmd_send(hw, &desc_reset, 1);
567 		if (ret) {
568 			hns3_err(hw, "Failed to reset RX No.%u queue stat: %d",
569 				 i, ret);
570 			return ret;
571 		}
572 
573 		hns3_cmd_setup_basic_desc(&desc_reset, HNS3_OPC_QUERY_TX_STATUS,
574 					  true);
575 		desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i);
576 		ret = hns3_cmd_send(hw, &desc_reset, 1);
577 		if (ret) {
578 			hns3_err(hw, "Failed to reset TX No.%u queue stat: %d",
579 				 i, ret);
580 			return ret;
581 		}
582 	}
583 
584 	/* Clear the Rx BD errors stats */
585 	for (i = 0; i != eth_dev->data->nb_rx_queues; ++i) {
586 		rxq = eth_dev->data->rx_queues[i];
587 		if (rxq) {
588 			rxq->pkt_len_errors = 0;
589 			rxq->l2_errors = 0;
590 			rxq->l3_csum_errors = 0;
591 			rxq->l4_csum_errors = 0;
592 			rxq->ol3_csum_errors = 0;
593 			rxq->ol4_csum_errors = 0;
594 		}
595 	}
596 
597 	/* Clear the Tx errors stats */
598 	for (i = 0; i != eth_dev->data->nb_tx_queues; ++i) {
599 		txq = eth_dev->data->tx_queues[i];
600 		if (txq) {
601 			txq->over_length_pkt_cnt = 0;
602 			txq->exceed_limit_bd_pkt_cnt = 0;
603 			txq->exceed_limit_bd_reassem_fail = 0;
604 			txq->unsupported_tunnel_pkt_cnt = 0;
605 			txq->queue_full_cnt = 0;
606 			txq->pkt_padding_fail_cnt = 0;
607 		}
608 	}
609 
610 	hns3_tqp_stats_clear(hw);
611 
612 	return 0;
613 }
614 
615 static int
616 hns3_mac_stats_reset(struct rte_eth_dev *dev)
617 {
618 	struct hns3_adapter *hns = dev->data->dev_private;
619 	struct hns3_hw *hw = &hns->hw;
620 	struct hns3_mac_stats *mac_stats = &hw->mac_stats;
621 	int ret;
622 
623 	ret = hns3_query_update_mac_stats(dev);
624 	if (ret) {
625 		hns3_err(hw, "Clear Mac stats fail : %d", ret);
626 		return ret;
627 	}
628 
629 	memset(mac_stats, 0, sizeof(struct hns3_mac_stats));
630 
631 	return 0;
632 }
633 
634 /* This function calculates the number of xstats based on the current config */
635 static int
636 hns3_xstats_calc_num(struct rte_eth_dev *dev)
637 {
638 	struct hns3_adapter *hns = dev->data->dev_private;
639 	int bderr_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_BD_ERROR_XSTATS;
640 	int tx_err_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_ERRORS_XSTATS;
641 	int rx_queue_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_QUEUE_STATS;
642 	int tx_queue_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_QUEUE_STATS;
643 
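	/*
	 * A VF exposes only the per-queue and reset statistics; a PF also
	 * exposes the MAC and error-interrupt statistics, which together with
	 * the reset statistics make up HNS3_FIX_NUM_STATS.
	 */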
644 	if (hns->is_vf)
645 		return bderr_stats + tx_err_stats + rx_queue_stats +
646 		       tx_queue_stats + HNS3_NUM_RESET_XSTATS;
647 	else
648 		return bderr_stats + tx_err_stats + rx_queue_stats +
649 		       tx_queue_stats + HNS3_FIX_NUM_STATS;
650 }
651 
652 static void
653 hns3_get_queue_stats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
654 		     int *count)
655 {
656 	struct hns3_adapter *hns = dev->data->dev_private;
657 	struct hns3_hw *hw = &hns->hw;
658 	uint32_t reg_offset;
659 	uint16_t i, j;
660 
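	/*
	 * Each TQP has its own register block; the per-queue FBD counters
	 * below are read at a fixed offset from that block's base address.
	 */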
661 	/* Get rx queue stats */
662 	for (j = 0; j < dev->data->nb_rx_queues; j++) {
663 		for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
664 			reg_offset = hns3_get_tqp_reg_offset(j);
665 			xstats[*count].value = hns3_read_dev(hw,
666 				reg_offset + hns3_rx_queue_strings[i].offset);
667 			xstats[*count].id = *count;
668 			(*count)++;
669 		}
670 	}
671 
672 	/* Get tx queue stats */
673 	for (j = 0; j < dev->data->nb_tx_queues; j++) {
674 		for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
675 			reg_offset = hns3_get_tqp_reg_offset(j);
676 			xstats[*count].value = hns3_read_dev(hw,
677 				reg_offset + hns3_tx_queue_strings[i].offset);
678 			xstats[*count].id = *count;
679 			(*count)++;
680 		}
681 	}
682 }
683 
684 void
685 hns3_error_int_stats_add(struct hns3_adapter *hns, const char *err)
686 {
687 	struct hns3_pf *pf = &hns->pf;
688 	uint16_t i;
689 	char *addr;
690 
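	/*
	 * Look up the error-interrupt counter whose name matches 'err' and
	 * increment it; unknown names are silently ignored.
	 */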
691 	for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
692 		if (strcmp(hns3_error_int_stats_strings[i].name, err) == 0) {
693 			addr = (char *)&pf->abn_int_stats +
694 				hns3_error_int_stats_strings[i].offset;
695 			*(uint64_t *)addr += 1;
696 			break;
697 		}
698 	}
699 }
700 
701 /*
702  * Retrieve extended (tqp | MAC) statistics of an Ethernet device.
703  * @param dev
704  *   Pointer to Ethernet device.
705  * @param xstats
706  *   A pointer to a table of structure of type *rte_eth_xstat*
707  *   to be filled with device statistics ids and values.
708  *   This parameter can be set to NULL if n is 0.
709  * @param n
710  *   The size of the xstats array (number of elements).
711  * @return
712  *   0 on failure, count (the number of statistics elements) on success.
713  */
714 int
715 hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
716 		    unsigned int n)
717 {
718 	struct hns3_adapter *hns = dev->data->dev_private;
719 	struct hns3_pf *pf = &hns->pf;
720 	struct hns3_hw *hw = &hns->hw;
721 	struct hns3_mac_stats *mac_stats = &hw->mac_stats;
722 	struct hns3_reset_stats *reset_stats = &hw->reset.stats;
723 	struct hns3_rx_queue *rxq;
724 	struct hns3_tx_queue *txq;
725 	uint16_t i, j;
726 	char *addr;
727 	int count;
728 	int ret;
729 
730 	if (xstats == NULL)
731 		return 0;
732 
733 	count = hns3_xstats_calc_num(dev);
734 	if ((int)n < count)
735 		return count;
736 
737 	count = 0;
738 
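	/*
	 * Fill the values in the same fixed order as
	 * hns3_dev_xstats_get_names() so that xstats[i].id == i holds.
	 */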
739 	if (!hns->is_vf) {
740 		/* Update Mac stats */
741 		ret = hns3_query_update_mac_stats(dev);
742 		if (ret) {
743 			hns3_err(hw, "Update Mac stats fail : %d", ret);
744 			return 0;
745 		}
746 
747 		/* Get MAC stats from the hw->mac_stats struct */
748 		for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
749 			addr = (char *)mac_stats + hns3_mac_strings[i].offset;
750 			xstats[count].value = *(uint64_t *)addr;
751 			xstats[count].id = count;
752 			count++;
753 		}
754 
755 		for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
756 			addr = (char *)&pf->abn_int_stats +
757 			       hns3_error_int_stats_strings[i].offset;
758 			xstats[count].value = *(uint64_t *)addr;
759 			xstats[count].id = count;
760 			count++;
761 		}
762 	}
763 
764 	/* Get the reset stat */
765 	for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
766 		addr = (char *)reset_stats + hns3_reset_stats_strings[i].offset;
767 		xstats[count].value = *(uint64_t *)addr;
768 		xstats[count].id = count;
769 		count++;
770 	}
771 
772 	/* Get the Rx BD errors stats */
773 	for (j = 0; j < dev->data->nb_rx_queues; j++) {
774 		for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
775 			rxq = dev->data->rx_queues[j];
776 			addr = (char *)rxq + hns3_rx_bd_error_strings[i].offset;
777 			xstats[count].value = *(uint64_t *)addr;
778 			xstats[count].id = count;
779 			count++;
780 		}
781 	}
782 
783 	/* Get the Tx errors stats */
784 	for (j = 0; j < dev->data->nb_tx_queues; j++) {
785 		for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) {
786 			txq = dev->data->tx_queues[j];
787 			addr = (char *)txq + hns3_tx_errors_strings[i].offset;
788 			xstats[count].value = *(uint64_t *)addr;
789 			xstats[count].id = count;
790 			count++;
791 		}
792 	}
793 
794 	hns3_get_queue_stats(dev, xstats, &count);
795 	return count;
796 }
797 
798 /*
799  * Retrieve names of extended statistics of an Ethernet device.
800  *
801  * There is an assumption that 'xstat_names' and 'xstats' arrays are matched
802  * by array index:
803  *  xstats_names[i].name => xstats[i].value
804  *
805  * And the array index is the same as the id field of 'struct rte_eth_xstat':
806  *  xstats[i].id == i
807  *
808  * This assumption makes key-value pair matching less flexible but simpler.
809  *
810  * @param dev
811  *   Pointer to Ethernet device.
812  * @param xstats_names
813  *   An rte_eth_xstat_name array of at least *size* elements to
814  *   be filled. If set to NULL, the function returns the required number
815  *   of elements.
816  * @param size
817  *   The size of the xstats_names array (number of elements).
818  * @return
819  *   - A positive value lower or equal to size: success. The return value
820  *     is the number of entries filled in the stats table.
821  */
822 int
823 hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
824 			  struct rte_eth_xstat_name *xstats_names,
825 			  __rte_unused unsigned int size)
826 {
827 	struct hns3_adapter *hns = dev->data->dev_private;
828 	int cnt_stats = hns3_xstats_calc_num(dev);
829 	uint32_t count = 0;
830 	uint16_t i, j;
831 
832 	if (xstats_names == NULL)
833 		return cnt_stats;
834 
835 	/* Note: the size limit is checked in rte_eth_xstats_get_names() */
836 	if (!hns->is_vf) {
837 		/* Get MAC stats names from the hw->mac_stats struct */
838 		for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
839 			snprintf(xstats_names[count].name,
840 				 sizeof(xstats_names[count].name),
841 				 "%s", hns3_mac_strings[i].name);
842 			count++;
843 		}
844 
845 		for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
846 			snprintf(xstats_names[count].name,
847 				 sizeof(xstats_names[count].name),
848 				 "%s", hns3_error_int_stats_strings[i].name);
849 			count++;
850 		}
851 	}
852 	for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
853 		snprintf(xstats_names[count].name,
854 			 sizeof(xstats_names[count].name),
855 			 "%s", hns3_reset_stats_strings[i].name);
856 		count++;
857 	}
858 
859 	for (j = 0; j < dev->data->nb_rx_queues; j++) {
860 		for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
861 			snprintf(xstats_names[count].name,
862 				 sizeof(xstats_names[count].name),
863 				 "rx_q%u%s", j,
864 				 hns3_rx_bd_error_strings[i].name);
865 			count++;
866 		}
867 	}
868 
869 	for (j = 0; j < dev->data->nb_tx_queues; j++) {
870 		for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) {
871 			snprintf(xstats_names[count].name,
872 				 sizeof(xstats_names[count].name),
873 				 "tx_q%u%s", j,
874 				 hns3_tx_errors_strings[i].name);
875 			count++;
876 		}
877 	}
878 
879 	for (j = 0; j < dev->data->nb_rx_queues; j++) {
880 		for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
881 			snprintf(xstats_names[count].name,
882 				 sizeof(xstats_names[count].name),
883 				 "rx_q%u%s", j, hns3_rx_queue_strings[i].name);
884 			count++;
885 		}
886 	}
887 
888 	for (j = 0; j < dev->data->nb_tx_queues; j++) {
889 		for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
890 			snprintf(xstats_names[count].name,
891 				 sizeof(xstats_names[count].name),
892 				 "tx_q%u%s", j, hns3_tx_queue_strings[i].name);
893 			count++;
894 		}
895 	}
896 
897 	return count;
898 }
899 
900 /*
901  * Retrieve extended statistics of an Ethernet device.
902  *
903  * @param dev
904  *   Pointer to Ethernet device.
905  * @param ids
906  *   A pointer to an ids array passed by application. This tells which
907  *   statistics values the function should retrieve. This parameter
908  *   can be set to NULL if size is 0. In this case the function will
909  *   retrieve all available statistics.
910  * @param values
911  *   A pointer to a table to be filled with device statistics values.
912  * @param size
913  *   The size of the ids array (number of elements).
914  * @return
915  *   - A positive value lower or equal to size: success. The return value
916  *     is the number of entries filled in the stats table.
917  *   - A positive value higher than size: error, the given statistics table
918  *     is too small. The return value corresponds to the size that should
919  *     be given to succeed. The entries in the table are not valid and
920  *     shall not be used by the caller.
921  *   - 0 on no ids.
922  */
923 int
924 hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
925 			  uint64_t *values, uint32_t size)
926 {
927 	const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
928 	struct hns3_adapter *hns = dev->data->dev_private;
929 	struct rte_eth_xstat *values_copy;
930 	struct hns3_hw *hw = &hns->hw;
931 	uint32_t count_value;
932 	uint64_t len;
933 	uint32_t i;
934 	int ret;
935 
936 	if (ids == NULL || size < cnt_stats)
937 		return cnt_stats;
938 
939 	/* Update tqp stats by reading registers */
940 	ret = hns3_update_tqp_stats(hw);
941 	if (ret) {
942 		hns3_err(hw, "Update tqp stats fail : %d", ret);
943 		return ret;
944 	}
945 
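	/*
	 * Retrieve the full xstats table into a temporary buffer, then copy
	 * out only the entries selected by the caller-provided ids.
	 */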
946 	len = cnt_stats * sizeof(struct rte_eth_xstat);
947 	values_copy = rte_zmalloc("hns3_xstats_values", len, 0);
948 	if (values_copy == NULL) {
949 		hns3_err(hw, "Failed to allocate %" PRIx64 " bytes needed "
950 			     "to store statistics values", len);
951 		return -ENOMEM;
952 	}
953 
954 	count_value = hns3_dev_xstats_get(dev, values_copy, cnt_stats);
955 	if (count_value != cnt_stats) {
956 		rte_free(values_copy);
957 		return -EINVAL;
958 	}
959 
960 	for (i = 0; i < size; i++) {
961 		if (ids[i] >= cnt_stats) {
962 			hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, "
963 				     "should < %u", i, ids[i], cnt_stats);
964 			rte_free(values_copy);
965 			return -EINVAL;
966 		}
967 		memcpy(&values[i], &values_copy[ids[i]].value,
968 			sizeof(values[i]));
969 	}
970 
971 	rte_free(values_copy);
972 	return size;
973 }
974 
975 /*
976  * Retrieve names of extended statistics of an Ethernet device.
977  *
978  * @param dev
979  *   Pointer to Ethernet device.
980  * @param xstats_names
981  *   An rte_eth_xstat_name array of at least *size* elements to
982  *   be filled. If set to NULL, the function returns the required number
983  *   of elements.
984  * @param ids
985  *   IDs array given by app to retrieve specific statistics
986  * @param size
987  *   The size of the xstats_names array (number of elements).
988  * @return
989  *   - A positive value lower or equal to size: success. The return value
990  *     is the number of entries filled in the stats table.
991  *   - A positive value higher than size: error, the given statistics table
992  *     is too small. The return value corresponds to the size that should
993  *     be given to succeed. The entries in the table are not valid and
994  *     shall not be used by the caller.
995  */
996 int
997 hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
998 				struct rte_eth_xstat_name *xstats_names,
999 				const uint64_t *ids, uint32_t size)
1000 {
1001 	const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
1002 	struct hns3_adapter *hns = dev->data->dev_private;
1003 	struct rte_eth_xstat_name *names_copy;
1004 	struct hns3_hw *hw = &hns->hw;
1005 	uint64_t len;
1006 	uint32_t i;
1007 
1008 	if (ids == NULL || xstats_names == NULL)
1009 		return cnt_stats;
1010 
1011 	len = cnt_stats * sizeof(struct rte_eth_xstat_name);
1012 	names_copy = rte_zmalloc("hns3_xstats_names", len, 0);
1013 	if (names_copy == NULL) {
1014 		hns3_err(hw, "Failed to allocate %" PRIx64 " bytes needed "
1015 			     "to store statistics names", len);
1016 		return -ENOMEM;
1017 	}
1018 
1019 	(void)hns3_dev_xstats_get_names(dev, names_copy, cnt_stats);
1020 
1021 	for (i = 0; i < size; i++) {
1022 		if (ids[i] >= cnt_stats) {
1023 			hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, "
1024 				     "should < %u", i, ids[i], cnt_stats);
1025 			rte_free(names_copy);
1026 			return -EINVAL;
1027 		}
1028 		snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
1029 			 "%s", names_copy[ids[i]].name);
1030 	}
1031 
1032 	rte_free(names_copy);
1033 	return size;
1034 }
1035 
1036 int
1037 hns3_dev_xstats_reset(struct rte_eth_dev *dev)
1038 {
1039 	struct hns3_adapter *hns = dev->data->dev_private;
1040 	struct hns3_pf *pf = &hns->pf;
1041 	int ret;
1042 
1043 	/* Clear tqp stats */
1044 	ret = hns3_stats_reset(dev);
1045 	if (ret)
1046 		return ret;
1047 
1048 	/* Clear reset stats */
1049 	memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats));
1050 
1051 	if (hns->is_vf)
1052 		return 0;
1053 
1054 	/* HW registers are cleared on read */
1055 	ret = hns3_mac_stats_reset(dev);
1056 	if (ret)
1057 		return ret;
1058 
1059 	/* Clear error stats */
1060 	memset(&pf->abn_int_stats, 0, sizeof(struct hns3_err_msix_intr_stats));
1061 
1062 	return 0;
1063 }
1064 
1065 int
1066 hns3_tqp_stats_init(struct hns3_hw *hw)
1067 {
1068 	struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
1069 
1070 	tqp_stats->rcb_rx_ring_pktnum = rte_zmalloc("hns3_rx_ring_pkt_num",
1071 					 sizeof(uint64_t) * hw->tqps_num, 0);
1072 	if (tqp_stats->rcb_rx_ring_pktnum == NULL) {
1073 		hns3_err(hw, "failed to allocate rx_ring pkt_num.");
1074 		return -ENOMEM;
1075 	}
1076 
1077 	tqp_stats->rcb_tx_ring_pktnum = rte_zmalloc("hns3_tx_ring_pkt_num",
1078 					 sizeof(uint64_t) * hw->tqps_num, 0);
1079 	if (tqp_stats->rcb_tx_ring_pktnum == NULL) {
1080 		hns3_err(hw, "failed to allocate tx_ring pkt_num.");
1081 		rte_free(tqp_stats->rcb_rx_ring_pktnum);
1082 		tqp_stats->rcb_rx_ring_pktnum = NULL;
1083 		return -ENOMEM;
1084 	}
1085 
1086 	return 0;
1087 }
1088 
1089 void
1090 hns3_tqp_stats_uninit(struct hns3_hw *hw)
1091 {
1092 	struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
1093 
1094 	rte_free(tqp_stats->rcb_rx_ring_pktnum);
1095 	tqp_stats->rcb_rx_ring_pktnum = NULL;
1096 	rte_free(tqp_stats->rcb_tx_ring_pktnum);
1097 	tqp_stats->rcb_tx_ring_pktnum = NULL;
1098 }
1099 
1100 static void
1101 hns3_tqp_stats_clear(struct hns3_hw *hw)
1102 {
1103 	struct hns3_tqp_stats *stats = &hw->tqp_stats;
1104 
1105 	stats->rcb_rx_ring_pktnum_rcd = 0;
1106 	stats->rcb_tx_ring_pktnum_rcd = 0;
1107 	memset(stats->rcb_rx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
1108 	memset(stats->rcb_tx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
1109 }
1110