xref: /f-stack/dpdk/drivers/net/hns3/hns3_dcb.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4 
5 #include <rte_io.h>
6 #include <rte_ethdev.h>
7 
8 #include "hns3_logs.h"
9 #include "hns3_ethdev.h"
10 #include "hns3_dcb.h"
11 
12 #define HNS3_SHAPER_BS_U_DEF	5
13 #define HNS3_SHAPER_BS_S_DEF	20
14 #define BW_MAX_PERCENT		100
15 
16 /*
17  * hns3_shaper_para_calc: calculate ir parameter for the shaper
18  * @ir: rate to be configured, in Mbps
19  * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
20  * @shaper_para: shaper parameter of IR shaper
21  *
22  * the formula:
23  *
24  *		IR_b * (2 ^ IR_u) * 8
25  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
26  *		Tick * (2 ^ IR_s)
27  *
28  * @return: 0: calculated successfully, negative: fail
29  */
30 static int
31 hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
32 		      struct hns3_shaper_parameter *shaper_para)
33 {
34 #define SHAPER_DEFAULT_IR_B	126
35 #define DIVISOR_CLK		(1000 * 8)
36 #define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)
37 
38 	const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
39 		6 * 256,    /* Priority level */
40 		6 * 32,     /* Priority group level */
41 		6 * 8,      /* Port level */
42 		6 * 256     /* Qset level */
43 	};
44 	uint8_t ir_u_calc = 0;
45 	uint8_t ir_s_calc = 0;
46 	uint32_t denominator;
47 	uint32_t ir_calc;
48 	uint32_t tick;
49 
50 	/* Calc tick */
51 	if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
52 		hns3_err(hw,
53 			 "shaper_level(%u) is greater than HNS3_SHAPER_LVL_CNT(%d)",
54 			 shaper_level, HNS3_SHAPER_LVL_CNT);
55 		return -EINVAL;
56 	}
57 
58 	if (ir > hw->max_tm_rate) {
59 		hns3_err(hw, "rate(%u) exceeds the max rate(%u) supported "
60 			 "by the driver.", ir, hw->max_tm_rate);
61 		return -EINVAL;
62 	}
63 
64 	tick = tick_array[shaper_level];
65 
66 	/*
67 	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
68 	 * the formula is changed to:
69 	 *		126 * 1 * 8
70 	 * ir_calc = ---------------- * 1000
71 	 *		tick * 1
72 	 */
73 	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
74 
75 	if (ir_calc == ir) {
76 		shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
77 	} else if (ir_calc > ir) {
78 		/* Increasing the denominator to select ir_s value */
79 		do {
80 			ir_s_calc++;
81 			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
82 		} while (ir_calc > ir);
83 
84 		if (ir_calc == ir)
85 			shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
86 		else
87 			shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
88 				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
89 	} else {
90 		/*
91 		 * Increase the numerator to select the ir_u value. ir_u_calc
92 		 * reaches its maximum when ir_calc is at its minimum and ir is
93 		 * at its maximum; ir_calc is at its minimum when tick is at its
94 		 * maximum. Consequently, ir_u_calc can be increased at most up
95 		 * to eight by the while loop when the value of ir is equal
96 		 * to hw->max_tm_rate.
97 		 */
98 		uint32_t numerator;
99 		do {
100 			ir_u_calc++;
101 			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
102 			ir_calc = (numerator + (tick >> 1)) / tick;
103 		} while (ir_calc < ir);
104 
105 		if (ir_calc == ir) {
106 			shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
107 		} else {
108 			--ir_u_calc;
109 
110 			/*
111 			 * The maximum value of ir_u_calc in this branch is
112 			 * seven in all cases. Thus, value of denominator can
113 			 * not be zero here.
114 			 */
115 			denominator = DIVISOR_CLK * (1 << ir_u_calc);
116 			shaper_para->ir_b =
117 				(ir * tick + (denominator >> 1)) / denominator;
118 		}
119 	}
120 
121 	shaper_para->ir_u = ir_u_calc;
122 	shaper_para->ir_s = ir_s_calc;
123 
124 	return 0;
125 }
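/*
 * Worked example for the formula above (illustrative values, not from
 * the original source): at port level, tick = 6 * 8 = 48, so with the
 * defaults ir_b = 126, ir_u = 0 and ir_s = 0 the rounded base rate is
 *	ir_calc = (126 * 8 * 1000 + 48 / 2 - 1) / 48 = 21000 Mbps.
 * Requesting ir = 10000 Mbps takes the "ir_calc > ir" branch: ir_s is
 * raised until 1008000 / (48 << ir_s) <= ir, which holds at ir_s = 2,
 * and ir_b is then rounded to (10000 * 48 * 4 + 4000) / 8000 = 240.
 * Plugging back into the formula: 240 * 8 / (48 * 4) * 1000 = 10000.
 */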
126 
127 static int
128 hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)
129 {
130 #define HNS3_HALF_BYTE_BIT_OFFSET 4
131 	uint8_t tc = hw->dcb_info.prio_tc[pri_id];
132 
133 	if (tc >= hw->dcb_info.num_tc)
134 		return -EINVAL;
135 
136 	/*
137 	 * The register for priority has four bytes, the first byte includes
138 	 *  priority0 and priority1, the higher 4 bits stand for priority1
139 	 *  while the lower 4 bits stand for priority0, as below:
140 	 * first byte:	| pri_1 | pri_0 |
141 	 * second byte:	| pri_3 | pri_2 |
142 	 * third byte:	| pri_5 | pri_4 |
143 	 * fourth byte:	| pri_7 | pri_6 |
144 	 */
145 	pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);
146 
147 	return 0;
148 }
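/*
 * Example of the nibble packing above (illustrative values): with
 * pri_id = 5 and prio_tc[5] = 3, the byte index is 5 >> 1 = 2 and the
 * shift is (5 & 1) * 4 = 4, so pri[2] |= 3 << 4 puts TC3 into the high
 * nibble of the third byte, matching the | pri_5 | pri_4 | layout.
 */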
149 
150 static int
151 hns3_up_to_tc_map(struct hns3_hw *hw)
152 {
153 	struct hns3_cmd_desc desc;
154 	uint8_t *pri = (uint8_t *)desc.data;
155 	uint8_t pri_id;
156 	int ret;
157 
158 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);
159 
160 	for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {
161 		ret = hns3_fill_pri_array(hw, pri, pri_id);
162 		if (ret)
163 			return ret;
164 	}
165 
166 	return hns3_cmd_send(hw, &desc, 1);
167 }
168 
169 static int
170 hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)
171 {
172 	struct hns3_pg_to_pri_link_cmd *map;
173 	struct hns3_cmd_desc desc;
174 
175 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);
176 
177 	map = (struct hns3_pg_to_pri_link_cmd *)desc.data;
178 
179 	map->pg_id = pg_id;
180 	map->pri_bit_map = pri_bit_map;
181 
182 	return hns3_cmd_send(hw, &desc, 1);
183 }
184 
185 static int
186 hns3_pg_to_pri_map(struct hns3_hw *hw)
187 {
188 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
189 	struct hns3_pf *pf = &hns->pf;
190 	struct hns3_pg_info *pg_info;
191 	int ret, i;
192 
193 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
194 		return -EINVAL;
195 
196 	for (i = 0; i < hw->dcb_info.num_pg; i++) {
197 		/* Cfg pg to priority mapping */
198 		pg_info = &hw->dcb_info.pg_info[i];
199 		ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);
200 		if (ret)
201 			return ret;
202 	}
203 
204 	return 0;
205 }
206 
207 static int
208 hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)
209 {
210 	struct hns3_qs_to_pri_link_cmd *map;
211 	struct hns3_cmd_desc desc;
212 
213 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);
214 
215 	map = (struct hns3_qs_to_pri_link_cmd *)desc.data;
216 
217 	map->qs_id = rte_cpu_to_le_16(qs_id);
218 	map->priority = pri;
219 	map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;
220 
221 	return hns3_cmd_send(hw, &desc, 1);
222 }
223 
224 static int
225 hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)
226 {
227 	struct hns3_qs_weight_cmd *weight;
228 	struct hns3_cmd_desc desc;
229 
230 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);
231 
232 	weight = (struct hns3_qs_weight_cmd *)desc.data;
233 
234 	weight->qs_id = rte_cpu_to_le_16(qs_id);
235 	weight->dwrr = dwrr;
236 
237 	return hns3_cmd_send(hw, &desc, 1);
238 }
239 
240 static int
241 hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
242 {
243 #define DEFAULT_TC_WEIGHT	1
244 #define DEFAULT_TC_OFFSET	14
245 	struct hns3_ets_tc_weight_cmd *ets_weight;
246 	struct hns3_cmd_desc desc;
247 	uint8_t i;
248 
249 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);
250 	ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;
251 
252 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
253 		struct hns3_pg_info *pg_info;
254 
255 		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
256 
257 		if (!(hw->hw_tc_map & BIT(i)))
258 			continue;
259 
260 		pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
261 		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
262 	}
263 
264 	ets_weight->weight_offset = DEFAULT_TC_OFFSET;
265 
266 	return hns3_cmd_send(hw, &desc, 1);
267 }
268 
269 static int
270 hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)
271 {
272 	struct hns3_priority_weight_cmd *weight;
273 	struct hns3_cmd_desc desc;
274 
275 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);
276 
277 	weight = (struct hns3_priority_weight_cmd *)desc.data;
278 
279 	weight->pri_id = pri_id;
280 	weight->dwrr = dwrr;
281 
282 	return hns3_cmd_send(hw, &desc, 1);
283 }
284 
285 static int
286 hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)
287 {
288 	struct hns3_pg_weight_cmd *weight;
289 	struct hns3_cmd_desc desc;
290 
291 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);
292 
293 	weight = (struct hns3_pg_weight_cmd *)desc.data;
294 
295 	weight->pg_id = pg_id;
296 	weight->dwrr = dwrr;
297 
298 	return hns3_cmd_send(hw, &desc, 1);
299 }
300 static int
301 hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)
302 {
303 	struct hns3_cmd_desc desc;
304 
305 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);
306 
307 	if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)
308 		desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
309 	else
310 		desc.data[1] = 0;
311 
312 	desc.data[0] = rte_cpu_to_le_32(pg_id);
313 
314 	return hns3_cmd_send(hw, &desc, 1);
315 }
316 
317 static uint32_t
318 hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
319 			   uint8_t bs_b, uint8_t bs_s)
320 {
321 	uint32_t shapping_para = 0;
322 
323 	hns3_dcb_set_field(shapping_para, IR_B, ir_b);
324 	hns3_dcb_set_field(shapping_para, IR_U, ir_u);
325 	hns3_dcb_set_field(shapping_para, IR_S, ir_s);
326 	hns3_dcb_set_field(shapping_para, BS_B, bs_b);
327 	hns3_dcb_set_field(shapping_para, BS_S, bs_s);
328 
329 	return shapping_para;
330 }
331 
332 int
333 hns3_dcb_port_shaper_cfg(struct hns3_hw *hw)
334 {
335 	struct hns3_port_shapping_cmd *shap_cfg_cmd;
336 	struct hns3_shaper_parameter shaper_parameter;
337 	uint32_t shapping_para;
338 	uint32_t ir_u, ir_b, ir_s;
339 	struct hns3_cmd_desc desc;
340 	int ret;
341 
342 	ret = hns3_shaper_para_calc(hw, hw->mac.link_speed,
343 				    HNS3_SHAPER_LVL_PORT, &shaper_parameter);
344 	if (ret) {
345 		hns3_err(hw, "calculate shaper parameter failed: %d", ret);
346 		return ret;
347 	}
348 
349 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);
350 	shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;
351 
352 	ir_b = shaper_parameter.ir_b;
353 	ir_u = shaper_parameter.ir_u;
354 	ir_s = shaper_parameter.ir_s;
355 	shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
356 						   HNS3_SHAPER_BS_U_DEF,
357 						   HNS3_SHAPER_BS_S_DEF);
358 
359 	shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);
360 
361 	/*
362 	 * Configure the port_rate and set bit HNS3_TM_RATE_VLD_B of the flag
363 	 * field in hns3_port_shapping_cmd to ask the firmware to recalculate
364 	 * the shapping parameters. Whether they are actually recalculated
365 	 * depends on the firmware version, so the driver still calculates
366 	 * them and passes them to the firmware for better compatibility.
367 	 */
368 	shap_cfg_cmd->port_rate = rte_cpu_to_le_32(hw->mac.link_speed);
369 	hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
370 
371 	return hns3_cmd_send(hw, &desc, 1);
372 }
373 
374 static int
375 hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
376 			 uint8_t pg_id, uint32_t shapping_para, uint32_t rate)
377 {
378 	struct hns3_pg_shapping_cmd *shap_cfg_cmd;
379 	enum hns3_opcode_type opcode;
380 	struct hns3_cmd_desc desc;
381 
382 	opcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :
383 		 HNS3_OPC_TM_PG_C_SHAPPING;
384 	hns3_cmd_setup_basic_desc(&desc, opcode, false);
385 
386 	shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;
387 
388 	shap_cfg_cmd->pg_id = pg_id;
389 
390 	shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);
391 
392 	/*
393 	 * Configure the pg_rate and set bit HNS3_TM_RATE_VLD_B of the flag
394 	 * field in hns3_pg_shapping_cmd to ask the firmware to recalculate
395 	 * the shapping parameters. Whether they are actually recalculated
396 	 * depends on the firmware version, so the driver still calculates
397 	 * them and passes them to the firmware for better compatibility.
398 	 */
399 	shap_cfg_cmd->pg_rate = rte_cpu_to_le_32(rate);
400 	hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
401 
402 	return hns3_cmd_send(hw, &desc, 1);
403 }
404 
405 static int
406 hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
407 {
408 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
409 	struct hns3_shaper_parameter shaper_parameter;
410 	struct hns3_pf *pf = &hns->pf;
411 	uint32_t ir_u, ir_b, ir_s;
412 	uint32_t shaper_para;
413 	uint32_t rate;
414 	uint8_t i;
415 	int ret;
416 
417 	/* Cfg pg schd */
418 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
419 		return -EINVAL;
420 
421 	/* Pg to pri */
422 	for (i = 0; i < hw->dcb_info.num_pg; i++) {
423 		rate = hw->dcb_info.pg_info[i].bw_limit;
424 
425 		/* Calc shaper para */
426 		ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PG,
427 					    &shaper_parameter);
428 		if (ret) {
429 			hns3_err(hw, "calculate shaper parameter failed: %d",
430 				 ret);
431 			return ret;
432 		}
433 
434 		shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
435 							 HNS3_SHAPER_BS_U_DEF,
436 							 HNS3_SHAPER_BS_S_DEF);
437 
438 		ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
439 					       shaper_para, rate);
440 		if (ret) {
441 			hns3_err(hw,
442 				 "config PG CIR shaper parameter failed: %d",
443 				 ret);
444 			return ret;
445 		}
446 
447 		ir_b = shaper_parameter.ir_b;
448 		ir_u = shaper_parameter.ir_u;
449 		ir_s = shaper_parameter.ir_s;
450 		shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
451 							 HNS3_SHAPER_BS_U_DEF,
452 							 HNS3_SHAPER_BS_S_DEF);
453 
454 		ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
455 					       shaper_para, rate);
456 		if (ret) {
457 			hns3_err(hw,
458 				 "config PG PIR shaper parameter failed: %d",
459 				 ret);
460 			return ret;
461 		}
462 	}
463 
464 	return 0;
465 }
466 
467 static int
468 hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)
469 {
470 	struct hns3_cmd_desc desc;
471 
472 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);
473 
474 	if (mode == HNS3_SCH_MODE_DWRR)
475 		desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
476 	else
477 		desc.data[1] = 0;
478 
479 	desc.data[0] = rte_cpu_to_le_32(qs_id);
480 
481 	return hns3_cmd_send(hw, &desc, 1);
482 }
483 
484 static int
485 hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)
486 {
487 	struct hns3_cmd_desc desc;
488 
489 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);
490 
491 	if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)
492 		desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
493 	else
494 		desc.data[1] = 0;
495 
496 	desc.data[0] = rte_cpu_to_le_32(pri_id);
497 
498 	return hns3_cmd_send(hw, &desc, 1);
499 }
500 
501 static int
502 hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
503 			  uint8_t pri_id, uint32_t shapping_para, uint32_t rate)
504 {
505 	struct hns3_pri_shapping_cmd *shap_cfg_cmd;
506 	enum hns3_opcode_type opcode;
507 	struct hns3_cmd_desc desc;
508 
509 	opcode = bucket ? HNS3_OPC_TM_PRI_P_SHAPPING :
510 		 HNS3_OPC_TM_PRI_C_SHAPPING;
511 
512 	hns3_cmd_setup_basic_desc(&desc, opcode, false);
513 
514 	shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;
515 
516 	shap_cfg_cmd->pri_id = pri_id;
517 
518 	shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);
519 
520 	/*
521 	 * Configure the pri_rate and set bit HNS3_TM_RATE_VLD_B of the flag
522 	 * field in hns3_pri_shapping_cmd to ask the firmware to recalculate
523 	 * the shapping parameters. Whether they are actually recalculated
524 	 * depends on the firmware version, so the driver still calculates
525 	 * them and passes them to the firmware for better compatibility.
526 	 */
527 	shap_cfg_cmd->pri_rate = rte_cpu_to_le_32(rate);
528 	hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
529 
530 	return hns3_cmd_send(hw, &desc, 1);
531 }
532 
533 static int
534 hns3_dcb_pri_tc_base_shaper_cfg(struct hns3_hw *hw)
535 {
536 	struct hns3_shaper_parameter shaper_parameter;
537 	uint32_t ir_u, ir_b, ir_s;
538 	uint32_t shaper_para;
539 	uint32_t rate;
540 	int ret, i;
541 
542 	for (i = 0; i < hw->dcb_info.num_tc; i++) {
543 		rate = hw->dcb_info.tc_info[i].bw_limit;
544 		ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PRI,
545 					    &shaper_parameter);
546 		if (ret) {
547 			hns3_err(hw, "calculate shaper parameter failed: %d",
548 				 ret);
549 			return ret;
550 		}
551 
552 		shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
553 							 HNS3_SHAPER_BS_U_DEF,
554 							 HNS3_SHAPER_BS_S_DEF);
555 
556 		ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, i,
557 						shaper_para, rate);
558 		if (ret) {
559 			hns3_err(hw,
560 				 "config priority CIR shaper parameter failed: %d",
561 				 ret);
562 			return ret;
563 		}
564 
565 		ir_b = shaper_parameter.ir_b;
566 		ir_u = shaper_parameter.ir_u;
567 		ir_s = shaper_parameter.ir_s;
568 		shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
569 							 HNS3_SHAPER_BS_U_DEF,
570 							 HNS3_SHAPER_BS_S_DEF);
571 
572 		ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, i,
573 						shaper_para, rate);
574 		if (ret) {
575 			hns3_err(hw,
576 				 "config priority PIR shaper parameter failed: %d",
577 				 ret);
578 			return ret;
579 		}
580 	}
581 
582 	return 0;
583 }
584 
585 
586 static int
587 hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
588 {
589 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
590 	struct hns3_pf *pf = &hns->pf;
591 	int ret;
592 
593 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
594 		return -EINVAL;
595 
596 	ret = hns3_dcb_pri_tc_base_shaper_cfg(hw);
597 	if (ret)
598 		hns3_err(hw, "config pri shaper failed: %d", ret);
599 
600 	return ret;
601 }
602 
603 static int
604 hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
605 {
606 	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
607 	uint16_t rx_qnum_per_tc;
608 	uint16_t used_rx_queues;
609 	int i;
610 
611 	rx_qnum_per_tc = nb_rx_q / hw->num_tc;
612 	if (rx_qnum_per_tc > hw->rss_size_max) {
613 		hns3_err(hw, "rx queue number per tc (%u) is greater than "
614 			 "the value (%u) supported by hardware.",
615 			 rx_qnum_per_tc, hw->rss_size_max);
616 		return -EINVAL;
617 	}
618 
619 	used_rx_queues = hw->num_tc * rx_qnum_per_tc;
620 	if (used_rx_queues != nb_rx_q) {
621 		hns3_err(hw, "rx queue number (%u) configured must be an "
622 			 "integral multiple of valid tc number (%u).",
623 			 nb_rx_q, hw->num_tc);
624 		return -EINVAL;
625 	}
626 	hw->alloc_rss_size = rx_qnum_per_tc;
627 	hw->used_rx_queues = used_rx_queues;
628 
629 	/*
630 	 * When the rss size is changed, we need to update the rss redirection
631 	 * table maintained by the driver. Besides, during the entire reset
632 	 * process, we need to ensure that the rss table information is not
633 	 * overwritten and is configured directly to the hardware in the
634 	 * RESET_STAGE_RESTORE stage of the reset process.
635 	 */
636 	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
637 		for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++)
638 			rss_cfg->rss_indirection_tbl[i] =
639 							i % hw->alloc_rss_size;
640 	}
641 
642 	return 0;
643 }
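/*
 * Example (illustrative values): with nb_rx_q = 16 and num_tc = 4,
 * rx_qnum_per_tc = 4, so alloc_rss_size = 4 and every redirection
 * table entry becomes i % 4, cycling through queues 0..3 of each TC.
 * nb_rx_q = 15 would be rejected because 4 * (15 / 4) = 12 != 15.
 */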
644 
645 static int
646 hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_tx_q)
647 {
648 	struct hns3_tc_queue_info *tc_queue;
649 	uint16_t used_tx_queues;
650 	uint16_t tx_qnum_per_tc;
651 	uint8_t i;
652 
653 	tx_qnum_per_tc = nb_tx_q / hw->num_tc;
654 	used_tx_queues = hw->num_tc * tx_qnum_per_tc;
655 	if (used_tx_queues != nb_tx_q) {
656 		hns3_err(hw, "tx queue number (%u) configured must be an "
657 			 "integral multiple of valid tc number (%u).",
658 			 nb_tx_q, hw->num_tc);
659 		return -EINVAL;
660 	}
661 
662 	hw->used_tx_queues = used_tx_queues;
663 	hw->tx_qnum_per_tc = tx_qnum_per_tc;
664 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
665 		tc_queue = &hw->tc_queue[i];
666 		if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
667 			tc_queue->enable = true;
668 			tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
669 			tc_queue->tqp_count = hw->tx_qnum_per_tc;
670 			tc_queue->tc = i;
671 		} else {
672 			/* Set to default queue if TC is disabled */
673 			tc_queue->enable = false;
674 			tc_queue->tqp_offset = 0;
675 			tc_queue->tqp_count = 0;
676 			tc_queue->tc = 0;
677 		}
678 	}
679 
680 	return 0;
681 }
682 
683 int
684 hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q)
685 {
686 	int ret;
687 
688 	ret = hns3_set_rss_size(hw, nb_rx_q);
689 	if (ret)
690 		return ret;
691 
692 	return hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
693 }
694 
695 static int
696 hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
697 				 uint16_t nb_tx_q)
698 {
699 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
700 	struct hns3_pf *pf = &hns->pf;
701 	int ret;
702 
703 	hw->num_tc = hw->dcb_info.num_tc;
704 	ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
705 	if (ret)
706 		return ret;
707 
708 	if (!hns->is_vf)
709 		memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
710 
711 	return 0;
712 }
713 
714 int
715 hns3_dcb_info_init(struct hns3_hw *hw)
716 {
717 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
718 	struct hns3_pf *pf = &hns->pf;
719 	int i, k;
720 
721 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
722 	    hw->dcb_info.num_pg != 1)
723 		return -EINVAL;
724 
725 	/* Initializing PG information */
726 	memset(hw->dcb_info.pg_info, 0,
727 	       sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
728 	for (i = 0; i < hw->dcb_info.num_pg; i++) {
729 		hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;
730 		hw->dcb_info.pg_info[i].pg_id = i;
731 		hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;
732 		hw->dcb_info.pg_info[i].bw_limit = hw->max_tm_rate;
733 
734 		if (i != 0)
735 			continue;
736 
737 		hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;
738 		for (k = 0; k < hw->dcb_info.num_tc; k++)
739 			hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;
740 	}
741 
742 	/* All UPs mapping to TC0 */
743 	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
744 		hw->dcb_info.prio_tc[i] = 0;
745 
746 	/* Initializing tc information */
747 	memset(hw->dcb_info.tc_info, 0,
748 	       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
749 	for (i = 0; i < hw->dcb_info.num_tc; i++) {
750 		hw->dcb_info.tc_info[i].tc_id = i;
751 		hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
752 		hw->dcb_info.tc_info[i].pgid = 0;
753 		hw->dcb_info.tc_info[i].bw_limit =
754 			hw->dcb_info.pg_info[0].bw_limit;
755 	}
756 
757 	return 0;
758 }
759 
760 static int
761 hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)
762 {
763 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
764 	struct hns3_pf *pf = &hns->pf;
765 	int ret, i;
766 
767 	/* Only configured in TC-based scheduler mode */
768 	if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)
769 		return -EINVAL;
770 
771 	for (i = 0; i < hw->dcb_info.num_pg; i++) {
772 		ret = hns3_dcb_pg_schd_mode_cfg(hw, i);
773 		if (ret)
774 			return ret;
775 	}
776 
777 	return 0;
778 }
779 
780 static int
781 hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)
782 {
783 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
784 	struct hns3_pf *pf = &hns->pf;
785 	uint8_t i;
786 	int ret;
787 
788 	if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {
789 		for (i = 0; i < hw->dcb_info.num_tc; i++) {
790 			ret = hns3_dcb_pri_schd_mode_cfg(hw, i);
791 			if (ret)
792 				return ret;
793 
794 			ret = hns3_dcb_qs_schd_mode_cfg(hw, i,
795 							HNS3_SCH_MODE_DWRR);
796 			if (ret)
797 				return ret;
798 		}
799 	}
800 
801 	return 0;
802 }
803 
804 static int
805 hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)
806 {
807 	int ret;
808 
809 	ret = hns3_dcb_lvl2_schd_mode_cfg(hw);
810 	if (ret) {
811 		hns3_err(hw, "config lvl2_schd_mode failed: %d", ret);
812 		return ret;
813 	}
814 
815 	ret = hns3_dcb_lvl34_schd_mode_cfg(hw);
816 	if (ret)
817 		hns3_err(hw, "config lvl34_schd_mode failed: %d", ret);
818 
819 	return ret;
820 }
821 
822 static int
823 hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)
824 {
825 	struct hns3_pg_info *pg_info;
826 	uint8_t dwrr;
827 	int ret, i;
828 
829 	for (i = 0; i < hw->dcb_info.num_tc; i++) {
830 		pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
831 		dwrr = pg_info->tc_dwrr[i];
832 
833 		ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);
834 		if (ret) {
835 			hns3_err(hw,
836 			       "fail to send priority weight cmd: %d, ret = %d",
837 			       i, ret);
838 			return ret;
839 		}
840 
841 		ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);
842 		if (ret) {
843 			hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d",
844 				 i, ret);
845 			return ret;
846 		}
847 	}
848 
849 	return 0;
850 }
851 
852 static int
853 hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
854 {
855 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
856 	struct hns3_pf *pf = &hns->pf;
857 	uint32_t version;
858 	int ret;
859 
860 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
861 		return -EINVAL;
862 
863 	ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);
864 	if (ret)
865 		return ret;
866 
867 	if (!hns3_dev_dcb_supported(hw))
868 		return 0;
869 
870 	ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
871 	if (ret == -EOPNOTSUPP) {
872 		version = hw->fw_version;
873 		hns3_warn(hw,
874 			  "fw %lu.%lu.%lu.%lu doesn't support ets tc weight cmd",
875 			  hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
876 					 HNS3_FW_VERSION_BYTE3_S),
877 			  hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
878 					 HNS3_FW_VERSION_BYTE2_S),
879 			  hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
880 					 HNS3_FW_VERSION_BYTE1_S),
881 			  hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
882 					 HNS3_FW_VERSION_BYTE0_S));
883 		ret = 0;
884 	}
885 
886 	return ret;
887 }
888 
889 static int
890 hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)
891 {
892 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
893 	struct hns3_pf *pf = &hns->pf;
894 	int ret, i;
895 
896 	/* Cfg pg schd */
897 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
898 		return -EINVAL;
899 
900 	/* Cfg pg to prio */
901 	for (i = 0; i < hw->dcb_info.num_pg; i++) {
902 		/* Cfg dwrr */
903 		ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);
904 		if (ret)
905 			return ret;
906 	}
907 
908 	return 0;
909 }
910 
911 static int
912 hns3_dcb_dwrr_cfg(struct hns3_hw *hw)
913 {
914 	int ret;
915 
916 	ret = hns3_dcb_pg_dwrr_cfg(hw);
917 	if (ret) {
918 		hns3_err(hw, "config pg_dwrr failed: %d", ret);
919 		return ret;
920 	}
921 
922 	ret = hns3_dcb_pri_dwrr_cfg(hw);
923 	if (ret)
924 		hns3_err(hw, "config pri_dwrr failed: %d", ret);
925 
926 	return ret;
927 }
928 
929 static int
930 hns3_dcb_shaper_cfg(struct hns3_hw *hw)
931 {
932 	int ret;
933 
934 	ret = hns3_dcb_port_shaper_cfg(hw);
935 	if (ret) {
936 		hns3_err(hw, "config port shaper failed: %d", ret);
937 		return ret;
938 	}
939 
940 	ret = hns3_dcb_pg_shaper_cfg(hw);
941 	if (ret) {
942 		hns3_err(hw, "config pg shaper failed: %d", ret);
943 		return ret;
944 	}
945 
946 	return hns3_dcb_pri_shaper_cfg(hw);
947 }
948 
949 static int
950 hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)
951 {
952 	struct hns3_nq_to_qs_link_cmd *map;
953 	struct hns3_cmd_desc desc;
954 	uint16_t tmp_qs_id = 0;
955 	uint16_t qs_id_l;
956 	uint16_t qs_id_h;
957 
958 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);
959 
960 	map = (struct hns3_nq_to_qs_link_cmd *)desc.data;
961 
962 	map->nq_id = rte_cpu_to_le_16(q_id);
963 
964 	/*
965 	 * Network engine with revision_id 0x21 uses bits 0~9 of qs_id to
966 	 * configure qset_id. So we need to convert qs_id to the following
967 	 * format to support qset_id > 1024.
968 	 * qs_id: | 15 | 14 ~ 10 |  9 ~ 0   |
969 	 *            /         / \         \
970 	 *           /         /   \         \
971 	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
972 	 *          | qs_id_h | vld | qs_id_l |
973 	 */
974 	qs_id_l = hns3_get_field(qs_id, HNS3_DCB_QS_ID_L_MSK,
975 				 HNS3_DCB_QS_ID_L_S);
976 	qs_id_h = hns3_get_field(qs_id, HNS3_DCB_QS_ID_H_MSK,
977 				 HNS3_DCB_QS_ID_H_S);
978 	hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_L_MSK, HNS3_DCB_QS_ID_L_S,
979 		       qs_id_l);
980 	hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_H_EXT_MSK,
981 		       HNS3_DCB_QS_ID_H_EXT_S, qs_id_h);
982 	map->qset_id = rte_cpu_to_le_16(tmp_qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);
983 
984 	return hns3_cmd_send(hw, &desc, 1);
985 }
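/*
 * Example of the qs_id conversion above (illustrative values): for
 * qs_id = 0x4A3, qs_id_l = 0xA3 (bits 9~0) and qs_id_h = 0x1
 * (bits 15~10), so tmp_qs_id = 0xA3 | (0x1 << 11) = 0x8A3, and
 * HNS3_DCB_Q_QS_LINK_VLD_MSK then sets the vld bit from the diagram.
 */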
986 
987 static int
988 hns3_q_to_qs_map(struct hns3_hw *hw)
989 {
990 	struct hns3_tc_queue_info *tc_queue;
991 	uint16_t q_id;
992 	uint32_t i, j;
993 	int ret;
994 
995 	for (i = 0; i < hw->num_tc; i++) {
996 		tc_queue = &hw->tc_queue[i];
997 		for (j = 0; j < tc_queue->tqp_count; j++) {
998 			q_id = tc_queue->tqp_offset + j;
999 			ret = hns3_q_to_qs_map_cfg(hw, q_id, i);
1000 			if (ret)
1001 				return ret;
1002 		}
1003 	}
1004 
1005 	return 0;
1006 }
1007 
1008 static int
1009 hns3_pri_q_qs_cfg(struct hns3_hw *hw)
1010 {
1011 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1012 	struct hns3_pf *pf = &hns->pf;
1013 	uint32_t i;
1014 	int ret;
1015 
1016 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
1017 		return -EINVAL;
1018 
1019 	/* Cfg qs -> pri mapping */
1020 	for (i = 0; i < hw->num_tc; i++) {
1021 		ret = hns3_qs_to_pri_map_cfg(hw, i, i);
1022 		if (ret) {
1023 			hns3_err(hw, "qs_to_pri mapping fail: %d", ret);
1024 			return ret;
1025 		}
1026 	}
1027 
1028 	/* Cfg q -> qs mapping */
1029 	ret = hns3_q_to_qs_map(hw);
1030 	if (ret)
1031 		hns3_err(hw, "nq_to_qs mapping fail: %d", ret);
1032 
1033 	return ret;
1034 }
1035 
1036 static int
1037 hns3_dcb_map_cfg(struct hns3_hw *hw)
1038 {
1039 	int ret;
1040 
1041 	ret = hns3_up_to_tc_map(hw);
1042 	if (ret) {
1043 		hns3_err(hw, "up_to_tc mapping fail: %d", ret);
1044 		return ret;
1045 	}
1046 
1047 	ret = hns3_pg_to_pri_map(hw);
1048 	if (ret) {
1049 		hns3_err(hw, "pg_to_pri mapping fail: %d", ret);
1050 		return ret;
1051 	}
1052 
1053 	return hns3_pri_q_qs_cfg(hw);
1054 }
1055 
1056 static int
1057 hns3_dcb_schd_setup_hw(struct hns3_hw *hw)
1058 {
1059 	int ret;
1060 
1061 	/* Cfg dcb mapping  */
1062 	ret = hns3_dcb_map_cfg(hw);
1063 	if (ret)
1064 		return ret;
1065 
1066 	/* Cfg dcb shaper */
1067 	ret = hns3_dcb_shaper_cfg(hw);
1068 	if (ret)
1069 		return ret;
1070 
1071 	/* Cfg dwrr */
1072 	ret = hns3_dcb_dwrr_cfg(hw);
1073 	if (ret)
1074 		return ret;
1075 
1076 	/* Cfg schd mode for each level schd */
1077 	return hns3_dcb_schd_mode_cfg(hw);
1078 }
1079 
1080 static int
1081 hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,
1082 		     uint8_t pause_trans_gap, uint16_t pause_trans_time)
1083 {
1084 	struct hns3_cfg_pause_param_cmd *pause_param;
1085 	struct hns3_cmd_desc desc;
1086 
1087 	pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1088 
1089 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);
1090 
1091 	memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);
1092 	memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);
1093 	pause_param->pause_trans_gap = pause_trans_gap;
1094 	pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);
1095 
1096 	return hns3_cmd_send(hw, &desc, 1);
1097 }
1098 
1099 int
1100 hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)
1101 {
1102 	struct hns3_cfg_pause_param_cmd *pause_param;
1103 	struct hns3_cmd_desc desc;
1104 	uint16_t trans_time;
1105 	uint8_t trans_gap;
1106 	int ret;
1107 
1108 	pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1109 
1110 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);
1111 
1112 	ret = hns3_cmd_send(hw, &desc, 1);
1113 	if (ret)
1114 		return ret;
1115 
1116 	trans_gap = pause_param->pause_trans_gap;
1117 	trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);
1118 
1119 	return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);
1120 }
1121 
1122 static int
1123 hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
1124 {
1125 #define PAUSE_TIME_DIV_BY	2
1126 #define PAUSE_TIME_MIN_VALUE	0x4
1127 
1128 	struct hns3_mac *mac = &hw->mac;
1129 	uint8_t pause_trans_gap;
1130 
1131 	/*
1132 	 * Pause transmit gap must be less than "pause_time / 2", otherwise
1133 	 * the behavior of MAC is undefined.
1134 	 */
1135 	if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1136 		pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
1137 	else if (pause_time >= PAUSE_TIME_MIN_VALUE &&
1138 		 pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1139 		pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1140 	else {
1141 		hns3_warn(hw, "pause_time(%u) is adjusted to 4", pause_time);
1142 		pause_time = PAUSE_TIME_MIN_VALUE;
1143 		pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1144 	}
1145 
1146 	return hns3_pause_param_cfg(hw, mac->mac_addr,
1147 				    pause_trans_gap, pause_time);
1148 }
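/*
 * Example of the gap selection above (illustrative values): with the
 * 802.3x maximum pause_time = 0xFFFF, the first branch caps the gap at
 * HNS3_DEFAULT_PAUSE_TRANS_GAP. Assuming 100 does not exceed
 * 2 * HNS3_DEFAULT_PAUSE_TRANS_GAP, pause_time = 100 takes the middle
 * branch and yields pause_trans_gap = 100 / 2 - 1 = 49, which stays
 * below half of the pause time as required above.
 */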
1149 
1150 static int
1151 hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)
1152 {
1153 	struct hns3_cmd_desc desc;
1154 
1155 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);
1156 
1157 	desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1158 		(rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1159 
1160 	return hns3_cmd_send(hw, &desc, 1);
1161 }
1162 
1163 static int
1164 hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)
1165 {
1166 	struct hns3_cmd_desc desc;
1167 	struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;
1168 
1169 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);
1170 
1171 	pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1172 					(rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1173 
1174 	pfc->pri_en_bitmap = pfc_bitmap;
1175 
1176 	return hns3_cmd_send(hw, &desc, 1);
1177 }
1178 
1179 static int
1180 hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)
1181 {
1182 	struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
1183 	struct hns3_cmd_desc desc;
1184 
1185 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);
1186 
1187 	bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;
1188 
1189 	bp_to_qs_map_cmd->tc_id = tc;
1190 	bp_to_qs_map_cmd->qs_group_id = grp_id;
1191 	bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);
1192 
1193 	return hns3_cmd_send(hw, &desc, 1);
1194 }
1195 
1196 static void
1197 hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
1198 {
1199 	switch (hw->current_mode) {
1200 	case HNS3_FC_NONE:
1201 		*tx_en = false;
1202 		*rx_en = false;
1203 		break;
1204 	case HNS3_FC_RX_PAUSE:
1205 		*tx_en = false;
1206 		*rx_en = true;
1207 		break;
1208 	case HNS3_FC_TX_PAUSE:
1209 		*tx_en = true;
1210 		*rx_en = false;
1211 		break;
1212 	case HNS3_FC_FULL:
1213 		*tx_en = true;
1214 		*rx_en = true;
1215 		break;
1216 	default:
1217 		*tx_en = false;
1218 		*rx_en = false;
1219 		break;
1220 	}
1221 }
1222 
1223 static int
1224 hns3_mac_pause_setup_hw(struct hns3_hw *hw)
1225 {
1226 	bool tx_en, rx_en;
1227 
1228 	if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
1229 		hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1230 	else {
1231 		tx_en = false;
1232 		rx_en = false;
1233 	}
1234 
1235 	return hns3_mac_pause_en_cfg(hw, tx_en, rx_en);
1236 }
1237 
1238 static int
1239 hns3_pfc_setup_hw(struct hns3_hw *hw)
1240 {
1241 	bool tx_en, rx_en;
1242 
1243 	if (hw->current_fc_status == HNS3_FC_STATUS_PFC)
1244 		hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1245 	else {
1246 		tx_en = false;
1247 		rx_en = false;
1248 	}
1249 
1250 	return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);
1251 }
1252 
1253 /*
1254  * Each TC has 1024 queue sets for backpressure; they are divided into
1255  * 32 groups, each containing 32 queue sets, so a group can be
1256  * represented by a uint32_t bitmap.
1257  */
1258 static int
1259 hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)
1260 {
1261 	uint32_t qs_bitmap;
1262 	int ret;
1263 	int i;
1264 
1265 	for (i = 0; i < HNS3_BP_GRP_NUM; i++) {
1266 		uint8_t grp, sub_grp;
1267 		qs_bitmap = 0;
1268 
1269 		grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
1270 		sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,
1271 					 HNS3_BP_SUB_GRP_ID_S);
1272 		if (i == grp)
1273 			qs_bitmap |= (1 << sub_grp);
1274 
1275 		ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);
1276 		if (ret)
1277 			return ret;
1278 	}
1279 
1280 	return 0;
1281 }
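/*
 * Example (illustrative values): for tc = 5, grp works out to 0 and
 * sub_grp to 5, so only the iteration with i == 0 programs
 * qs_bitmap = 1 << 5 = 0x20; the other 31 groups are written with an
 * all-zero bitmap.
 */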
1282 
1283 static int
1284 hns3_dcb_bp_setup(struct hns3_hw *hw)
1285 {
1286 	int ret, i;
1287 
1288 	for (i = 0; i < hw->dcb_info.num_tc; i++) {
1289 		ret = hns3_bp_setup_hw(hw, i);
1290 		if (ret)
1291 			return ret;
1292 	}
1293 
1294 	return 0;
1295 }
1296 
1297 static int
1298 hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
1299 {
1300 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1301 	struct hns3_pf *pf = &hns->pf;
1302 	int ret;
1303 
1304 	ret = hns3_pause_param_setup_hw(hw, pf->pause_time);
1305 	if (ret) {
1306 		hns3_err(hw, "Fail to set pause parameter. ret = %d", ret);
1307 		return ret;
1308 	}
1309 
1310 	ret = hns3_mac_pause_setup_hw(hw);
1311 	if (ret) {
1312 		hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret);
1313 		return ret;
1314 	}
1315 
1316 	/* Only DCB-capable devices support the qset backpressure and pfc commands */
1317 	if (!hns3_dev_dcb_supported(hw))
1318 		return 0;
1319 
1320 	ret = hns3_pfc_setup_hw(hw);
1321 	if (ret) {
1322 		hns3_err(hw, "config pfc failed! ret = %d", ret);
1323 		return ret;
1324 	}
1325 
1326 	return hns3_dcb_bp_setup(hw);
1327 }
1328 
1329 static uint8_t
1330 hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)
1331 {
1332 	uint8_t pfc_map = 0;
1333 	uint8_t *prio_tc;
1334 	uint8_t i, j;
1335 
1336 	prio_tc = hw->dcb_info.prio_tc;
1337 	for (i = 0; i < hw->dcb_info.num_tc; i++) {
1338 		for (j = 0; j < HNS3_MAX_USER_PRIO; j++) {
1339 			if (prio_tc[j] == i && pfc_en & BIT(j)) {
1340 				pfc_map |= BIT(i);
1341 				break;
1342 			}
1343 		}
1344 	}
1345 
1346 	return pfc_map;
1347 }
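/*
 * Example (illustrative values): with prio_tc = {0, 0, 1, 1, 2, 2, 3, 3}
 * and pfc_en = BIT(2) | BIT(3) (PFC enabled on user priorities 2 and 3,
 * both mapped to TC1), the returned pfc_map is BIT(1): only TC1 becomes
 * undroppable.
 */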
1348 
1349 static void
1350 hns3_dcb_cfg_validate(struct hns3_adapter *hns, uint8_t *tc, bool *changed)
1351 {
1352 	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1353 	struct hns3_hw *hw = &hns->hw;
1354 	uint16_t nb_rx_q = hw->data->nb_rx_queues;
1355 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
1356 	uint8_t max_tc = 0;
1357 	uint8_t pfc_en;
1358 	int i;
1359 
1360 	dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1361 	for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1362 		if (dcb_rx_conf->dcb_tc[i] != hw->dcb_info.prio_tc[i])
1363 			*changed = true;
1364 
1365 		if (dcb_rx_conf->dcb_tc[i] > max_tc)
1366 			max_tc = dcb_rx_conf->dcb_tc[i];
1367 	}
1368 	*tc = max_tc + 1;
1369 	if (*tc != hw->dcb_info.num_tc)
1370 		*changed = true;
1371 
1372 	/*
1373 	 * We ensure that dcb information can be reconfigured
1374 	 * after the hns3_priority_flow_ctrl_set function is called.
1375 	 */
1376 	if (hw->current_mode != HNS3_FC_FULL)
1377 		*changed = true;
1378 	pfc_en = RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1379 	if (hw->dcb_info.pfc_en != pfc_en)
1380 		*changed = true;
1381 
1382 	/* tx/rx queue number is reconfigured. */
1383 	if (nb_rx_q != hw->used_rx_queues || nb_tx_q != hw->used_tx_queues)
1384 		*changed = true;
1385 }
1386 
1387 static int
1388 hns3_dcb_info_cfg(struct hns3_adapter *hns)
1389 {
1390 	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1391 	struct hns3_pf *pf = &hns->pf;
1392 	struct hns3_hw *hw = &hns->hw;
1393 	uint8_t tc_bw, bw_rest;
1394 	uint8_t i, j;
1395 	int ret;
1396 
1397 	dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1398 	pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
1399 	pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;
1400 
1401 	/* Config pg0 */
1402 	memset(hw->dcb_info.pg_info, 0,
1403 	       sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
1404 	hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT;
1405 	hw->dcb_info.pg_info[0].pg_id = 0;
1406 	hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;
1407 	hw->dcb_info.pg_info[0].bw_limit = hw->max_tm_rate;
1408 	hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;
1409 
1410 	/* Each tc has same bw for valid tc by default */
1411 	tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;
1412 	for (i = 0; i < hw->dcb_info.num_tc; i++)
1413 		hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;
1414 	/* To ensure the sum of tc_dwrr is equal to 100 */
1415 	bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;
1416 	for (j = 0; j < bw_rest; j++)
1417 		hw->dcb_info.pg_info[0].tc_dwrr[j]++;
1418 	for (; i < dcb_rx_conf->nb_tcs; i++)
1419 		hw->dcb_info.pg_info[0].tc_dwrr[i] = 0;
1420 
1421 	/* All tcs map to pg0 */
1422 	memset(hw->dcb_info.tc_info, 0,
1423 	       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
1424 	for (i = 0; i < hw->dcb_info.num_tc; i++) {
1425 		hw->dcb_info.tc_info[i].tc_id = i;
1426 		hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
1427 		hw->dcb_info.tc_info[i].pgid = 0;
1428 		hw->dcb_info.tc_info[i].bw_limit =
1429 					hw->dcb_info.pg_info[0].bw_limit;
1430 	}
1431 
1432 	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
1433 		hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];
1434 
1435 	ret = hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
1436 					       hw->data->nb_tx_queues);
1437 	if (ret)
1438 		hns3_err(hw, "update tc queue mapping failed, ret = %d.", ret);
1439 
1440 	return ret;
1441 }
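/*
 * Example of the DWRR split above (illustrative values): with
 * num_tc = 3, tc_bw = 100 / 3 = 33 and bw_rest = 100 % 3 = 1, so
 * tc_dwrr becomes {34, 33, 33} and the weights sum to exactly 100.
 */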
1442 
1443 static int
1444 hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
1445 {
1446 	struct hns3_pf *pf = &hns->pf;
1447 	struct hns3_hw *hw = &hns->hw;
1448 	uint16_t nb_rx_q = hw->data->nb_rx_queues;
1449 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
1450 	uint8_t bit_map = 0;
1451 	uint8_t i;
1452 
1453 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1454 	    hw->dcb_info.num_pg != 1)
1455 		return -EINVAL;
1456 
1457 	if (nb_rx_q < num_tc) {
1458 		hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).",
1459 			 nb_rx_q, num_tc);
1460 		return -EINVAL;
1461 	}
1462 
1463 	if (nb_tx_q < num_tc) {
1464 		hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).",
1465 			 nb_tx_q, num_tc);
1466 		return -EINVAL;
1467 	}
1468 
1469 	/* Non-contiguous TCs are currently not supported */
1470 	hw->dcb_info.num_tc = num_tc;
1471 	for (i = 0; i < hw->dcb_info.num_tc; i++)
1472 		bit_map |= BIT(i);
1473 
1474 	if (!bit_map) {
1475 		bit_map = 1;
1476 		hw->dcb_info.num_tc = 1;
1477 	}
1478 	hw->hw_tc_map = bit_map;
1479 
1480 	return hns3_dcb_info_cfg(hns);
1481 }
1482 
1483 static int
1484 hns3_dcb_hw_configure(struct hns3_adapter *hns)
1485 {
1486 	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1487 	struct hns3_pf *pf = &hns->pf;
1488 	struct hns3_hw *hw = &hns->hw;
1489 	enum hns3_fc_status fc_status = hw->current_fc_status;
1490 	enum hns3_fc_mode current_mode = hw->current_mode;
1491 	uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1492 	int ret, status;
1493 
1494 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1495 	    pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
1496 		return -ENOTSUP;
1497 
1498 	ret = hns3_dcb_schd_setup_hw(hw);
1499 	if (ret) {
1500 		hns3_err(hw, "dcb schedule configure failed! ret = %d", ret);
1501 		return ret;
1502 	}
1503 
1504 	if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
1505 		dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1506 		if (dcb_rx_conf->nb_tcs == 0)
1507 			hw->dcb_info.pfc_en = 1; /* tc0 only */
1508 		else
1509 			hw->dcb_info.pfc_en =
1510 			RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1511 
1512 		hw->dcb_info.hw_pfc_map =
1513 				hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1514 
1515 		ret = hns3_buffer_alloc(hw);
1516 		if (ret)
1517 			return ret;
1518 
1519 		hw->current_fc_status = HNS3_FC_STATUS_PFC;
1520 		hw->current_mode = HNS3_FC_FULL;
1521 		ret = hns3_dcb_pause_setup_hw(hw);
1522 		if (ret) {
1523 			hns3_err(hw, "setup pfc failed! ret = %d", ret);
1524 			goto pfc_setup_fail;
1525 		}
1526 	} else {
1527 		/*
1528 		 * Although dcb_capability_en lacks the ETH_DCB_PFC_SUPPORT
1529 		 * flag, the DCB information, such as the number of TCs, is
1530 		 * still configured. Therefore, the packet buffer allocation
1531 		 * must be refreshed.
1532 		 */
1533 		ret = hns3_buffer_alloc(hw);
1534 		if (ret)
1535 			return ret;
1536 	}
1537 
1538 	return 0;
1539 
1540 pfc_setup_fail:
1541 	hw->current_mode = current_mode;
1542 	hw->current_fc_status = fc_status;
1543 	hw->dcb_info.hw_pfc_map = hw_pfc_map;
1544 	status = hns3_buffer_alloc(hw);
1545 	if (status)
1546 		hns3_err(hw, "recover packet buffer fail! status = %d", status);
1547 
1548 	return ret;
1549 }
1550 
1551 /*
1552  * hns3_dcb_configure - setup dcb related config
1553  * @hns: pointer to hns3 adapter
1554  * Returns 0 on success, negative value on failure.
1555  */
1556 int
1557 hns3_dcb_configure(struct hns3_adapter *hns)
1558 {
1559 	struct hns3_hw *hw = &hns->hw;
1560 	bool map_changed = false;
1561 	uint8_t num_tc = 0;
1562 	int ret;
1563 
1564 	hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
1565 	if (map_changed || rte_atomic16_read(&hw->reset.resetting)) {
1566 		ret = hns3_dcb_info_update(hns, num_tc);
1567 		if (ret) {
1568 			hns3_err(hw, "dcb info update failed: %d", ret);
1569 			return ret;
1570 		}
1571 
1572 		ret = hns3_dcb_hw_configure(hns);
1573 		if (ret) {
1574 			hns3_err(hw, "dcb hw configure failed: %d", ret);
1575 			return ret;
1576 		}
1577 	}
1578 
1579 	return 0;
1580 }
1581 
1582 int
1583 hns3_dcb_init_hw(struct hns3_hw *hw)
1584 {
1585 	int ret;
1586 
1587 	ret = hns3_dcb_schd_setup_hw(hw);
1588 	if (ret) {
1589 		hns3_err(hw, "dcb schedule setup failed: %d", ret);
1590 		return ret;
1591 	}
1592 
1593 	ret = hns3_dcb_pause_setup_hw(hw);
1594 	if (ret)
1595 		hns3_err(hw, "PAUSE setup failed: %d", ret);
1596 
1597 	return ret;
1598 }
1599 
1600 int
1601 hns3_dcb_init(struct hns3_hw *hw)
1602 {
1603 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1604 	struct hns3_pf *pf = &hns->pf;
1605 	uint16_t default_tqp_num;
1606 	int ret;
1607 
1608 	PMD_INIT_FUNC_TRACE();
1609 
1610 	/*
1611 	 * Based on the 'adapter_state' identifier, the following branch is
1612 	 * executed only to initialize the default dcb configuration during
1613 	 * driver initialization. Because the driver saves dcb-related
1614 	 * information before a reset is triggered, the reinit dev stage of
1615 	 * the reset process must not enter this branch, or that information
1616 	 * would be changed.
1617 	 */
1618 	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
1619 		hw->requested_mode = HNS3_FC_NONE;
1620 		hw->current_mode = hw->requested_mode;
1621 		pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
1622 		hw->current_fc_status = HNS3_FC_STATUS_NONE;
1623 
1624 		ret = hns3_dcb_info_init(hw);
1625 		if (ret) {
1626 			hns3_err(hw, "dcb info init failed, ret = %d.", ret);
1627 			return ret;
1628 		}
1629 
1630 		/*
1631 		 * The number of queues configured by default cannot exceed
1632 		 * the maximum number of queues for a single TC.
1633 		 */
1634 		default_tqp_num = RTE_MIN(hw->rss_size_max,
1635 					  hw->tqps_num / hw->dcb_info.num_tc);
1636 		ret = hns3_dcb_update_tc_queue_mapping(hw, default_tqp_num,
1637 						       default_tqp_num);
1638 		if (ret) {
1639 			hns3_err(hw,
1640 				 "update tc queue mapping failed, ret = %d.",
1641 				 ret);
1642 			return ret;
1643 		}
1644 	}
1645 
1646 	/*
1647 	 * The DCB hardware is configured by the following function during
1648 	 * both driver initialization and the reset process. However, once
1649 	 * driver initialization has finished and a reset arrives, the driver
1650 	 * restores the dcb hardware configuration directly from the
1651 	 * dcb-related information maintained in software.
1652 	 */
1653 	ret = hns3_dcb_init_hw(hw);
1654 	if (ret) {
1655 		hns3_err(hw, "dcb init hardware failed, ret = %d.", ret);
1656 		return ret;
1657 	}
1658 
1659 	return 0;
1660 }
1661 
1662 static int
1663 hns3_update_queue_map_configure(struct hns3_adapter *hns)
1664 {
1665 	struct hns3_hw *hw = &hns->hw;
1666 	uint16_t nb_rx_q = hw->data->nb_rx_queues;
1667 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
1668 	int ret;
1669 
1670 	ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
1671 	if (ret) {
1672 		hns3_err(hw, "failed to update tc queue mapping, ret = %d.",
1673 			 ret);
1674 		return ret;
1675 	}
1676 	ret = hns3_q_to_qs_map(hw);
1677 	if (ret)
1678 		hns3_err(hw, "failed to map nq to qs, ret = %d.", ret);
1679 
1680 	return ret;
1681 }
1682 
1683 int
1684 hns3_dcb_cfg_update(struct hns3_adapter *hns)
1685 {
1686 	struct hns3_hw *hw = &hns->hw;
1687 	enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
1688 	int ret;
1689 
1690 	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
1691 		ret = hns3_dcb_configure(hns);
1692 		if (ret)
1693 			hns3_err(hw, "Failed to config dcb: %d", ret);
1694 	} else {
1695 		/*
1696 		 * Update the queue map without PFC configuration,
1697 		 * because the queues were reconfigured by the user.
1698 		 */
1699 		ret = hns3_update_queue_map_configure(hns);
1700 		if (ret)
1701 			hns3_err(hw,
1702 				 "Failed to update queue mapping configure: %d",
1703 				 ret);
1704 	}
1705 
1706 	return ret;
1707 }
1708 
1709 /*
1710  * hns3_dcb_pfc_enable - Enable priority flow control
1711  * @dev: pointer to ethernet device
1712  *
1713  * Configures the pfc settings for one priority.
1714  */
1715 int
1716 hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
1717 {
1718 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1719 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1720 	enum hns3_fc_status fc_status = hw->current_fc_status;
1721 	enum hns3_fc_mode current_mode = hw->current_mode;
1722 	uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1723 	uint8_t pfc_en = hw->dcb_info.pfc_en;
1724 	uint8_t priority = pfc_conf->priority;
1725 	uint16_t pause_time = pf->pause_time;
1726 	int ret, status;
1727 
1728 	pf->pause_time = pfc_conf->fc.pause_time;
1729 	hw->current_mode = hw->requested_mode;
1730 	hw->current_fc_status = HNS3_FC_STATUS_PFC;
1731 	hw->dcb_info.pfc_en |= BIT(priority);
1732 	hw->dcb_info.hw_pfc_map =
1733 			hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1734 	ret = hns3_buffer_alloc(hw);
1735 	if (ret)
1736 		goto pfc_setup_fail;
1737 
1738 	/*
1739 	 * The flow control mode of all UPs will be changed based on
1740 	 * current_mode coming from user.
1741 	 */
1742 	ret = hns3_dcb_pause_setup_hw(hw);
1743 	if (ret) {
1744 		hns3_err(hw, "enable pfc failed! ret = %d", ret);
1745 		goto pfc_setup_fail;
1746 	}
1747 
1748 	return 0;
1749 
1750 pfc_setup_fail:
1751 	hw->current_mode = current_mode;
1752 	hw->current_fc_status = fc_status;
1753 	pf->pause_time = pause_time;
1754 	hw->dcb_info.pfc_en = pfc_en;
1755 	hw->dcb_info.hw_pfc_map = hw_pfc_map;
1756 	status = hns3_buffer_alloc(hw);
1757 	if (status)
1758 		hns3_err(hw, "recover packet buffer fail: %d", status);
1759 
1760 	return ret;
1761 }
1762 
1763 /*
1764  * hns3_fc_enable - Enable MAC pause
1765  * @dev: pointer to ethernet device
1766  *
1767  * Configures the MAC pause settings.
1768  */
1769 int
1770 hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1771 {
1772 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1773 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1774 	enum hns3_fc_status fc_status = hw->current_fc_status;
1775 	enum hns3_fc_mode current_mode = hw->current_mode;
1776 	uint16_t pause_time = pf->pause_time;
1777 	int ret;
1778 
1779 	pf->pause_time = fc_conf->pause_time;
1780 	hw->current_mode = hw->requested_mode;
1781 
1782 	/*
1783 	 * In fact, current_fc_status is HNS3_FC_STATUS_NONE when the flow
1784 	 * control mode is configured to be HNS3_FC_NONE.
1785 	 */
1786 	if (hw->current_mode == HNS3_FC_NONE)
1787 		hw->current_fc_status = HNS3_FC_STATUS_NONE;
1788 	else
1789 		hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;
1790 
1791 	ret = hns3_dcb_pause_setup_hw(hw);
1792 	if (ret) {
1793 		hns3_err(hw, "enable MAC Pause failed! ret = %d", ret);
1794 		goto setup_fc_fail;
1795 	}
1796 
1797 	return 0;
1798 
1799 setup_fc_fail:
1800 	hw->current_mode = current_mode;
1801 	hw->current_fc_status = fc_status;
1802 	pf->pause_time = pause_time;
1803 
1804 	return ret;
1805 }
1806