xref: /dpdk/drivers/net/hns3/hns3_dcb.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4 
5 #include <rte_io.h>
6 #include <rte_ethdev.h>
7 
8 #include "hns3_logs.h"
9 #include "hns3_ethdev.h"
10 #include "hns3_dcb.h"
11 
12 #define HNS3_SHAPER_BS_U_DEF	5
13 #define HNS3_SHAPER_BS_S_DEF	20
14 #define BW_MAX_PERCENT		100
15 
16 /*
17  * hns3_shaper_para_calc: calculate ir parameter for the shaper
18  * @ir: rate to be configured, in Mbps
19  * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
20  * @shaper_para: shaper parameter of IR shaper
21  *
22  * the formula:
23  *
24  *		IR_b * (2 ^ IR_u) * 8
25  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
26  *		Tick * (2 ^ IR_s)
27  *
28  * @return: 0: calculation successful, negative: failure
29  */
30 static int
31 hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
32 		      struct hns3_shaper_parameter *shaper_para)
33 {
34 #define SHAPER_DEFAULT_IR_B	126
35 #define DIVISOR_CLK		(1000 * 8)
36 #define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)
37 
38 	const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
39 		6 * 256,    /* Priority level */
40 		6 * 32,     /* Priority group level */
41 		6 * 8,      /* Port level */
42 		6 * 256     /* Qset level */
43 	};
44 	uint8_t ir_u_calc = 0;
45 	uint8_t ir_s_calc = 0;
46 	uint32_t denominator;
47 	uint32_t ir_calc;
48 	uint32_t tick;
49 
50 	/* Calc tick */
51 	if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
52 		hns3_err(hw,
53 			 "shaper_level(%u) is greater than HNS3_SHAPER_LVL_CNT(%d)",
54 			 shaper_level, HNS3_SHAPER_LVL_CNT);
55 		return -EINVAL;
56 	}
57 
58 	if (ir > hw->max_tm_rate) {
59 		hns3_err(hw, "rate(%u) exceeds the max rate(%u) supported "
60 			 "by the driver.", ir, hw->max_tm_rate);
61 		return -EINVAL;
62 	}
63 
64 	tick = tick_array[shaper_level];
65 
66 	/*
67 	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
68 	 * the formula is changed to:
69 	 *		126 * 1 * 8
70 	 * ir_calc = ---------------- * 1000
71 	 *		tick * 1
72 	 */
73 	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
74 
75 	if (ir_calc == ir) {
76 		shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
77 	} else if (ir_calc > ir) {
78 		/* Increasing the denominator to select ir_s value */
79 		while (ir_calc >= ir && ir) {
80 			ir_s_calc++;
81 			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
82 		}
83 
84 		shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
85 				    (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
86 	} else {
87 		/*
88 		 * Increasing the numerator to select the ir_u value. ir_u_calc
89 		 * reaches its maximum when ir_calc is at its minimum and ir is
90 		 * at its maximum, and ir_calc is at its minimum when tick is at
91 		 * its maximum. In that case, the while loop can increase
92 		 * ir_u_calc only up to eight, which happens when ir is equal
93 		 * to hw->max_tm_rate.
94 		 */
95 		uint32_t numerator;
96 		do {
97 			ir_u_calc++;
98 			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
99 			ir_calc = (numerator + (tick >> 1)) / tick;
100 		} while (ir_calc < ir);
101 
102 		if (ir_calc == ir) {
103 			shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
104 		} else {
105 			--ir_u_calc;
106 
107 			/*
108 			 * The maximum value of ir_u_calc in this branch is
109 			 * seven in all cases. Thus, the denominator cannot
110 			 * be zero here.
111 			 */
112 			denominator = DIVISOR_CLK * (1 << ir_u_calc);
113 			shaper_para->ir_b =
114 				(ir * tick + (denominator >> 1)) / denominator;
115 		}
116 	}
117 
118 	shaper_para->ir_u = ir_u_calc;
119 	shaper_para->ir_s = ir_s_calc;
120 
121 	return 0;
122 }
123 
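/*
 * Illustrative sketch, not part of the original file: invert the shaper
 * formula above to recover the rate encoded by an (ir_b, ir_u, ir_s)
 * tuple. Worked example: for a 200 Mbps priority-level shaper
 * (tick = 6 * 256 = 1536), hns3_shaper_para_calc() yields ir_b = 154,
 * ir_u = 0 and ir_s = 2, and this helper maps the tuple back to
 * 154 * 8 * 1000 / (1536 * 4) ~= 200 Mbps. The helper name is
 * hypothetical; it could serve as a unit-test oracle.
 */
static inline uint32_t
hns3_shaper_para_to_rate(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
			 uint32_t tick)
{
	/* IR(Mbps) = ir_b * 2^ir_u * 8 * 1000 / (tick * 2^ir_s) */
	uint64_t numerator = (uint64_t)ir_b * (1u << ir_u) * 8 * 1000;
	uint64_t denominator = (uint64_t)tick * (1u << ir_s);

	return (uint32_t)((numerator + (denominator >> 1)) / denominator);
}
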
124 static int
125 hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)
126 {
127 #define HNS3_HALF_BYTE_BIT_OFFSET 4
128 	uint8_t tc = hw->dcb_info.prio_tc[pri_id];
129 
130 	if (tc >= hw->dcb_info.num_tc)
131 		return -EINVAL;
132 
133 	/*
134 	 * The register for priority has four bytes, the first byte includes
135 	 *  priority0 and priority1, the higher 4 bits stand for priority1
136 	 *  while the lower 4 bits stand for priority0, as below:
137 	 * first byte:	| pri_1 | pri_0 |
138 	 * second byte:	| pri_3 | pri_2 |
139 	 * third byte:	| pri_5 | pri_4 |
140 	 * fourth byte:	| pri_7 | pri_6 |
141 	 */
142 	pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);
143 
144 	return 0;
145 }
146 
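/*
 * Worked example (illustrative only): with prio_tc = {0, 0, 1, 1, 2, 2,
 * 3, 3}, calling hns3_fill_pri_array() for pri_id 0..7 packs the mapping
 * as pri[0] = 0x00, pri[1] = 0x11, pri[2] = 0x22 and pri[3] = 0x33,
 * i.e. two priorities per byte with the even priority in the low nibble.
 */
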
147 static int
148 hns3_up_to_tc_map(struct hns3_hw *hw)
149 {
150 	struct hns3_cmd_desc desc;
151 	uint8_t *pri = (uint8_t *)desc.data;
152 	uint8_t pri_id;
153 	int ret;
154 
155 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);
156 
157 	for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {
158 		ret = hns3_fill_pri_array(hw, pri, pri_id);
159 		if (ret)
160 			return ret;
161 	}
162 
163 	return hns3_cmd_send(hw, &desc, 1);
164 }
165 
166 static int
167 hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)
168 {
169 	struct hns3_pg_to_pri_link_cmd *map;
170 	struct hns3_cmd_desc desc;
171 
172 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);
173 
174 	map = (struct hns3_pg_to_pri_link_cmd *)desc.data;
175 
176 	map->pg_id = pg_id;
177 	map->pri_bit_map = pri_bit_map;
178 
179 	return hns3_cmd_send(hw, &desc, 1);
180 }
181 
182 static int
183 hns3_pg_to_pri_map(struct hns3_hw *hw)
184 {
185 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
186 	struct hns3_pf *pf = &hns->pf;
187 	struct hns3_pg_info *pg_info;
188 	int ret, i;
189 
190 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
191 		return -EINVAL;
192 
193 	for (i = 0; i < hw->dcb_info.num_pg; i++) {
194 		/* Cfg pg to priority mapping */
195 		pg_info = &hw->dcb_info.pg_info[i];
196 		ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);
197 		if (ret)
198 			return ret;
199 	}
200 
201 	return 0;
202 }
203 
204 static int
205 hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)
206 {
207 	struct hns3_qs_to_pri_link_cmd *map;
208 	struct hns3_cmd_desc desc;
209 
210 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);
211 
212 	map = (struct hns3_qs_to_pri_link_cmd *)desc.data;
213 
214 	map->qs_id = rte_cpu_to_le_16(qs_id);
215 	map->priority = pri;
216 	map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;
217 
218 	return hns3_cmd_send(hw, &desc, 1);
219 }
220 
221 static int
222 hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)
223 {
224 	struct hns3_qs_weight_cmd *weight;
225 	struct hns3_cmd_desc desc;
226 
227 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);
228 
229 	weight = (struct hns3_qs_weight_cmd *)desc.data;
230 
231 	weight->qs_id = rte_cpu_to_le_16(qs_id);
232 	weight->dwrr = dwrr;
233 
234 	return hns3_cmd_send(hw, &desc, 1);
235 }
236 
237 static int
238 hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
239 {
240 #define DEFAULT_TC_WEIGHT	1
241 #define DEFAULT_TC_OFFSET	14
242 	struct hns3_ets_tc_weight_cmd *ets_weight;
243 	struct hns3_cmd_desc desc;
244 	uint8_t i;
245 
246 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);
247 	ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;
248 
249 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
250 		struct hns3_pg_info *pg_info;
251 
252 		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
253 
254 		if (!(hw->hw_tc_map & BIT(i)))
255 			continue;
256 
257 		pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
258 		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
259 	}
260 
261 	ets_weight->weight_offset = DEFAULT_TC_OFFSET;
262 
263 	return hns3_cmd_send(hw, &desc, 1);
264 }
265 
266 static int
267 hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)
268 {
269 	struct hns3_priority_weight_cmd *weight;
270 	struct hns3_cmd_desc desc;
271 
272 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);
273 
274 	weight = (struct hns3_priority_weight_cmd *)desc.data;
275 
276 	weight->pri_id = pri_id;
277 	weight->dwrr = dwrr;
278 
279 	return hns3_cmd_send(hw, &desc, 1);
280 }
281 
282 static int
283 hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)
284 {
285 	struct hns3_pg_weight_cmd *weight;
286 	struct hns3_cmd_desc desc;
287 
288 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);
289 
290 	weight = (struct hns3_pg_weight_cmd *)desc.data;
291 
292 	weight->pg_id = pg_id;
293 	weight->dwrr = dwrr;
294 
295 	return hns3_cmd_send(hw, &desc, 1);
296 }

297 static int
298 hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)
299 {
300 	struct hns3_cmd_desc desc;
301 
302 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);
303 
304 	if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)
305 		desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
306 	else
307 		desc.data[1] = 0;
308 
309 	desc.data[0] = rte_cpu_to_le_32(pg_id);
310 
311 	return hns3_cmd_send(hw, &desc, 1);
312 }
313 
314 static uint32_t
315 hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
316 			   uint8_t bs_b, uint8_t bs_s)
317 {
318 	uint32_t shapping_para = 0;
319 
320 	/* If ir_b is zero, the IR is 0 Mbps, so return a zero shapping_para */
321 	if (ir_b == 0)
322 		return shapping_para;
323 
324 	hns3_dcb_set_field(shapping_para, IR_B, ir_b);
325 	hns3_dcb_set_field(shapping_para, IR_U, ir_u);
326 	hns3_dcb_set_field(shapping_para, IR_S, ir_s);
327 	hns3_dcb_set_field(shapping_para, BS_B, bs_b);
328 	hns3_dcb_set_field(shapping_para, BS_S, bs_s);
329 
330 	return shapping_para;
331 }
332 
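/*
 * Usage note (illustrative): continuing the 200 Mbps example above, the
 * shapping word would be built as hns3_dcb_get_shapping_para(154, 0, 2,
 * HNS3_SHAPER_BS_U_DEF, HNS3_SHAPER_BS_S_DEF); passing ir_b = 0 instead
 * yields an all-zero word, i.e. a disabled (0 Mbps) shaper.
 */
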
333 static int
334 hns3_dcb_port_shaper_cfg(struct hns3_hw *hw, uint32_t speed)
335 {
336 	struct hns3_port_shapping_cmd *shap_cfg_cmd;
337 	struct hns3_shaper_parameter shaper_parameter;
338 	uint32_t shapping_para;
339 	uint32_t ir_u, ir_b, ir_s;
340 	struct hns3_cmd_desc desc;
341 	int ret;
342 
343 	ret = hns3_shaper_para_calc(hw, speed,
344 				    HNS3_SHAPER_LVL_PORT, &shaper_parameter);
345 	if (ret) {
346 		hns3_err(hw, "calculate shaper parameter failed: %d", ret);
347 		return ret;
348 	}
349 
350 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);
351 	shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;
352 
353 	ir_b = shaper_parameter.ir_b;
354 	ir_u = shaper_parameter.ir_u;
355 	ir_s = shaper_parameter.ir_s;
356 	shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
357 						   HNS3_SHAPER_BS_U_DEF,
358 						   HNS3_SHAPER_BS_S_DEF);
359 
360 	shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);
361 
362 	/*
363 	 * Configure port_rate and set bit HNS3_TM_RATE_VLD_B of the flag
364 	 * field in hns3_port_shapping_cmd so that firmware recalculates the
365 	 * shapping parameters. Whether the parameters are actually
366 	 * recalculated depends on the firmware version, so the driver still
367 	 * calculates them and configures them for better compatibility.
368 	 */
369 	shap_cfg_cmd->port_rate = rte_cpu_to_le_32(speed);
370 	hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
371 
372 	return hns3_cmd_send(hw, &desc, 1);
373 }
374 
375 int
376 hns3_port_shaper_update(struct hns3_hw *hw, uint32_t speed)
377 {
378 	int ret;
379 
380 	ret = hns3_dcb_port_shaper_cfg(hw, speed);
381 	if (ret)
382 		hns3_err(hw, "configure port shaper failed: ret = %d", ret);
383 
384 	return ret;
385 }
386 
387 static int
388 hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
389 			 uint8_t pg_id, uint32_t shapping_para, uint32_t rate)
390 {
391 	struct hns3_pg_shapping_cmd *shap_cfg_cmd;
392 	enum hns3_opcode_type opcode;
393 	struct hns3_cmd_desc desc;
394 
395 	opcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :
396 		 HNS3_OPC_TM_PG_C_SHAPPING;
397 	hns3_cmd_setup_basic_desc(&desc, opcode, false);
398 
399 	shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;
400 
401 	shap_cfg_cmd->pg_id = pg_id;
402 
403 	shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);
404 
405 	/*
406 	 * Configure pg_rate and set bit HNS3_TM_RATE_VLD_B of the flag field
407 	 * in hns3_pg_shapping_cmd so that firmware recalculates the shapping
408 	 * parameters. Whether the parameters are actually recalculated
409 	 * depends on the firmware version, so the driver still calculates
410 	 * them and configures them for better compatibility.
411 	 */
412 	shap_cfg_cmd->pg_rate = rte_cpu_to_le_32(rate);
413 	hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
414 
415 	return hns3_cmd_send(hw, &desc, 1);
416 }
417 
418 int
419 hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate)
420 {
421 	struct hns3_shaper_parameter shaper_parameter;
422 	uint32_t ir_u, ir_b, ir_s;
423 	uint32_t shaper_para;
424 	int ret;
425 
426 	/* Calc shaper para */
427 	ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PG,
428 				    &shaper_parameter);
429 	if (ret) {
430 		hns3_err(hw, "calculate shaper parameter fail, ret = %d.",
431 			 ret);
432 		return ret;
433 	}
434 
435 	shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
436 						 HNS3_SHAPER_BS_U_DEF,
437 						 HNS3_SHAPER_BS_S_DEF);
438 
439 	ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, pg_id,
440 				       shaper_para, rate);
441 	if (ret) {
442 		hns3_err(hw, "config PG CIR shaper parameter fail, ret = %d.",
443 			 ret);
444 		return ret;
445 	}
446 
447 	ir_b = shaper_parameter.ir_b;
448 	ir_u = shaper_parameter.ir_u;
449 	ir_s = shaper_parameter.ir_s;
450 	shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
451 						 HNS3_SHAPER_BS_U_DEF,
452 						 HNS3_SHAPER_BS_S_DEF);
453 
454 	ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, pg_id,
455 				       shaper_para, rate);
456 	if (ret) {
457 		hns3_err(hw, "config PG PIR shaper parameter fail, ret = %d.",
458 			 ret);
459 		return ret;
460 	}
461 
462 	return 0;
463 }
464 
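/*
 * Design note: hns3_pg_shaper_rate_cfg() above programs the CIR
 * (committed) bucket with ir_b/ir_u/ir_s all zero and puts the calculated
 * rate only in the PIR (peak) bucket, so the PG is limited by its peak
 * rate alone.
 */
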
465 static int
466 hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
467 {
468 	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
469 	uint32_t rate;
470 	uint8_t i;
471 	int ret;
472 
473 	/* Cfg pg schd */
474 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
475 		return -EINVAL;
476 
477 	/* Pg to pri */
478 	for (i = 0; i < hw->dcb_info.num_pg; i++) {
479 		rate = hw->dcb_info.pg_info[i].bw_limit;
480 		ret = hns3_pg_shaper_rate_cfg(hw, i, rate);
481 		if (ret)
482 			return ret;
483 	}
484 
485 	return 0;
486 }
487 
488 static int
489 hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)
490 {
491 	struct hns3_cmd_desc desc;
492 
493 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);
494 
495 	if (mode == HNS3_SCH_MODE_DWRR)
496 		desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
497 	else
498 		desc.data[1] = 0;
499 
500 	desc.data[0] = rte_cpu_to_le_32(qs_id);
501 
502 	return hns3_cmd_send(hw, &desc, 1);
503 }
504 
505 static int
506 hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)
507 {
508 	struct hns3_cmd_desc desc;
509 
510 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);
511 
512 	if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)
513 		desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
514 	else
515 		desc.data[1] = 0;
516 
517 	desc.data[0] = rte_cpu_to_le_32(pri_id);
518 
519 	return hns3_cmd_send(hw, &desc, 1);
520 }
521 
522 static int
523 hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
524 			  uint8_t pri_id, uint32_t shapping_para, uint32_t rate)
525 {
526 	struct hns3_pri_shapping_cmd *shap_cfg_cmd;
527 	enum hns3_opcode_type opcode;
528 	struct hns3_cmd_desc desc;
529 
530 	opcode = bucket ? HNS3_OPC_TM_PRI_P_SHAPPING :
531 		 HNS3_OPC_TM_PRI_C_SHAPPING;
532 
533 	hns3_cmd_setup_basic_desc(&desc, opcode, false);
534 
535 	shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;
536 
537 	shap_cfg_cmd->pri_id = pri_id;
538 
539 	shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);
540 
541 	/*
542 	 * Configure pri_rate and set bit HNS3_TM_RATE_VLD_B of the flag
543 	 * field in hns3_pri_shapping_cmd so that firmware recalculates the
544 	 * shapping parameters. Whether the parameters are actually
545 	 * recalculated depends on the firmware version, so the driver still
546 	 * calculates them and configures them for better compatibility.
547 	 */
548 	shap_cfg_cmd->pri_rate = rte_cpu_to_le_32(rate);
549 	hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
550 
551 	return hns3_cmd_send(hw, &desc, 1);
552 }
553 
554 int
555 hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate)
556 {
557 	struct hns3_shaper_parameter shaper_parameter;
558 	uint32_t ir_u, ir_b, ir_s;
559 	uint32_t shaper_para;
560 	int ret;
561 
562 	ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PRI,
563 				    &shaper_parameter);
564 	if (ret) {
565 		hns3_err(hw, "calculate shaper parameter failed: %d.",
566 			 ret);
567 		return ret;
568 	}
569 
570 	shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
571 						 HNS3_SHAPER_BS_U_DEF,
572 						 HNS3_SHAPER_BS_S_DEF);
573 
574 	ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, tc_no,
575 					shaper_para, rate);
576 	if (ret) {
577 		hns3_err(hw,
578 			 "config priority CIR shaper parameter failed: %d.",
579 			 ret);
580 		return ret;
581 	}
582 
583 	ir_b = shaper_parameter.ir_b;
584 	ir_u = shaper_parameter.ir_u;
585 	ir_s = shaper_parameter.ir_s;
586 	shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
587 						 HNS3_SHAPER_BS_U_DEF,
588 						 HNS3_SHAPER_BS_S_DEF);
589 
590 	ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, tc_no,
591 					shaper_para, rate);
592 	if (ret) {
593 		hns3_err(hw,
594 			 "config priority PIR shaper parameter failed: %d.",
595 			 ret);
596 		return ret;
597 	}
598 
599 	return 0;
600 }
601 
602 static int
603 hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
604 {
605 	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
606 	uint32_t rate;
607 	uint8_t i;
608 	int ret;
609 
610 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
611 		return -EINVAL;
612 
613 	for (i = 0; i < hw->dcb_info.num_tc; i++) {
614 		rate = hw->dcb_info.tc_info[i].bw_limit;
615 		ret = hns3_pri_shaper_rate_cfg(hw, i, rate);
616 		if (ret) {
617 			hns3_err(hw, "config pri shaper failed: %d.", ret);
618 			return ret;
619 		}
620 	}
621 
622 	return 0;
623 }
624 
625 static int
626 hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
627 {
628 	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
629 	uint16_t rx_qnum_per_tc;
630 	uint16_t used_rx_queues;
631 	int i;
632 
633 	rx_qnum_per_tc = nb_rx_q / hw->num_tc;
634 	if (rx_qnum_per_tc > hw->rss_size_max) {
635 		hns3_err(hw, "rx queue number per tc (%u) is greater than "
636 			 "the value (%u) supported by hardware.",
637 			 rx_qnum_per_tc, hw->rss_size_max);
638 		return -EINVAL;
639 	}
640 
641 	used_rx_queues = hw->num_tc * rx_qnum_per_tc;
642 	if (used_rx_queues != nb_rx_q) {
643 		hns3_err(hw, "rx queue number (%u) configured must be an "
644 			 "integral multiple of valid tc number (%u).",
645 			 nb_rx_q, hw->num_tc);
646 		return -EINVAL;
647 	}
648 	hw->alloc_rss_size = rx_qnum_per_tc;
649 	hw->used_rx_queues = used_rx_queues;
650 
651 	/*
652 	 * When the rss size is changed, the rss redirection table maintained
653 	 * by the driver must be updated. Besides, during the entire reset
654 	 * process, the rss table information must not be overwritten, since
655 	 * it is configured directly to the hardware in the
656 	 * RESET_STAGE_RESTORE stage of the reset process.
657 	 */
658 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
659 		for (i = 0; i < hw->rss_ind_tbl_size; i++)
660 			rss_cfg->rss_indirection_tbl[i] =
661 							i % hw->alloc_rss_size;
662 	}
663 
664 	return 0;
665 }
666 
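/*
 * Worked example (illustrative only): with hw->num_tc = 4 and
 * nb_rx_q = 16, rx_qnum_per_tc = alloc_rss_size = 4 and the redirection
 * table is refilled with the repeating pattern 0, 1, 2, 3, 0, 1, 2, 3,
 * ... across all hw->rss_ind_tbl_size entries.
 */
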
667 static int
668 hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_tx_q)
669 {
670 	struct hns3_tc_queue_info *tc_queue;
671 	uint16_t used_tx_queues;
672 	uint16_t tx_qnum_per_tc;
673 	uint8_t i;
674 
675 	tx_qnum_per_tc = nb_tx_q / hw->num_tc;
676 	used_tx_queues = hw->num_tc * tx_qnum_per_tc;
677 	if (used_tx_queues != nb_tx_q) {
678 		hns3_err(hw, "tx queue number (%u) configured must be an "
679 			 "integral multiple of valid tc number (%u).",
680 			 nb_tx_q, hw->num_tc);
681 		return -EINVAL;
682 	}
683 
684 	hw->used_tx_queues = used_tx_queues;
685 	hw->tx_qnum_per_tc = tx_qnum_per_tc;
686 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
687 		tc_queue = &hw->tc_queue[i];
688 		if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
689 			tc_queue->enable = true;
690 			tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
691 			tc_queue->tqp_count = hw->tx_qnum_per_tc;
692 			tc_queue->tc = i;
693 		} else {
694 			/* Set to the default queue if the TC is disabled */
695 			tc_queue->enable = false;
696 			tc_queue->tqp_offset = 0;
697 			tc_queue->tqp_count = 0;
698 			tc_queue->tc = 0;
699 		}
700 	}
701 
702 	return 0;
703 }
704 
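/*
 * Worked example (illustrative only): with hw->num_tc = 4, hw_tc_map =
 * 0xF and nb_tx_q = 16, each enabled TC gets tx_qnum_per_tc = 4 queues:
 * TC0 covers txq 0~3 (tqp_offset = 0), TC1 txq 4~7, TC2 txq 8~11 and
 * TC3 txq 12~15; the remaining tc_queue[] entries stay disabled.
 */
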
705 uint8_t
706 hns3_txq_mapped_tc_get(struct hns3_hw *hw, uint16_t txq_no)
707 {
708 	struct hns3_tc_queue_info *tc_queue;
709 	uint8_t i;
710 
711 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
712 		tc_queue = &hw->tc_queue[i];
713 		if (!tc_queue->enable)
714 			continue;
715 
716 		if (txq_no >= tc_queue->tqp_offset &&
717 		    txq_no < tc_queue->tqp_offset + tc_queue->tqp_count)
718 			return i;
719 	}
720 
721 	/* return TC0 in default case */
722 	return 0;
723 }
724 
725 int
726 hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q)
727 {
728 	int ret;
729 
730 	ret = hns3_set_rss_size(hw, nb_rx_q);
731 	if (ret)
732 		return ret;
733 
734 	return hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
735 }
736 
737 static int
738 hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
739 				 uint16_t nb_tx_q)
740 {
741 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
742 	struct hns3_pf *pf = &hns->pf;
743 	int ret;
744 
745 	hw->num_tc = hw->dcb_info.num_tc;
746 	ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
747 	if (ret)
748 		return ret;
749 
750 	if (!hns->is_vf)
751 		memcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);
752 
753 	return 0;
754 }
755 
756 int
757 hns3_dcb_info_init(struct hns3_hw *hw)
758 {
759 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
760 	struct hns3_pf *pf = &hns->pf;
761 	int i, k;
762 
763 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
764 	    hw->dcb_info.num_pg != 1)
765 		return -EINVAL;
766 
767 	/* Initializing PG information */
768 	memset(hw->dcb_info.pg_info, 0,
769 	       sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
770 	for (i = 0; i < hw->dcb_info.num_pg; i++) {
771 		hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;
772 		hw->dcb_info.pg_info[i].pg_id = i;
773 		hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;
774 		hw->dcb_info.pg_info[i].bw_limit = hw->max_tm_rate;
775 
776 		if (i != 0)
777 			continue;
778 
779 		hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;
780 		for (k = 0; k < hw->dcb_info.num_tc; k++)
781 			hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;
782 	}
783 
784 	/* All UPs map to TC0 */
785 	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
786 		hw->dcb_info.prio_tc[i] = 0;
787 
788 	/* Initializing tc information */
789 	memset(hw->dcb_info.tc_info, 0,
790 	       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
791 	for (i = 0; i < hw->dcb_info.num_tc; i++) {
792 		hw->dcb_info.tc_info[i].tc_id = i;
793 		hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
794 		hw->dcb_info.tc_info[i].pgid = 0;
795 		hw->dcb_info.tc_info[i].bw_limit =
796 			hw->dcb_info.pg_info[0].bw_limit;
797 	}
798 
799 	return 0;
800 }
801 
802 static int
803 hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)
804 {
805 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
806 	struct hns3_pf *pf = &hns->pf;
807 	int ret, i;
808 
809 	/* Only configured in TC-based scheduler mode */
810 	if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)
811 		return -EINVAL;
812 
813 	for (i = 0; i < hw->dcb_info.num_pg; i++) {
814 		ret = hns3_dcb_pg_schd_mode_cfg(hw, i);
815 		if (ret)
816 			return ret;
817 	}
818 
819 	return 0;
820 }
821 
822 static int
823 hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)
824 {
825 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
826 	struct hns3_pf *pf = &hns->pf;
827 	uint8_t i;
828 	int ret;
829 
830 	if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {
831 		for (i = 0; i < hw->dcb_info.num_tc; i++) {
832 			ret = hns3_dcb_pri_schd_mode_cfg(hw, i);
833 			if (ret)
834 				return ret;
835 
836 			ret = hns3_dcb_qs_schd_mode_cfg(hw, i,
837 							HNS3_SCH_MODE_DWRR);
838 			if (ret)
839 				return ret;
840 		}
841 	}
842 
843 	return 0;
844 }
845 
846 static int
847 hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)
848 {
849 	int ret;
850 
851 	ret = hns3_dcb_lvl2_schd_mode_cfg(hw);
852 	if (ret) {
853 		hns3_err(hw, "config lvl2_schd_mode failed: %d", ret);
854 		return ret;
855 	}
856 
857 	ret = hns3_dcb_lvl34_schd_mode_cfg(hw);
858 	if (ret)
859 		hns3_err(hw, "config lvl34_schd_mode failed: %d", ret);
860 
861 	return ret;
862 }
863 
864 static int
865 hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)
866 {
867 	struct hns3_pg_info *pg_info;
868 	uint8_t dwrr;
869 	int ret, i;
870 
871 	for (i = 0; i < hw->dcb_info.num_tc; i++) {
872 		pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
873 		dwrr = pg_info->tc_dwrr[i];
874 
875 		ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);
876 		if (ret) {
877 			hns3_err(hw,
878 			       "fail to send priority weight cmd: %d, ret = %d",
879 			       i, ret);
880 			return ret;
881 		}
882 
883 		ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);
884 		if (ret) {
885 			hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d",
886 				 i, ret);
887 			return ret;
888 		}
889 	}
890 
891 	return 0;
892 }
893 
894 static int
895 hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
896 {
897 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
898 	struct hns3_pf *pf = &hns->pf;
899 	uint32_t version;
900 	int ret;
901 
902 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
903 		return -EINVAL;
904 
905 	ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);
906 	if (ret)
907 		return ret;
908 
909 	if (!hns3_dev_dcb_supported(hw))
910 		return 0;
911 
912 	ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
913 	if (ret == -EOPNOTSUPP) {
914 		version = hw->fw_version;
915 		hns3_warn(hw,
916 			  "fw %u.%u.%u.%u doesn't support ets tc weight cmd",
917 			  hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
918 					 HNS3_FW_VERSION_BYTE3_S),
919 			  hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
920 					 HNS3_FW_VERSION_BYTE2_S),
921 			  hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
922 					 HNS3_FW_VERSION_BYTE1_S),
923 			  hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
924 					 HNS3_FW_VERSION_BYTE0_S));
925 		ret = 0;
926 	}
927 
928 	return ret;
929 }
930 
931 static int
932 hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)
933 {
934 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
935 	struct hns3_pf *pf = &hns->pf;
936 	int ret, i;
937 
938 	/* Cfg pg schd */
939 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
940 		return -EINVAL;
941 
942 	/* Cfg pg to prio */
943 	for (i = 0; i < hw->dcb_info.num_pg; i++) {
944 		/* Cfg dwrr */
945 		ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);
946 		if (ret)
947 			return ret;
948 	}
949 
950 	return 0;
951 }
952 
953 static int
954 hns3_dcb_dwrr_cfg(struct hns3_hw *hw)
955 {
956 	int ret;
957 
958 	ret = hns3_dcb_pg_dwrr_cfg(hw);
959 	if (ret) {
960 		hns3_err(hw, "config pg_dwrr failed: %d", ret);
961 		return ret;
962 	}
963 
964 	ret = hns3_dcb_pri_dwrr_cfg(hw);
965 	if (ret)
966 		hns3_err(hw, "config pri_dwrr failed: %d", ret);
967 
968 	return ret;
969 }
970 
971 static int
972 hns3_dcb_shaper_cfg(struct hns3_hw *hw)
973 {
974 	int ret;
975 
976 	ret = hns3_dcb_port_shaper_cfg(hw, hw->mac.link_speed);
977 	if (ret) {
978 		hns3_err(hw, "config port shaper failed: %d", ret);
979 		return ret;
980 	}
981 
982 	ret = hns3_dcb_pg_shaper_cfg(hw);
983 	if (ret) {
984 		hns3_err(hw, "config pg shaper failed: %d", ret);
985 		return ret;
986 	}
987 
988 	return hns3_dcb_pri_shaper_cfg(hw);
989 }
990 
991 static int
992 hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)
993 {
994 	struct hns3_nq_to_qs_link_cmd *map;
995 	struct hns3_cmd_desc desc;
996 	uint16_t tmp_qs_id = 0;
997 	uint16_t qs_id_l;
998 	uint16_t qs_id_h;
999 
1000 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);
1001 
1002 	map = (struct hns3_nq_to_qs_link_cmd *)desc.data;
1003 
1004 	map->nq_id = rte_cpu_to_le_16(q_id);
1005 
1006 	/*
1007 	 * A network engine with revision_id 0x21 uses bits 0~9 of qs_id to
1008 	 * configure the qset_id. So qs_id needs to be converted to the
1009 	 * following format to support qset_id > 1024.
1010 	 * qs_id: | 15 | 14 ~ 10 |  9 ~ 0   |
1011 	 *            /         / \         \
1012 	 *           /         /   \         \
1013 	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
1014 	 *          | qs_id_h | vld | qs_id_l |
1015 	 */
1016 	qs_id_l = hns3_get_field(qs_id, HNS3_DCB_QS_ID_L_MSK,
1017 				 HNS3_DCB_QS_ID_L_S);
1018 	qs_id_h = hns3_get_field(qs_id, HNS3_DCB_QS_ID_H_MSK,
1019 				 HNS3_DCB_QS_ID_H_S);
1020 	hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_L_MSK, HNS3_DCB_QS_ID_L_S,
1021 		       qs_id_l);
1022 	hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_H_EXT_MSK,
1023 		       HNS3_DCB_QS_ID_H_EXT_S, qs_id_h);
1024 	map->qset_id = rte_cpu_to_le_16(tmp_qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);
1025 
1026 	return hns3_cmd_send(hw, &desc, 1);
1027 }
1028 
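/*
 * Illustrative sketch, not part of the original file: the qs_id reshuffle
 * above as a pure function, assuming the bit layout documented in the
 * comment (qs_id_l in bits 0~9 and qs_id_h in bits 10~14 of the input;
 * qs_id_h moved to bits 11~15 and the link-valid bit at bit 10 of the
 * output). E.g. qs_id 1024 (only bit 10 set) packs to 0x0C00.
 */
static inline uint16_t
hns3_qset_id_pack(uint16_t qs_id)
{
	uint16_t qs_id_l = qs_id & 0x3FF;	  /* bits 0~9 */
	uint16_t qs_id_h = (qs_id >> 10) & 0x1F; /* bits 10~14 */

	/* qs_id_h lands in bits 11~15; bit 10 carries the valid flag */
	return (uint16_t)((qs_id_h << 11) | (1u << 10) | qs_id_l);
}
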
1029 static int
1030 hns3_q_to_qs_map(struct hns3_hw *hw)
1031 {
1032 	struct hns3_tc_queue_info *tc_queue;
1033 	uint16_t q_id;
1034 	uint32_t i, j;
1035 	int ret;
1036 
1037 	for (i = 0; i < hw->num_tc; i++) {
1038 		tc_queue = &hw->tc_queue[i];
1039 		for (j = 0; j < tc_queue->tqp_count; j++) {
1040 			q_id = tc_queue->tqp_offset + j;
1041 			ret = hns3_q_to_qs_map_cfg(hw, q_id, i);
1042 			if (ret)
1043 				return ret;
1044 		}
1045 	}
1046 
1047 	return 0;
1048 }
1049 
1050 static int
1051 hns3_pri_q_qs_cfg(struct hns3_hw *hw)
1052 {
1053 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1054 	struct hns3_pf *pf = &hns->pf;
1055 	uint32_t i;
1056 	int ret;
1057 
1058 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
1059 		return -EINVAL;
1060 
1061 	/* Cfg qs -> pri mapping */
1062 	for (i = 0; i < hw->num_tc; i++) {
1063 		ret = hns3_qs_to_pri_map_cfg(hw, i, i);
1064 		if (ret) {
1065 			hns3_err(hw, "qs_to_pri mapping fail: %d", ret);
1066 			return ret;
1067 		}
1068 	}
1069 
1070 	/* Cfg q -> qs mapping */
1071 	ret = hns3_q_to_qs_map(hw);
1072 	if (ret)
1073 		hns3_err(hw, "nq_to_qs mapping fail: %d", ret);
1074 
1075 	return ret;
1076 }
1077 
1078 static int
1079 hns3_dcb_map_cfg(struct hns3_hw *hw)
1080 {
1081 	int ret;
1082 
1083 	ret = hns3_up_to_tc_map(hw);
1084 	if (ret) {
1085 		hns3_err(hw, "up_to_tc mapping fail: %d", ret);
1086 		return ret;
1087 	}
1088 
1089 	ret = hns3_pg_to_pri_map(hw);
1090 	if (ret) {
1091 		hns3_err(hw, "pri_to_pg mapping fail: %d", ret);
1092 		return ret;
1093 	}
1094 
1095 	return hns3_pri_q_qs_cfg(hw);
1096 }
1097 
1098 static int
1099 hns3_dcb_schd_setup_hw(struct hns3_hw *hw)
1100 {
1101 	int ret;
1102 
1103 	/* Cfg dcb mapping  */
1104 	ret = hns3_dcb_map_cfg(hw);
1105 	if (ret)
1106 		return ret;
1107 
1108 	/* Cfg dcb shaper */
1109 	ret = hns3_dcb_shaper_cfg(hw);
1110 	if (ret)
1111 		return ret;
1112 
1113 	/* Cfg dwrr */
1114 	ret = hns3_dcb_dwrr_cfg(hw);
1115 	if (ret)
1116 		return ret;
1117 
1118 	/* Cfg schd mode for each level schd */
1119 	return hns3_dcb_schd_mode_cfg(hw);
1120 }
1121 
1122 static int
1123 hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,
1124 		     uint8_t pause_trans_gap, uint16_t pause_trans_time)
1125 {
1126 	struct hns3_cfg_pause_param_cmd *pause_param;
1127 	struct hns3_cmd_desc desc;
1128 
1129 	pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1130 
1131 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);
1132 
1133 	memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);
1134 	memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);
1135 	pause_param->pause_trans_gap = pause_trans_gap;
1136 	pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);
1137 
1138 	return hns3_cmd_send(hw, &desc, 1);
1139 }
1140 
1141 int
1142 hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)
1143 {
1144 	struct hns3_cfg_pause_param_cmd *pause_param;
1145 	struct hns3_cmd_desc desc;
1146 	uint16_t trans_time;
1147 	uint8_t trans_gap;
1148 	int ret;
1149 
1150 	pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;
1151 
1152 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);
1153 
1154 	ret = hns3_cmd_send(hw, &desc, 1);
1155 	if (ret)
1156 		return ret;
1157 
1158 	trans_gap = pause_param->pause_trans_gap;
1159 	trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);
1160 
1161 	return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);
1162 }
1163 
1164 static int
1165 hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
1166 {
1167 #define PAUSE_TIME_DIV_BY	2
1168 #define PAUSE_TIME_MIN_VALUE	0x4
1169 
1170 	struct hns3_mac *mac = &hw->mac;
1171 	uint8_t pause_trans_gap;
1172 
1173 	/*
1174 	 * Pause transmit gap must be less than "pause_time / 2", otherwise
1175 	 * the behavior of MAC is undefined.
1176 	 */
1177 	if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1178 		pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
1179 	else if (pause_time >= PAUSE_TIME_MIN_VALUE &&
1180 		 pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
1181 		pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1182 	else {
1183 		hns3_warn(hw, "pause_time(%u) is adjusted to 4", pause_time);
1184 		pause_time = PAUSE_TIME_MIN_VALUE;
1185 		pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
1186 	}
1187 
1188 	return hns3_pause_param_cfg(hw, mac->mac_addr,
1189 				    pause_trans_gap, pause_time);
1190 }
1191 
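/*
 * Worked example (illustrative only): for pause_time = 100, assuming 100
 * does not exceed 2 * HNS3_DEFAULT_PAUSE_TRANS_GAP, the middle branch
 * picks pause_trans_gap = 100 / 2 - 1 = 49, keeping the transmit gap
 * below half the pause time as required above.
 */
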
1192 static int
1193 hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)
1194 {
1195 	struct hns3_cmd_desc desc;
1196 
1197 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);
1198 
1199 	desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1200 		(rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1201 
1202 	return hns3_cmd_send(hw, &desc, 1);
1203 }
1204 
1205 static int
1206 hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)
1207 {
1208 	struct hns3_cmd_desc desc;
1209 	struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;
1210 
1211 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);
1212 
1213 	pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
1214 					(rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));
1215 
1216 	pfc->pri_en_bitmap = pfc_bitmap;
1217 
1218 	return hns3_cmd_send(hw, &desc, 1);
1219 }
1220 
1221 static int
1222 hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)
1223 {
1224 	struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
1225 	struct hns3_cmd_desc desc;
1226 
1227 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);
1228 
1229 	bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;
1230 
1231 	bp_to_qs_map_cmd->tc_id = tc;
1232 	bp_to_qs_map_cmd->qs_group_id = grp_id;
1233 	bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);
1234 
1235 	return hns3_cmd_send(hw, &desc, 1);
1236 }
1237 
1238 static void
1239 hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
1240 {
1241 	switch (hw->current_mode) {
1242 	case HNS3_FC_NONE:
1243 		*tx_en = false;
1244 		*rx_en = false;
1245 		break;
1246 	case HNS3_FC_RX_PAUSE:
1247 		*tx_en = false;
1248 		*rx_en = true;
1249 		break;
1250 	case HNS3_FC_TX_PAUSE:
1251 		*tx_en = true;
1252 		*rx_en = false;
1253 		break;
1254 	case HNS3_FC_FULL:
1255 		*tx_en = true;
1256 		*rx_en = true;
1257 		break;
1258 	default:
1259 		*tx_en = false;
1260 		*rx_en = false;
1261 		break;
1262 	}
1263 }
1264 
1265 static int
1266 hns3_mac_pause_setup_hw(struct hns3_hw *hw)
1267 {
1268 	bool tx_en, rx_en;
1269 
1270 	if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
1271 		hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1272 	else {
1273 		tx_en = false;
1274 		rx_en = false;
1275 	}
1276 
1277 	return hns3_mac_pause_en_cfg(hw, tx_en, rx_en);
1278 }
1279 
1280 static int
1281 hns3_pfc_setup_hw(struct hns3_hw *hw)
1282 {
1283 	bool tx_en, rx_en;
1284 
1285 	if (hw->current_fc_status == HNS3_FC_STATUS_PFC)
1286 		hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
1287 	else {
1288 		tx_en = false;
1289 		rx_en = false;
1290 	}
1291 
1292 	return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);
1293 }
1294 
1295 /*
1296  * Each TC has 1024 queue sets for back pressure, divided into 32 groups;
1297  * each group contains 32 queue sets, which can be represented by a
1298  * uint32_t bitmap.
1299  */
1300 static int
1301 hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)
1302 {
1303 	uint32_t qs_bitmap;
1304 	int ret;
1305 	int i;
1306 
1307 	for (i = 0; i < HNS3_BP_GRP_NUM; i++) {
1308 		uint8_t grp, sub_grp;
1309 		qs_bitmap = 0;
1310 
1311 		grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
1312 		sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,
1313 					 HNS3_BP_SUB_GRP_ID_S);
1314 		if (i == grp)
1315 			qs_bitmap |= (1 << sub_grp);
1316 
1317 		ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);
1318 		if (ret)
1319 			return ret;
1320 	}
1321 
1322 	return 0;
1323 }
1324 
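/*
 * Worked example (illustrative only): hns3_pri_q_qs_cfg() links qset i to
 * priority i, so the back-pressured qset index equals the TC number.
 * Assuming HNS3_BP_GRP_ID_M/S and HNS3_BP_SUB_GRP_ID_M/S split the index
 * into group and in-group bit, tc = 3 decodes to grp = 0 and sub_grp = 3:
 * group 0 is written with the bitmap BIT(3) and the other 31 groups
 * with 0.
 */
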
1325 static int
1326 hns3_dcb_bp_setup(struct hns3_hw *hw)
1327 {
1328 	int ret, i;
1329 
1330 	for (i = 0; i < hw->dcb_info.num_tc; i++) {
1331 		ret = hns3_bp_setup_hw(hw, i);
1332 		if (ret)
1333 			return ret;
1334 	}
1335 
1336 	return 0;
1337 }
1338 
1339 static int
1340 hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
1341 {
1342 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1343 	struct hns3_pf *pf = &hns->pf;
1344 	int ret;
1345 
1346 	ret = hns3_pause_param_setup_hw(hw, pf->pause_time);
1347 	if (ret) {
1348 		hns3_err(hw, "Fail to set pause parameter. ret = %d", ret);
1349 		return ret;
1350 	}
1351 
1352 	ret = hns3_mac_pause_setup_hw(hw);
1353 	if (ret) {
1354 		hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret);
1355 		return ret;
1356 	}
1357 
1358 	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
1359 	if (!hns3_dev_dcb_supported(hw))
1360 		return 0;
1361 
1362 	ret = hns3_pfc_setup_hw(hw);
1363 	if (ret) {
1364 		hns3_err(hw, "config pfc failed! ret = %d", ret);
1365 		return ret;
1366 	}
1367 
1368 	return hns3_dcb_bp_setup(hw);
1369 }
1370 
1371 static uint8_t
1372 hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)
1373 {
1374 	uint8_t pfc_map = 0;
1375 	uint8_t *prio_tc;
1376 	uint8_t i, j;
1377 
1378 	prio_tc = hw->dcb_info.prio_tc;
1379 	for (i = 0; i < hw->dcb_info.num_tc; i++) {
1380 		for (j = 0; j < HNS3_MAX_USER_PRIO; j++) {
1381 			if (prio_tc[j] == i && pfc_en & BIT(j)) {
1382 				pfc_map |= BIT(i);
1383 				break;
1384 			}
1385 		}
1386 	}
1387 
1388 	return pfc_map;
1389 }
1390 
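/*
 * Worked example (illustrative only): with prio_tc = {0, 0, 1, 1, 2, 2,
 * 3, 3} and pfc_en = BIT(2) | BIT(3) (PFC requested on user priorities
 * 2 and 3), both priorities map to TC1, so the returned undrop map is
 * BIT(1).
 */
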
1391 static void
1392 hns3_dcb_cfg_validate(struct hns3_adapter *hns, uint8_t *tc, bool *changed)
1393 {
1394 	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1395 	struct hns3_hw *hw = &hns->hw;
1396 	uint16_t nb_rx_q = hw->data->nb_rx_queues;
1397 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
1398 	uint8_t max_tc = 0;
1399 	uint8_t pfc_en;
1400 	int i;
1401 
1402 	dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1403 	for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
1404 		if (dcb_rx_conf->dcb_tc[i] != hw->dcb_info.prio_tc[i])
1405 			*changed = true;
1406 
1407 		if (dcb_rx_conf->dcb_tc[i] > max_tc)
1408 			max_tc = dcb_rx_conf->dcb_tc[i];
1409 	}
1410 	*tc = max_tc + 1;
1411 	if (*tc != hw->dcb_info.num_tc)
1412 		*changed = true;
1413 
1414 	/*
1415 	 * Ensure that the dcb information can be reconfigured after the
1416 	 * hns3_priority_flow_ctrl_set function has been called.
1417 	 */
1418 	if (hw->current_mode != HNS3_FC_FULL)
1419 		*changed = true;
1420 	pfc_en = RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1421 	if (hw->dcb_info.pfc_en != pfc_en)
1422 		*changed = true;
1423 
1424 	/* tx/rx queue number is reconfigured. */
1425 	if (nb_rx_q != hw->used_rx_queues || nb_tx_q != hw->used_tx_queues)
1426 		*changed = true;
1427 }
1428 
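/*
 * Note (illustrative): with dcb_rx_conf->nb_tcs = 4, the pfc_en mask
 * computed above is RTE_LEN2MASK(4, uint8_t) = 0x0F, i.e. PFC expected
 * on user priorities 0~3; any mismatch with the stored pfc_en marks the
 * configuration as changed.
 */
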
1429 static int
1430 hns3_dcb_info_cfg(struct hns3_adapter *hns)
1431 {
1432 	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1433 	struct hns3_pf *pf = &hns->pf;
1434 	struct hns3_hw *hw = &hns->hw;
1435 	uint8_t tc_bw, bw_rest;
1436 	uint8_t i, j;
1437 	int ret;
1438 
1439 	dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1440 	pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
1441 	pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;
1442 
1443 	/* Config pg0 */
1444 	memset(hw->dcb_info.pg_info, 0,
1445 	       sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
1446 	hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT;
1447 	hw->dcb_info.pg_info[0].pg_id = 0;
1448 	hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;
1449 	hw->dcb_info.pg_info[0].bw_limit = hw->max_tm_rate;
1450 	hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;
1451 
1452 	/* Each valid tc has the same bandwidth by default */
1453 	tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;
1454 	for (i = 0; i < hw->dcb_info.num_tc; i++)
1455 		hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;
1456 	/* To ensure the sum of tc_dwrr is equal to 100 */
1457 	bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;
1458 	for (j = 0; j < bw_rest; j++)
1459 		hw->dcb_info.pg_info[0].tc_dwrr[j]++;
1460 	for (; i < dcb_rx_conf->nb_tcs; i++)
1461 		hw->dcb_info.pg_info[0].tc_dwrr[i] = 0;
1462 
1463 	/* All tcs map to pg0 */
1464 	memset(hw->dcb_info.tc_info, 0,
1465 	       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
1466 	for (i = 0; i < hw->dcb_info.num_tc; i++) {
1467 		hw->dcb_info.tc_info[i].tc_id = i;
1468 		hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
1469 		hw->dcb_info.tc_info[i].pgid = 0;
1470 		hw->dcb_info.tc_info[i].bw_limit =
1471 					hw->dcb_info.pg_info[0].bw_limit;
1472 	}
1473 
1474 	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
1475 		hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];
1476 
1477 	ret = hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
1478 					       hw->data->nb_tx_queues);
1479 	if (ret)
1480 		hns3_err(hw, "update tc queue mapping failed, ret = %d.", ret);
1481 
1482 	return ret;
1483 }
1484 
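/*
 * Worked example (illustrative only): for hw->dcb_info.num_tc = 3,
 * tc_bw = 100 / 3 = 33 and bw_rest = 100 % 3 = 1, so tc_dwrr becomes
 * {34, 33, 33}, which sums to exactly 100 percent.
 */
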
1485 static int
1486 hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
1487 {
1488 	struct hns3_pf *pf = &hns->pf;
1489 	struct hns3_hw *hw = &hns->hw;
1490 	uint16_t nb_rx_q = hw->data->nb_rx_queues;
1491 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
1492 	uint8_t bit_map = 0;
1493 	uint8_t i;
1494 
1495 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1496 	    hw->dcb_info.num_pg != 1)
1497 		return -EINVAL;
1498 
1499 	if (nb_rx_q < num_tc) {
1500 		hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).",
1501 			 nb_rx_q, num_tc);
1502 		return -EINVAL;
1503 	}
1504 
1505 	if (nb_tx_q < num_tc) {
1506 		hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).",
1507 			 nb_tx_q, num_tc);
1508 		return -EINVAL;
1509 	}
1510 
1511 	/* Discontiguous TCs are currently not supported */
1512 	hw->dcb_info.num_tc = num_tc;
1513 	for (i = 0; i < hw->dcb_info.num_tc; i++)
1514 		bit_map |= BIT(i);
1515 
1516 	if (!bit_map) {
1517 		bit_map = 1;
1518 		hw->dcb_info.num_tc = 1;
1519 	}
1520 	hw->hw_tc_map = bit_map;
1521 
1522 	return hns3_dcb_info_cfg(hns);
1523 }
1524 
1525 static int
1526 hns3_dcb_hw_configure(struct hns3_adapter *hns)
1527 {
1528 	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
1529 	struct hns3_pf *pf = &hns->pf;
1530 	struct hns3_hw *hw = &hns->hw;
1531 	enum hns3_fc_status fc_status = hw->current_fc_status;
1532 	enum hns3_fc_mode current_mode = hw->current_mode;
1533 	uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1534 	int ret, status;
1535 
1536 	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
1537 	    pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
1538 		return -ENOTSUP;
1539 
1540 	ret = hns3_dcb_schd_setup_hw(hw);
1541 	if (ret) {
1542 		hns3_err(hw, "dcb schedule configure failed! ret = %d", ret);
1543 		return ret;
1544 	}
1545 
1546 	if (hw->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
1547 		dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
1548 		if (dcb_rx_conf->nb_tcs == 0)
1549 			hw->dcb_info.pfc_en = 1; /* tc0 only */
1550 		else
1551 			hw->dcb_info.pfc_en =
1552 			RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t);
1553 
1554 		hw->dcb_info.hw_pfc_map =
1555 				hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1556 
1557 		ret = hns3_buffer_alloc(hw);
1558 		if (ret)
1559 			return ret;
1560 
1561 		hw->current_fc_status = HNS3_FC_STATUS_PFC;
1562 		hw->current_mode = HNS3_FC_FULL;
1563 		ret = hns3_dcb_pause_setup_hw(hw);
1564 		if (ret) {
1565 			hns3_err(hw, "setup pfc failed! ret = %d", ret);
1566 			goto pfc_setup_fail;
1567 		}
1568 	} else {
1569 		/*
1570 		 * Although dcb_capability_en lacks the ETH_DCB_PFC_SUPPORT
1571 		 * flag, the DCB information, such as the number of tcs, is
1572 		 * still configured. Therefore, the packet buffer allocation
1573 		 * must be refreshed.
1574 		 */
1575 		ret = hns3_buffer_alloc(hw);
1576 		if (ret)
1577 			return ret;
1578 	}
1579 
1580 	return 0;
1581 
1582 pfc_setup_fail:
1583 	hw->current_mode = current_mode;
1584 	hw->current_fc_status = fc_status;
1585 	hw->dcb_info.hw_pfc_map = hw_pfc_map;
1586 	status = hns3_buffer_alloc(hw);
1587 	if (status)
1588 		hns3_err(hw, "recover packet buffer fail! status = %d", status);
1589 
1590 	return ret;
1591 }
1592 
1593 /*
1594  * hns3_dcb_configure - setup dcb related config
1595  * @hns: pointer to hns3 adapter
1596  * Returns 0 on success, negative value on failure.
1597  */
1598 int
1599 hns3_dcb_configure(struct hns3_adapter *hns)
1600 {
1601 	struct hns3_hw *hw = &hns->hw;
1602 	bool map_changed = false;
1603 	uint8_t num_tc = 0;
1604 	int ret;
1605 
1606 	hns3_dcb_cfg_validate(hns, &num_tc, &map_changed);
1607 	if (map_changed ||
1608 	    __atomic_load_n(&hw->reset.resetting,  __ATOMIC_RELAXED)) {
1609 		ret = hns3_dcb_info_update(hns, num_tc);
1610 		if (ret) {
1611 			hns3_err(hw, "dcb info update failed: %d", ret);
1612 			return ret;
1613 		}
1614 
1615 		ret = hns3_dcb_hw_configure(hns);
1616 		if (ret) {
1617 			hns3_err(hw, "dcb hw configure failed: %d", ret);
1618 			return ret;
1619 		}
1620 	}
1621 
1622 	return 0;
1623 }
1624 
1625 int
1626 hns3_dcb_init_hw(struct hns3_hw *hw)
1627 {
1628 	int ret;
1629 
1630 	ret = hns3_dcb_schd_setup_hw(hw);
1631 	if (ret) {
1632 		hns3_err(hw, "dcb schedule setup failed: %d", ret);
1633 		return ret;
1634 	}
1635 
1636 	ret = hns3_dcb_pause_setup_hw(hw);
1637 	if (ret)
1638 		hns3_err(hw, "PAUSE setup failed: %d", ret);
1639 
1640 	return ret;
1641 }
1642 
1643 int
1644 hns3_dcb_init(struct hns3_hw *hw)
1645 {
1646 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1647 	struct hns3_pf *pf = &hns->pf;
1648 	uint16_t default_tqp_num;
1649 	int ret;
1650 
1651 	PMD_INIT_FUNC_TRACE();
1652 
1653 	/*
1654 	 * Based on the 'adapter_state' identifier, the following branch is
1655 	 * only executed during driver initialization to set up the default
1656 	 * dcb configuration. Since the driver saves dcb-related information
1657 	 * before a reset is triggered, the reinit-dev stage of the reset
1658 	 * process must not enter this branch, otherwise that information
1659 	 * would be changed.
1660 	 */
1661 	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
1662 		hw->requested_mode = HNS3_FC_NONE;
1663 		hw->current_mode = hw->requested_mode;
1664 		pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
1665 		hw->current_fc_status = HNS3_FC_STATUS_NONE;
1666 
1667 		ret = hns3_dcb_info_init(hw);
1668 		if (ret) {
1669 			hns3_err(hw, "dcb info init failed, ret = %d.", ret);
1670 			return ret;
1671 		}
1672 
1673 		/*
1674 		 * The number of queues configured by default cannot exceed
1675 		 * the maximum number of queues for a single TC.
1676 		 */
1677 		default_tqp_num = RTE_MIN(hw->rss_size_max,
1678 					  hw->tqps_num / hw->dcb_info.num_tc);
1679 		ret = hns3_dcb_update_tc_queue_mapping(hw, default_tqp_num,
1680 						       default_tqp_num);
1681 		if (ret) {
1682 			hns3_err(hw,
1683 				 "update tc queue mapping failed, ret = %d.",
1684 				 ret);
1685 			return ret;
1686 		}
1687 	}
1688 
1689 	/*
1690 	 * The following function configures dcb hardware during both driver
1691 	 * initialization and the reset process. However, once driver
1692 	 * initialization has finished and a reset occurs, the driver
1693 	 * restores the dcb hardware configuration directly from the
1694 	 * dcb-related information maintained in software.
1695 	 */
1696 	ret = hns3_dcb_init_hw(hw);
1697 	if (ret) {
1698 		hns3_err(hw, "dcb init hardware failed, ret = %d.", ret);
1699 		return ret;
1700 	}
1701 
1702 	return 0;
1703 }
1704 
1705 static int
1706 hns3_update_queue_map_configure(struct hns3_adapter *hns)
1707 {
1708 	struct hns3_hw *hw = &hns->hw;
1709 	uint16_t nb_rx_q = hw->data->nb_rx_queues;
1710 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
1711 	int ret;
1712 
1713 	ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
1714 	if (ret) {
1715 		hns3_err(hw, "failed to update tc queue mapping, ret = %d.",
1716 			 ret);
1717 		return ret;
1718 	}
1719 	ret = hns3_q_to_qs_map(hw);
1720 	if (ret)
1721 		hns3_err(hw, "failed to map nq to qs, ret = %d.", ret);
1722 
1723 	return ret;
1724 }
1725 
1726 int
1727 hns3_dcb_cfg_update(struct hns3_adapter *hns)
1728 {
1729 	struct hns3_hw *hw = &hns->hw;
1730 	enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
1731 	int ret;
1732 
1733 	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
1734 		ret = hns3_dcb_configure(hns);
1735 		if (ret)
1736 			hns3_err(hw, "Failed to config dcb: %d", ret);
1737 	} else {
1738 		/*
1739 		 * Update the queue map without PFC configuration, because
1740 		 * the queues were reconfigured by the user.
1741 		 */
1742 		ret = hns3_update_queue_map_configure(hns);
1743 		if (ret)
1744 			hns3_err(hw,
1745 				 "Failed to update queue mapping configure: %d",
1746 				 ret);
1747 	}
1748 
1749 	return ret;
1750 }
1751 
1752 /*
1753  * hns3_dcb_pfc_enable - Enable priority flow control
1754  * @dev: pointer to ethernet device
1755  *
1756  * Configures the pfc settings for one priority.
1757  */
1758 int
1759 hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
1760 {
1761 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1762 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1763 	enum hns3_fc_status fc_status = hw->current_fc_status;
1764 	enum hns3_fc_mode current_mode = hw->current_mode;
1765 	uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
1766 	uint8_t pfc_en = hw->dcb_info.pfc_en;
1767 	uint8_t priority = pfc_conf->priority;
1768 	uint16_t pause_time = pf->pause_time;
1769 	int ret, status;
1770 
1771 	pf->pause_time = pfc_conf->fc.pause_time;
1772 	hw->current_mode = hw->requested_mode;
1773 	hw->current_fc_status = HNS3_FC_STATUS_PFC;
1774 	hw->dcb_info.pfc_en |= BIT(priority);
1775 	hw->dcb_info.hw_pfc_map =
1776 			hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
1777 	ret = hns3_buffer_alloc(hw);
1778 	if (ret)
1779 		goto pfc_setup_fail;
1780 
1781 	/*
1782 	 * The flow control mode of all UPs will be changed based on the
1783 	 * current_mode coming from the user.
1784 	 */
1785 	ret = hns3_dcb_pause_setup_hw(hw);
1786 	if (ret) {
1787 		hns3_err(hw, "enable pfc failed! ret = %d", ret);
1788 		goto pfc_setup_fail;
1789 	}
1790 
1791 	return 0;
1792 
1793 pfc_setup_fail:
1794 	hw->current_mode = current_mode;
1795 	hw->current_fc_status = fc_status;
1796 	pf->pause_time = pause_time;
1797 	hw->dcb_info.pfc_en = pfc_en;
1798 	hw->dcb_info.hw_pfc_map = hw_pfc_map;
1799 	status = hns3_buffer_alloc(hw);
1800 	if (status)
1801 		hns3_err(hw, "recover packet buffer fail: %d", status);
1802 
1803 	return ret;
1804 }
1805 
1806 /*
1807  * hns3_fc_enable - Enable MAC pause
1808  * @dev: pointer to ethernet device
1809  *
1810  * Configures the MAC pause settings.
1811  */
1812 int
1813 hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1814 {
1815 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1816 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1817 	enum hns3_fc_status fc_status = hw->current_fc_status;
1818 	enum hns3_fc_mode current_mode = hw->current_mode;
1819 	uint16_t pause_time = pf->pause_time;
1820 	int ret;
1821 
1822 	pf->pause_time = fc_conf->pause_time;
1823 	hw->current_mode = hw->requested_mode;
1824 
1825 	/*
1826 	 * In fact, current_fc_status is HNS3_FC_STATUS_NONE when the flow
1827 	 * control mode is configured as HNS3_FC_NONE.
1828 	 */
1829 	if (hw->current_mode == HNS3_FC_NONE)
1830 		hw->current_fc_status = HNS3_FC_STATUS_NONE;
1831 	else
1832 		hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;
1833 
1834 	ret = hns3_dcb_pause_setup_hw(hw);
1835 	if (ret) {
1836 		hns3_err(hw, "enable MAC Pause failed! ret = %d", ret);
1837 		goto setup_fc_fail;
1838 	}
1839 
1840 	return 0;
1841 
1842 setup_fc_fail:
1843 	hw->current_mode = current_mode;
1844 	hw->current_fc_status = fc_status;
1845 	pf->pause_time = pause_time;
1846 
1847 	return ret;
1848 }
1849