/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_io.h>
#include <rte_ethdev.h>

#include "hns3_logs.h"
#include "hns3_ethdev.h"
#include "hns3_dcb.h"

#define HNS3_SHAPER_BS_U_DEF	5
#define HNS3_SHAPER_BS_S_DEF	20
#define BW_MAX_PERCENT		100

/*
 * hns3_shaper_para_calc: calculate the IR parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @shaper_para: shaper parameter of IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0 on success, negative errno on failure
 */
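/*
 * Worked example (derived from the formula above, not from the hardware
 * spec): at port level Tick = 6 * 8 = 48, so the default IR_b = 126 with
 * IR_u = IR_s = 0 gives IR = 126 * 1 * 8 / 48 * 1000 = 21000 Mbps.
 * Bumping IR_s halves the rate (bigger denominator), bumping IR_u doubles
 * it (bigger numerator).
 */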
static int
hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
		      struct hns3_shaper_parameter *shaper_para)
{
#define SHAPER_DEFAULT_IR_B	126
#define DIVISOR_CLK		(1000 * 8)
#define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)

	const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	uint8_t ir_u_calc = 0;
	uint8_t ir_s_calc = 0;
	uint32_t denominator;
	uint32_t ir_calc;
	uint32_t tick;

	/* Calc tick */
	if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
		hns3_err(hw,
			 "shaper_level(%u) is greater than HNS3_SHAPER_LVL_CNT(%d)",
			 shaper_level, HNS3_SHAPER_LVL_CNT);
		return -EINVAL;
	}

	if (ir > hw->max_tm_rate) {
		hns3_err(hw, "rate(%u) exceeds the max rate(%u) supported by "
			 "the driver.", ir, hw->max_tm_rate);
		return -EINVAL;
	}

	tick = tick_array[shaper_level];

	/*
	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0.
	 * The formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
		}

		shaper_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				    (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/*
		 * Increasing the numerator to select the ir_u value.
		 * ir_u_calc reaches its maximum when ir_calc is at its
		 * minimum and ir at its maximum; ir_calc is smallest when
		 * tick is largest. Even then, ir_u_calc can only be
		 * increased up to eight by the while loop, when ir equals
		 * hw->max_tm_rate.
		 */
		uint32_t numerator;
		do {
			ir_u_calc++;
			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		} while (ir_calc < ir);

		if (ir_calc == ir) {
			shaper_para->ir_b = SHAPER_DEFAULT_IR_B;
		} else {
			--ir_u_calc;

			/*
			 * The maximum value of ir_u_calc in this branch is
			 * seven, so the denominator cannot be zero here.
			 */
			denominator = DIVISOR_CLK * (1 << ir_u_calc);
			shaper_para->ir_b =
				(ir * tick + (denominator >> 1)) / denominator;
		}
	}

	shaper_para->ir_u = ir_u_calc;
	shaper_para->ir_s = ir_s_calc;

	return 0;
}

static int
hns3_fill_pri_array(struct hns3_hw *hw, uint8_t *pri, uint8_t pri_id)
{
#define HNS3_HALF_BYTE_BIT_OFFSET 4
	uint8_t tc = hw->dcb_info.prio_tc[pri_id];

	if (tc >= hw->dcb_info.num_tc)
		return -EINVAL;

	/*
	 * The register for priority has four bytes. The first byte holds
	 * priority 0 and priority 1: the higher 4 bits stand for priority 1
	 * while the lower 4 bits stand for priority 0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
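	/*
	 * For example (illustration only): pri_id = 3 with tc = 2 writes
	 * 0x2 into the high nibble of pri[1], i.e. pri[3 >> 1] |= 2 << 4.
	 */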
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * HNS3_HALF_BYTE_BIT_OFFSET);

	return 0;
}

static int
hns3_up_to_tc_map(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc;
	uint8_t *pri = (uint8_t *)desc.data;
	uint8_t pri_id;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNS3_MAX_USER_PRIO; pri_id++) {
		ret = hns3_fill_pri_array(hw, pri, pri_id);
		if (ret)
			return ret;
	}

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_pg_to_pri_map_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t pri_bit_map)
{
	struct hns3_pg_to_pri_link_cmd *map;
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hns3_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_pg_to_pri_map(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	struct hns3_pg_info *pg_info;
	int ret, i;

	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	for (i = 0; i < hw->dcb_info.num_pg; i++) {
		/* Cfg pg to priority mapping */
		pg_info = &hw->dcb_info.pg_info[i];
		ret = hns3_pg_to_pri_map_cfg(hw, i, pg_info->tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int
hns3_qs_to_pri_map_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t pri)
{
	struct hns3_qs_to_pri_link_cmd *map;
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hns3_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = rte_cpu_to_le_16(qs_id);
	map->priority = pri;
	map->link_vld = HNS3_DCB_QS_PRI_LINK_VLD_MSK;

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr)
{
	struct hns3_qs_weight_cmd *weight;
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_WEIGHT, false);

	weight = (struct hns3_qs_weight_cmd *)desc.data;

	weight->qs_id = rte_cpu_to_le_16(qs_id);
	weight->dwrr = dwrr;

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
{
#define DEFAULT_TC_OFFSET	14
	struct hns3_ets_tc_weight_cmd *ets_weight;
	struct hns3_pg_info *pg_info;
	struct hns3_cmd_desc desc;
	uint8_t i;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_dcb_pri_weight_cfg(struct hns3_hw *hw, uint8_t pri_id, uint8_t dwrr)
{
	struct hns3_priority_weight_cmd *weight;
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hns3_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_dcb_pg_weight_cfg(struct hns3_hw *hw, uint8_t pg_id, uint8_t dwrr)
{
	struct hns3_pg_weight_cmd *weight;
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_WEIGHT, false);

	weight = (struct hns3_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_dcb_pg_schd_mode_cfg(struct hns3_hw *hw, uint8_t pg_id)
{
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hw->dcb_info.pg_info[pg_id].pg_sch_mode == HNS3_SCH_MODE_DWRR)
		desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = rte_cpu_to_le_32(pg_id);

	return hns3_cmd_send(hw, &desc, 1);
}

static uint32_t
hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
			   uint8_t bs_b, uint8_t bs_s)
{
	uint32_t shapping_para = 0;

	/* If ir_b is zero, IR is 0 Mbps, so return a zero shapping_para. */
	if (ir_b == 0)
		return shapping_para;

	hns3_dcb_set_field(shapping_para, IR_B, ir_b);
	hns3_dcb_set_field(shapping_para, IR_U, ir_u);
	hns3_dcb_set_field(shapping_para, IR_S, ir_s);
	hns3_dcb_set_field(shapping_para, BS_B, bs_b);
	hns3_dcb_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}

static int
hns3_dcb_port_shaper_cfg(struct hns3_hw *hw, uint32_t speed)
{
	struct hns3_port_shapping_cmd *shap_cfg_cmd;
	struct hns3_shaper_parameter shaper_parameter;
	uint32_t shapping_para;
	uint32_t ir_u, ir_b, ir_s;
	struct hns3_cmd_desc desc;
	int ret;

	ret = hns3_shaper_para_calc(hw, speed,
				    HNS3_SHAPER_LVL_PORT, &shaper_parameter);
	if (ret) {
		hns3_err(hw, "calculate shaper parameter failed: %d", ret);
		return ret;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hns3_port_shapping_cmd *)desc.data;

	ir_b = shaper_parameter.ir_b;
	ir_u = shaper_parameter.ir_u;
	ir_s = shaper_parameter.ir_s;
	shapping_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
						   HNS3_SHAPER_BS_U_DEF,
						   HNS3_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = rte_cpu_to_le_32(shapping_para);

	/*
	 * Configure port_rate and set the HNS3_TM_RATE_VLD_B bit of the flag
	 * field in hns3_port_shapping_cmd to ask the firmware to recalculate
	 * the shaping parameters. Whether the parameters are actually
	 * recalculated depends on the firmware version, so the driver still
	 * calculates them and passes them to the firmware for better
	 * compatibility.
	 */
	shap_cfg_cmd->port_rate = rte_cpu_to_le_32(speed);
	hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);

	return hns3_cmd_send(hw, &desc, 1);
}

int
hns3_port_shaper_update(struct hns3_hw *hw, uint32_t speed)
{
	int ret;

	ret = hns3_dcb_port_shaper_cfg(hw, speed);
	if (ret)
		hns3_err(hw, "configure port shaper failed: ret = %d", ret);

	return ret;
}

static int
hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
			 uint8_t pg_id, uint32_t shapping_para, uint32_t rate)
{
	struct hns3_pg_shapping_cmd *shap_cfg_cmd;
	enum hns3_opcode_type opcode;
	struct hns3_cmd_desc desc;

	opcode = bucket ? HNS3_OPC_TM_PG_P_SHAPPING :
		 HNS3_OPC_TM_PG_C_SHAPPING;
	hns3_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hns3_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = rte_cpu_to_le_32(shapping_para);

	/*
	 * Configure pg_rate and set the HNS3_TM_RATE_VLD_B bit of the flag
	 * field in hns3_pg_shapping_cmd to ask the firmware to recalculate
	 * the shaping parameters. Whether the parameters are actually
	 * recalculated depends on the firmware version, so the driver still
	 * calculates them and passes them to the firmware for better
	 * compatibility.
	 */
	shap_cfg_cmd->pg_rate = rte_cpu_to_le_32(rate);
	hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate)
{
	struct hns3_shaper_parameter shaper_parameter;
	uint32_t ir_u, ir_b, ir_s;
	uint32_t shaper_para;
	int ret;

	/* Calc shaper para */
	ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PG,
				    &shaper_parameter);
	if (ret) {
		hns3_err(hw, "calculate shaper parameter fail, ret = %d.",
			 ret);
		return ret;
	}

	shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
						 HNS3_SHAPER_BS_U_DEF,
						 HNS3_SHAPER_BS_S_DEF);

	ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, pg_id,
				       shaper_para, rate);
	if (ret) {
		hns3_err(hw, "config PG CIR shaper parameter fail, ret = %d.",
			 ret);
		return ret;
	}

	ir_b = shaper_parameter.ir_b;
	ir_u = shaper_parameter.ir_u;
	ir_s = shaper_parameter.ir_s;
	shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
						 HNS3_SHAPER_BS_U_DEF,
						 HNS3_SHAPER_BS_S_DEF);

	ret = hns3_dcb_pg_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, pg_id,
				       shaper_para, rate);
	if (ret) {
		hns3_err(hw, "config PG PIR shaper parameter fail, ret = %d.",
			 ret);
		return ret;
	}

	return 0;
}

static int
hns3_dcb_pg_shaper_cfg(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	uint32_t rate;
	uint8_t i;
	int ret;

	/* PG shaper cfg is only supported in TC-based scheduler mode */
	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* Cfg pg shaper rate for each pg */
	for (i = 0; i < hw->dcb_info.num_pg; i++) {
		rate = hw->dcb_info.pg_info[i].bw_limit;
		ret = hns3_pg_shaper_rate_cfg(hw, i, rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int
hns3_dcb_qs_schd_mode_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t mode)
{
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HNS3_SCH_MODE_DWRR)
		desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = rte_cpu_to_le_32(qs_id);

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_dcb_pri_schd_mode_cfg(struct hns3_hw *hw, uint8_t pri_id)
{
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hw->dcb_info.tc_info[pri_id].tc_sch_mode == HNS3_SCH_MODE_DWRR)
		desc.data[1] = rte_cpu_to_le_32(HNS3_DCB_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = rte_cpu_to_le_32(pri_id);

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_dcb_pri_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket,
			  uint8_t pri_id, uint32_t shapping_para, uint32_t rate)
{
	struct hns3_pri_shapping_cmd *shap_cfg_cmd;
	enum hns3_opcode_type opcode;
	struct hns3_cmd_desc desc;

	opcode = bucket ? HNS3_OPC_TM_PRI_P_SHAPPING :
		 HNS3_OPC_TM_PRI_C_SHAPPING;

	hns3_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hns3_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);

	/*
	 * Configure pri_rate and set the HNS3_TM_RATE_VLD_B bit of the flag
	 * field in hns3_pri_shapping_cmd to ask the firmware to recalculate
	 * the shaping parameters. Whether the parameters are actually
	 * recalculated depends on the firmware version, so the driver still
	 * calculates them and passes them to the firmware for better
	 * compatibility.
	 */
	shap_cfg_cmd->pri_rate = rte_cpu_to_le_32(rate);
	hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1);

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate)
{
	struct hns3_shaper_parameter shaper_parameter;
	uint32_t ir_u, ir_b, ir_s;
	uint32_t shaper_para;
	int ret;

	ret = hns3_shaper_para_calc(hw, rate, HNS3_SHAPER_LVL_PRI,
				    &shaper_parameter);
	if (ret) {
		hns3_err(hw, "calculate shaper parameter failed: %d.",
			 ret);
		return ret;
	}

	shaper_para = hns3_dcb_get_shapping_para(0, 0, 0,
						 HNS3_SHAPER_BS_U_DEF,
						 HNS3_SHAPER_BS_S_DEF);

	ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_C_BUCKET, tc_no,
					shaper_para, rate);
	if (ret) {
		hns3_err(hw,
			 "config priority CIR shaper parameter failed: %d.",
			 ret);
		return ret;
	}

	ir_b = shaper_parameter.ir_b;
	ir_u = shaper_parameter.ir_u;
	ir_s = shaper_parameter.ir_s;
	shaper_para = hns3_dcb_get_shapping_para(ir_b, ir_u, ir_s,
						 HNS3_SHAPER_BS_U_DEF,
						 HNS3_SHAPER_BS_S_DEF);

	ret = hns3_dcb_pri_shapping_cfg(hw, HNS3_DCB_SHAP_P_BUCKET, tc_no,
					shaper_para, rate);
	if (ret) {
		hns3_err(hw,
			 "config priority PIR shaper parameter failed: %d.",
			 ret);
		return ret;
	}

	return 0;
}

static int
hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	uint32_t rate;
	uint8_t i;
	int ret;

	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	for (i = 0; i < hw->dcb_info.num_tc; i++) {
		rate = hw->dcb_info.tc_info[i].bw_limit;
		ret = hns3_pri_shaper_rate_cfg(hw, i, rate);
		if (ret) {
			hns3_err(hw, "config pri shaper failed: %d.", ret);
			return ret;
		}
	}

	return 0;
}

static int
hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
{
	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
	uint16_t rx_qnum_per_tc;
	uint16_t used_rx_queues;
	uint16_t i;

	rx_qnum_per_tc = nb_rx_q / hw->num_tc;
	if (rx_qnum_per_tc > hw->rss_size_max) {
		hns3_err(hw, "rx queue number per tc (%u) is greater than "
			 "the value (%u) supported by hardware.",
			 rx_qnum_per_tc, hw->rss_size_max);
		return -EINVAL;
	}

	used_rx_queues = hw->num_tc * rx_qnum_per_tc;
	if (used_rx_queues != nb_rx_q) {
		hns3_err(hw, "rx queue number (%u) configured must be an "
			 "integral multiple of valid tc number (%u).",
			 nb_rx_q, hw->num_tc);
		return -EINVAL;
	}
	hw->alloc_rss_size = rx_qnum_per_tc;
	hw->used_rx_queues = used_rx_queues;

	/*
	 * When the rss size is changed, we need to update the rss
	 * redirection table maintained by the driver. Besides, during the
	 * entire reset process, we need to ensure that the rss table
	 * information is not overwritten and configured directly to the
	 * hardware in the RESET_STAGE_RESTORE stage of the reset process.
	 */
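	/*
	 * For example (illustrative values): with alloc_rss_size = 4 the
	 * redirection table below becomes 0, 1, 2, 3, 0, 1, 2, 3, ..., so
	 * RSS spreads flows evenly across the queues of each TC.
	 */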
	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
		for (i = 0; i < hw->rss_ind_tbl_size; i++)
			rss_cfg->rss_indirection_tbl[i] =
						i % hw->alloc_rss_size;
	}

	return 0;
}

static int
hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_tx_q)
{
	struct hns3_tc_queue_info *tc_queue;
	uint16_t used_tx_queues;
	uint16_t tx_qnum_per_tc;
	uint8_t i;

	tx_qnum_per_tc = nb_tx_q / hw->num_tc;
	used_tx_queues = hw->num_tc * tx_qnum_per_tc;
	if (used_tx_queues != nb_tx_q) {
		hns3_err(hw, "tx queue number (%u) configured must be an "
			 "integral multiple of valid tc number (%u).",
			 nb_tx_q, hw->num_tc);
		return -EINVAL;
	}

	hw->used_tx_queues = used_tx_queues;
	hw->tx_qnum_per_tc = tx_qnum_per_tc;
	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		tc_queue = &hw->tc_queue[i];
		if (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {
			tc_queue->enable = true;
			tc_queue->tqp_offset = i * hw->tx_qnum_per_tc;
			tc_queue->tqp_count = hw->tx_qnum_per_tc;
			tc_queue->tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			tc_queue->enable = false;
			tc_queue->tqp_offset = 0;
			tc_queue->tqp_count = 0;
			tc_queue->tc = 0;
		}
	}

	return 0;
}

uint8_t
hns3_txq_mapped_tc_get(struct hns3_hw *hw, uint16_t txq_no)
{
	struct hns3_tc_queue_info *tc_queue;
	uint8_t i;

	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
		tc_queue = &hw->tc_queue[i];
		if (!tc_queue->enable)
			continue;

		if (txq_no >= tc_queue->tqp_offset &&
		    txq_no < tc_queue->tqp_offset + tc_queue->tqp_count)
			return i;
	}

	/* Return TC0 by default */
	return 0;
}

int
hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q)
{
	int ret;

	if (nb_rx_q < hw->num_tc) {
		hns3_err(hw, "number of Rx queues(%u) is less than number of TC(%u).",
			 nb_rx_q, hw->num_tc);
		return -EINVAL;
	}

	if (nb_tx_q < hw->num_tc) {
		hns3_err(hw, "number of Tx queues(%u) is less than number of TC(%u).",
			 nb_tx_q, hw->num_tc);
		return -EINVAL;
	}

	ret = hns3_set_rss_size(hw, nb_rx_q);
	if (ret)
		return ret;

	return hns3_tc_queue_mapping_cfg(hw, nb_tx_q);
}

static int
hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
				 uint16_t nb_tx_q)
{
	hw->num_tc = hw->dcb_info.num_tc;

	return hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
}

int
hns3_dcb_info_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int i, k;

	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
	    hw->dcb_info.num_pg != 1)
		return -EINVAL;

	/* Initialize PG information */
	memset(hw->dcb_info.pg_info, 0,
	       sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
	for (i = 0; i < hw->dcb_info.num_pg; i++) {
		hw->dcb_info.pg_dwrr[i] = i ? 0 : BW_MAX_PERCENT;
		hw->dcb_info.pg_info[i].pg_id = i;
		hw->dcb_info.pg_info[i].pg_sch_mode = HNS3_SCH_MODE_DWRR;
		hw->dcb_info.pg_info[i].bw_limit = hw->max_tm_rate;

		if (i != 0)
			continue;

		hw->dcb_info.pg_info[i].tc_bit_map = hw->hw_tc_map;
		for (k = 0; k < hw->dcb_info.num_tc; k++)
			hw->dcb_info.pg_info[i].tc_dwrr[k] = BW_MAX_PERCENT;
	}

	/* Map all UPs to TC0 */
	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
		hw->dcb_info.prio_tc[i] = 0;

	/* Initialize TC information */
	memset(hw->dcb_info.tc_info, 0,
	       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
	for (i = 0; i < hw->dcb_info.num_tc; i++) {
		hw->dcb_info.tc_info[i].tc_id = i;
		hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
		hw->dcb_info.tc_info[i].pgid = 0;
		hw->dcb_info.tc_info[i].bw_limit =
					hw->dcb_info.pg_info[0].bw_limit;
	}

	return 0;
}

static int
hns3_dcb_lvl2_schd_mode_cfg(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret, i;

	/* Only configured in TC-based scheduler mode */
	if (pf->tx_sch_mode == HNS3_FLAG_VNET_BASE_SCH_MODE)
		return -EINVAL;

	for (i = 0; i < hw->dcb_info.num_pg; i++) {
		ret = hns3_dcb_pg_schd_mode_cfg(hw, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int
hns3_dcb_lvl34_schd_mode_cfg(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint8_t i;
	int ret;

	if (pf->tx_sch_mode == HNS3_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hw->dcb_info.num_tc; i++) {
			ret = hns3_dcb_pri_schd_mode_cfg(hw, i);
			if (ret)
				return ret;

			ret = hns3_dcb_qs_schd_mode_cfg(hw, i,
							HNS3_SCH_MODE_DWRR);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int
hns3_dcb_schd_mode_cfg(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_lvl2_schd_mode_cfg(hw);
	if (ret) {
		hns3_err(hw, "config lvl2_schd_mode failed: %d", ret);
		return ret;
	}

	ret = hns3_dcb_lvl34_schd_mode_cfg(hw);
	if (ret)
		hns3_err(hw, "config lvl34_schd_mode failed: %d", ret);

	return ret;
}

static int
hns3_dcb_pri_tc_base_dwrr_cfg(struct hns3_hw *hw)
{
	struct hns3_pg_info *pg_info;
	uint8_t dwrr;
	int ret, i;

	for (i = 0; i < hw->dcb_info.num_tc; i++) {
		pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hns3_dcb_pri_weight_cfg(hw, i, dwrr);
		if (ret) {
			hns3_err(hw, "fail to send priority weight cmd: %d, ret = %d",
				 i, ret);
			return ret;
		}

		ret = hns3_dcb_qs_weight_cfg(hw, i, BW_MAX_PERCENT);
		if (ret) {
			hns3_err(hw, "fail to send qs_weight cmd: %d, ret = %d",
				 i, ret);
			return ret;
		}
	}

	return 0;
}

static int
hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t version;
	int ret;

	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	ret = hns3_dcb_pri_tc_base_dwrr_cfg(hw);
	if (ret)
		return ret;

	if (!hns3_dev_get_support(hw, DCB))
		return 0;

	ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
	if (ret == -EOPNOTSUPP) {
		version = hw->fw_version;
		hns3_warn(hw,
			  "fw %lu.%lu.%lu.%lu doesn't support ets tc weight cmd",
			  hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
					 HNS3_FW_VERSION_BYTE3_S),
			  hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
					 HNS3_FW_VERSION_BYTE2_S),
			  hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
					 HNS3_FW_VERSION_BYTE1_S),
			  hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
					 HNS3_FW_VERSION_BYTE0_S));
		ret = 0;
	}

	return ret;
}

static int
hns3_dcb_pg_dwrr_cfg(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret, i;

	/* PG DWRR is only supported in TC-based scheduler mode */
	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* Cfg dwrr weight for each pg */
	for (i = 0; i < hw->dcb_info.num_pg; i++) {
		ret = hns3_dcb_pg_weight_cfg(hw, i, hw->dcb_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
hns3_dcb_dwrr_cfg(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_pg_dwrr_cfg(hw);
	if (ret) {
		hns3_err(hw, "config pg_dwrr failed: %d", ret);
		return ret;
	}

	ret = hns3_dcb_pri_dwrr_cfg(hw);
	if (ret)
		hns3_err(hw, "config pri_dwrr failed: %d", ret);

	return ret;
}

static int
hns3_dcb_shaper_cfg(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_port_shaper_cfg(hw, hw->mac.link_speed);
	if (ret) {
		hns3_err(hw, "config port shaper failed: %d", ret);
		return ret;
	}

	ret = hns3_dcb_pg_shaper_cfg(hw);
	if (ret) {
		hns3_err(hw, "config pg shaper failed: %d", ret);
		return ret;
	}

	return hns3_dcb_pri_shaper_cfg(hw);
}

static int
hns3_q_to_qs_map_cfg(struct hns3_hw *hw, uint16_t q_id, uint16_t qs_id)
{
	struct hns3_nq_to_qs_link_cmd *map;
	struct hns3_cmd_desc desc;
	uint16_t tmp_qs_id = 0;
	uint16_t qs_id_l;
	uint16_t qs_id_h;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hns3_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = rte_cpu_to_le_16(q_id);

	/*
	 * The network engine with revision_id 0x21 uses bits 0~9 of qs_id to
	 * configure the qset_id, so we need to convert qs_id to the following
	 * format to support qset_id > 1024:
	 * qs_id:    |   15    | 14 ~ 10 |  9 ~ 0  |
	 *               /         /        \    \
	 *              /         /          \    \
	 * qset_id:  | 15 ~ 11 |   10   |  9 ~ 0  |
	 *           | qs_id_h |  vld   | qs_id_l |
	 */
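	/*
	 * Example, assuming the field layout sketched above: qset_id 1025
	 * (binary 100 0000 0001) keeps qs_id_l = 1 in bits 9~0, moves
	 * qs_id_h = 1 up past the valid flag to bit 11, and the valid flag
	 * itself is OR-ed in below via HNS3_DCB_Q_QS_LINK_VLD_MSK.
	 */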
	qs_id_l = hns3_get_field(qs_id, HNS3_DCB_QS_ID_L_MSK,
				 HNS3_DCB_QS_ID_L_S);
	qs_id_h = hns3_get_field(qs_id, HNS3_DCB_QS_ID_H_MSK,
				 HNS3_DCB_QS_ID_H_S);
	hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_L_MSK, HNS3_DCB_QS_ID_L_S,
		       qs_id_l);
	hns3_set_field(tmp_qs_id, HNS3_DCB_QS_ID_H_EXT_MSK,
		       HNS3_DCB_QS_ID_H_EXT_S, qs_id_h);
	map->qset_id = rte_cpu_to_le_16(tmp_qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_q_to_qs_map(struct hns3_hw *hw)
{
	struct hns3_tc_queue_info *tc_queue;
	uint16_t q_id;
	uint32_t i, j;
	int ret;

	for (i = 0; i < hw->num_tc; i++) {
		tc_queue = &hw->tc_queue[i];
		for (j = 0; j < tc_queue->tqp_count; j++) {
			q_id = tc_queue->tqp_offset + j;
			ret = hns3_q_to_qs_map_cfg(hw, q_id, i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int
hns3_pri_q_qs_cfg(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint32_t i;
	int ret;

	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* Cfg qs -> pri mapping */
	for (i = 0; i < hw->num_tc; i++) {
		ret = hns3_qs_to_pri_map_cfg(hw, i, i);
		if (ret) {
			hns3_err(hw, "qs_to_pri mapping fail: %d", ret);
			return ret;
		}
	}

	/* Cfg q -> qs mapping */
	ret = hns3_q_to_qs_map(hw);
	if (ret)
		hns3_err(hw, "nq_to_qs mapping fail: %d", ret);

	return ret;
}

static int
hns3_dcb_map_cfg(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_up_to_tc_map(hw);
	if (ret) {
		hns3_err(hw, "up_to_tc mapping fail: %d", ret);
		return ret;
	}

	ret = hns3_pg_to_pri_map(hw);
	if (ret) {
		hns3_err(hw, "pg_to_pri mapping fail: %d", ret);
		return ret;
	}

	return hns3_pri_q_qs_cfg(hw);
}

static int
hns3_dcb_schd_setup_hw(struct hns3_hw *hw)
{
	int ret;

	/* Cfg dcb mapping */
	ret = hns3_dcb_map_cfg(hw);
	if (ret)
		return ret;

	/* Cfg dcb shaper */
	ret = hns3_dcb_shaper_cfg(hw);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hns3_dcb_dwrr_cfg(hw);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hns3_dcb_schd_mode_cfg(hw);
}

static int
hns3_pause_param_cfg(struct hns3_hw *hw, const uint8_t *addr,
		     uint8_t pause_trans_gap, uint16_t pause_trans_time)
{
	struct hns3_cfg_pause_param_cmd *pause_param;
	struct hns3_cmd_desc desc;

	pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, false);

	memcpy(pause_param->mac_addr, addr, RTE_ETHER_ADDR_LEN);
	memcpy(pause_param->mac_addr_extra, addr, RTE_ETHER_ADDR_LEN);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = rte_cpu_to_le_16(pause_trans_time);

	return hns3_cmd_send(hw, &desc, 1);
}

int
hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr)
{
	struct hns3_cfg_pause_param_cmd *pause_param;
	struct hns3_cmd_desc desc;
	uint16_t trans_time;
	uint8_t trans_gap;
	int ret;

	pause_param = (struct hns3_cfg_pause_param_cmd *)desc.data;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PARA, true);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = rte_le_to_cpu_16(pause_param->pause_trans_time);

	return hns3_pause_param_cfg(hw, mac_addr, trans_gap, trans_time);
}

static int
hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
{
#define PAUSE_TIME_DIV_BY	2
#define PAUSE_TIME_MIN_VALUE	0x4

	struct hns3_mac *mac = &hw->mac;
	uint8_t pause_trans_gap;

	/*
	 * The pause transmit gap must be less than "pause_time / 2",
	 * otherwise the behavior of the MAC is undefined.
	 */
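	/*
	 * Example, assuming HNS3_DEFAULT_PAUSE_TRANS_GAP lands pause_time in
	 * the middle branch below: pause_time = 100 yields a transmit gap of
	 * 100 / 2 - 1 = 49, while a pause_time below 4 is first raised to
	 * the minimum of 4, giving a gap of 4 / 2 - 1 = 1.
	 */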
	if (pause_time > PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
		pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
	else if (pause_time >= PAUSE_TIME_MIN_VALUE &&
		 pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
		pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
	else {
		hns3_warn(hw, "pause_time(%u) is adjusted to 4", pause_time);
		pause_time = PAUSE_TIME_MIN_VALUE;
		pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
	}

	return hns3_pause_param_cfg(hw, mac->mac_addr,
				    pause_trans_gap, pause_time);
}

static int
hns3_mac_pause_en_cfg(struct hns3_hw *hw, bool tx, bool rx)
{
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = rte_cpu_to_le_32((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
					(rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_pfc_pause_en_cfg(struct hns3_hw *hw, uint8_t pfc_bitmap, bool tx, bool rx)
{
	struct hns3_cmd_desc desc;
	struct hns3_pfc_en_cmd *pfc = (struct hns3_pfc_en_cmd *)desc.data;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = (uint8_t)((tx ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
					 (rx ? HNS3_RX_MAC_PAUSE_EN_MSK : 0));

	pfc->pri_en_bitmap = pfc_bitmap;

	return hns3_cmd_send(hw, &desc, 1);
}

static int
hns3_qs_bp_cfg(struct hns3_hw *hw, uint8_t tc, uint8_t grp_id, uint32_t bit_map)
{
	struct hns3_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hns3_cmd_desc desc;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_BP_TO_QSET_MAPPING, false);

	bp_to_qs_map_cmd = (struct hns3_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = rte_cpu_to_le_32(bit_map);

	return hns3_cmd_send(hw, &desc, 1);
}

static void
hns3_get_rx_tx_en_status(struct hns3_hw *hw, bool *tx_en, bool *rx_en)
{
	switch (hw->requested_fc_mode) {
	case HNS3_FC_NONE:
		*tx_en = false;
		*rx_en = false;
		break;
	case HNS3_FC_RX_PAUSE:
		*tx_en = false;
		*rx_en = true;
		break;
	case HNS3_FC_TX_PAUSE:
		*tx_en = true;
		*rx_en = false;
		break;
	case HNS3_FC_FULL:
		*tx_en = true;
		*rx_en = true;
		break;
	default:
		*tx_en = false;
		*rx_en = false;
		break;
	}
}

static int
hns3_mac_pause_setup_hw(struct hns3_hw *hw)
{
	bool tx_en, rx_en;

	if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE)
		hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
	else {
		tx_en = false;
		rx_en = false;
	}

	return hns3_mac_pause_en_cfg(hw, tx_en, rx_en);
}

static int
hns3_pfc_setup_hw(struct hns3_hw *hw)
{
	bool tx_en, rx_en;

	if (hw->current_fc_status == HNS3_FC_STATUS_PFC)
		hns3_get_rx_tx_en_status(hw, &tx_en, &rx_en);
	else {
		tx_en = false;
		rx_en = false;
	}

	return hns3_pfc_pause_en_cfg(hw, hw->dcb_info.pfc_en, tx_en, rx_en);
}

/*
 * Each TC has 1024 queue sets to back-pressure; they are divided into
 * 32 groups, each group containing 32 queue sets that can be represented
 * by a uint32_t bitmap.
 */
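/*
 * Example, assuming qset_id == tc as programmed in hns3_pri_q_qs_cfg():
 * for tc = 5, grp = 5 / 32 = 0 and sub_grp = 5 % 32 = 5, so only group 0
 * gets a bitmap with bit 5 set while the other 31 groups get an empty
 * bitmap.
 */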
static int
hns3_bp_setup_hw(struct hns3_hw *hw, uint8_t tc)
{
	uint32_t qs_bitmap;
	int ret;
	int i;

	for (i = 0; i < HNS3_BP_GRP_NUM; i++) {
		uint8_t grp, sub_grp;
		qs_bitmap = 0;

		grp = hns3_get_field(tc, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
		sub_grp = hns3_get_field(tc, HNS3_BP_SUB_GRP_ID_M,
					 HNS3_BP_SUB_GRP_ID_S);
		if (i == grp)
			qs_bitmap |= (1 << sub_grp);

		ret = hns3_qs_bp_cfg(hw, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}

static int
hns3_dcb_bp_setup(struct hns3_hw *hw)
{
	int ret, i;

	for (i = 0; i < hw->dcb_info.num_tc; i++) {
		ret = hns3_bp_setup_hw(hw, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int
hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	int ret;

	ret = hns3_pause_param_setup_hw(hw, pf->pause_time);
	if (ret) {
		hns3_err(hw, "Fail to set pause parameter. ret = %d", ret);
		return ret;
	}

	ret = hns3_mac_pause_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "Fail to setup MAC pause. ret = %d", ret);
		return ret;
	}

	/* Only DCB-capable devices support the qset back pressure and PFC commands */
	if (!hns3_dev_get_support(hw, DCB))
		return 0;

	ret = hns3_pfc_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "config pfc failed! ret = %d", ret);
		return ret;
	}

	return hns3_dcb_bp_setup(hw);
}

static uint8_t
hns3_dcb_undrop_tc_map(struct hns3_hw *hw, uint8_t pfc_en)
{
	uint8_t pfc_map = 0;
	uint8_t *prio_tc;
	uint8_t i, j;

	prio_tc = hw->dcb_info.prio_tc;
	for (i = 0; i < hw->dcb_info.num_tc; i++) {
		for (j = 0; j < HNS3_MAX_USER_PRIO; j++) {
			if (prio_tc[j] == i && pfc_en & BIT(j)) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	return pfc_map;
}

static uint8_t
hns3_dcb_parse_num_tc(struct hns3_adapter *hns)
{
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct hns3_hw *hw = &hns->hw;
	uint8_t max_tc_id = 0;
	int i;

	dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
	for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
		if (dcb_rx_conf->dcb_tc[i] > max_tc_id)
			max_tc_id = dcb_rx_conf->dcb_tc[i];
	}

	/* The number of TCs is equal to max_tc_id plus 1. */
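	/*
	 * E.g. the (illustrative) mapping dcb_tc = {0, 0, 1, 1, 2, 2, 0, 0}
	 * gives max_tc_id = 2 and hence 3 TCs.
	 */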
	return max_tc_id + 1;
}

static int
hns3_dcb_info_cfg(struct hns3_adapter *hns)
{
	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint8_t tc_bw, bw_rest;
	uint8_t i, j;
	int ret;

	dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf;
	pf->local_max_tc = (uint8_t)dcb_rx_conf->nb_tcs;
	pf->pfc_max = (uint8_t)dcb_rx_conf->nb_tcs;

	/* Config pg0 */
	memset(hw->dcb_info.pg_info, 0,
	       sizeof(struct hns3_pg_info) * HNS3_PG_NUM);
	hw->dcb_info.pg_dwrr[0] = BW_MAX_PERCENT;
	hw->dcb_info.pg_info[0].pg_id = 0;
	hw->dcb_info.pg_info[0].pg_sch_mode = HNS3_SCH_MODE_DWRR;
	hw->dcb_info.pg_info[0].bw_limit = hw->max_tm_rate;
	hw->dcb_info.pg_info[0].tc_bit_map = hw->hw_tc_map;

	/* Each valid TC gets the same bandwidth by default */
	tc_bw = BW_MAX_PERCENT / hw->dcb_info.num_tc;
	for (i = 0; i < hw->dcb_info.num_tc; i++)
		hw->dcb_info.pg_info[0].tc_dwrr[i] = tc_bw;
	/* To ensure the sum of tc_dwrr is equal to 100 */
	bw_rest = BW_MAX_PERCENT % hw->dcb_info.num_tc;
	for (j = 0; j < bw_rest; j++)
		hw->dcb_info.pg_info[0].tc_dwrr[j]++;
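	/*
	 * E.g. num_tc = 3 gives tc_bw = 33 and bw_rest = 1, so the weights
	 * become 34/33/33 and still sum to BW_MAX_PERCENT.
	 */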
	for (; i < dcb_rx_conf->nb_tcs; i++)
		hw->dcb_info.pg_info[0].tc_dwrr[i] = 0;

	/* All TCs map to pg0 */
	memset(hw->dcb_info.tc_info, 0,
	       sizeof(struct hns3_tc_info) * HNS3_MAX_TC_NUM);
	for (i = 0; i < hw->dcb_info.num_tc; i++) {
		hw->dcb_info.tc_info[i].tc_id = i;
		hw->dcb_info.tc_info[i].tc_sch_mode = HNS3_SCH_MODE_DWRR;
		hw->dcb_info.tc_info[i].pgid = 0;
		hw->dcb_info.tc_info[i].bw_limit =
					hw->dcb_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNS3_MAX_USER_PRIO; i++)
		hw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];

	ret = hns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,
					       hw->data->nb_tx_queues);
	if (ret)
		hns3_err(hw, "update tc queue mapping failed, ret = %d.", ret);

	return ret;
}

static int
hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
{
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	uint16_t nb_rx_q = hw->data->nb_rx_queues;
	uint16_t nb_tx_q = hw->data->nb_tx_queues;
	uint8_t bit_map = 0;
	uint8_t i;

	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
	    hw->dcb_info.num_pg != 1)
		return -EINVAL;

	if (nb_rx_q < num_tc) {
		hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).",
			 nb_rx_q, num_tc);
		return -EINVAL;
	}

	if (nb_tx_q < num_tc) {
		hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).",
			 nb_tx_q, num_tc);
		return -EINVAL;
	}

	/* Discontiguous TCs are currently not supported */
	hw->dcb_info.num_tc = num_tc;
	for (i = 0; i < hw->dcb_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hw->dcb_info.num_tc = 1;
	}
	hw->hw_tc_map = bit_map;

	return hns3_dcb_info_cfg(hns);
}

static int
hns3_dcb_hw_configure(struct hns3_adapter *hns)
{
	struct hns3_pf *pf = &hns->pf;
	struct hns3_hw *hw = &hns->hw;
	enum hns3_fc_status fc_status = hw->current_fc_status;
	enum hns3_fc_mode requested_fc_mode = hw->requested_fc_mode;
	uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
	uint8_t pfc_en = hw->dcb_info.pfc_en;
	int ret;

	if (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&
	    pf->tx_sch_mode != HNS3_FLAG_VNET_BASE_SCH_MODE)
		return -ENOTSUP;

	ret = hns3_dcb_schd_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "dcb schedule configure failed! ret = %d", ret);
		return ret;
	}

	if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
		hw->dcb_info.pfc_en =
			RTE_LEN2MASK((uint8_t)HNS3_MAX_USER_PRIO, uint8_t);

		hw->dcb_info.hw_pfc_map =
			hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);

		hw->current_fc_status = HNS3_FC_STATUS_PFC;
		hw->requested_fc_mode = HNS3_FC_FULL;
	} else {
		hw->current_fc_status = HNS3_FC_STATUS_NONE;
		hw->requested_fc_mode = HNS3_FC_NONE;
		hw->dcb_info.pfc_en = 0;
		hw->dcb_info.hw_pfc_map = 0;
	}

	ret = hns3_buffer_alloc(hw);
	if (ret)
		goto cfg_fail;

	ret = hns3_dcb_pause_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "setup pfc failed! ret = %d", ret);
		goto cfg_fail;
	}

	return 0;

cfg_fail:
	hw->requested_fc_mode = requested_fc_mode;
	hw->current_fc_status = fc_status;
	hw->dcb_info.pfc_en = pfc_en;
	hw->dcb_info.hw_pfc_map = hw_pfc_map;

	return ret;
}

/*
 * hns3_dcb_configure - setup dcb related config
 * @hns: pointer to hns3 adapter
 * Returns 0 on success, negative value on failure.
 */
int
hns3_dcb_configure(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	uint8_t num_tc;
	int ret;

	num_tc = hns3_dcb_parse_num_tc(hns);
	ret = hns3_dcb_info_update(hns, num_tc);
	if (ret) {
		hns3_err(hw, "dcb info update failed: %d", ret);
		return ret;
	}

	ret = hns3_dcb_hw_configure(hns);
	if (ret) {
		hns3_err(hw, "dcb hw configure failed: %d", ret);
		return ret;
	}

	return 0;
}

int
hns3_dcb_init_hw(struct hns3_hw *hw)
{
	int ret;

	ret = hns3_dcb_schd_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "dcb schedule setup failed: %d", ret);
		return ret;
	}

	ret = hns3_dcb_pause_setup_hw(hw);
	if (ret)
		hns3_err(hw, "PAUSE setup failed: %d", ret);

	return ret;
}

int
hns3_dcb_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_pf *pf = &hns->pf;
	uint16_t default_tqp_num;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Based on the 'adapter_state' identifier, the following branch is
	 * executed only to initialize the default DCB configuration during
	 * driver initialization. Because the driver saves DCB-related
	 * information before a reset is triggered, the re-init stage of the
	 * reset process must not enter this branch, otherwise that
	 * information would be overwritten.
	 */
	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
		hw->requested_fc_mode = HNS3_FC_NONE;
		pf->pause_time = HNS3_DEFAULT_PAUSE_TRANS_TIME;
		hw->current_fc_status = HNS3_FC_STATUS_NONE;

		ret = hns3_dcb_info_init(hw);
		if (ret) {
			hns3_err(hw, "dcb info init failed, ret = %d.", ret);
			return ret;
		}

		/*
		 * The number of queues configured by default cannot exceed
		 * the maximum number of queues for a single TC.
		 */
		default_tqp_num = RTE_MIN(hw->rss_size_max,
					  hw->tqps_num / hw->dcb_info.num_tc);
		ret = hns3_dcb_update_tc_queue_mapping(hw, default_tqp_num,
						       default_tqp_num);
		if (ret) {
			hns3_err(hw,
				 "update tc queue mapping failed, ret = %d.",
				 ret);
			return ret;
		}
	}

	/*
	 * The following function configures the DCB hardware during both
	 * driver initialization and the reset process. However, once driver
	 * initialization has finished and a reset occurs, the driver
	 * restores the DCB hardware configuration directly from the
	 * DCB-related information maintained in software.
	 */
	ret = hns3_dcb_init_hw(hw);
	if (ret) {
		hns3_err(hw, "dcb init hardware failed, ret = %d.", ret);
		return ret;
	}

	return 0;
}

int
hns3_update_queue_map_configure(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
	uint16_t nb_rx_q = hw->data->nb_rx_queues;
	uint16_t nb_tx_q = hw->data->nb_tx_queues;
	int ret;

	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
		return 0;

	ret = hns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);
	if (ret) {
		hns3_err(hw, "failed to update tc queue mapping, ret = %d.",
			 ret);
		return ret;
	}
	ret = hns3_q_to_qs_map(hw);
	if (ret)
		hns3_err(hw, "failed to map nq to qs, ret = %d.", ret);

	return ret;
}

static void
hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
{
	switch (mode) {
	case RTE_ETH_FC_NONE:
		hw->requested_fc_mode = HNS3_FC_NONE;
		break;
	case RTE_ETH_FC_RX_PAUSE:
		hw->requested_fc_mode = HNS3_FC_RX_PAUSE;
		break;
	case RTE_ETH_FC_TX_PAUSE:
		hw->requested_fc_mode = HNS3_FC_TX_PAUSE;
		break;
	case RTE_ETH_FC_FULL:
		hw->requested_fc_mode = HNS3_FC_FULL;
		break;
	default:
		hw->requested_fc_mode = HNS3_FC_NONE;
		hns3_warn(hw, "fc_mode(%u) is out of range and is "
			  "configured to RTE_ETH_FC_NONE", mode);
		break;
	}
}

/*
 * hns3_dcb_pfc_enable - Enable priority flow control
 * @dev: pointer to ethernet device
 *
 * Configures the pfc settings for one priority.
 */
int
hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_fc_status fc_status = hw->current_fc_status;
	enum hns3_fc_mode old_fc_mode = hw->requested_fc_mode;
	uint8_t hw_pfc_map = hw->dcb_info.hw_pfc_map;
	uint8_t pfc_en = hw->dcb_info.pfc_en;
	uint8_t priority = pfc_conf->priority;
	uint16_t pause_time = pf->pause_time;
	int ret;

	hw->dcb_info.pfc_en |= BIT(priority);
	hw->dcb_info.hw_pfc_map =
			hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en);
	ret = hns3_buffer_alloc(hw);
	if (ret) {
		hns3_err(hw, "update packet buffer failed, ret = %d", ret);
		goto buffer_alloc_fail;
	}

	pf->pause_time = pfc_conf->fc.pause_time;
	hns3_get_fc_mode(hw, pfc_conf->fc.mode);
	if (hw->requested_fc_mode == HNS3_FC_NONE)
		hw->current_fc_status = HNS3_FC_STATUS_NONE;
	else
		hw->current_fc_status = HNS3_FC_STATUS_PFC;

	/*
	 * The flow control mode of all UPs will be changed based on the
	 * requested_fc_mode coming from the user.
	 */
	ret = hns3_dcb_pause_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "enable pfc failed! ret = %d", ret);
		goto pfc_setup_fail;
	}

	return 0;

pfc_setup_fail:
	hw->requested_fc_mode = old_fc_mode;
	hw->current_fc_status = fc_status;
	pf->pause_time = pause_time;
buffer_alloc_fail:
	hw->dcb_info.pfc_en = pfc_en;
	hw->dcb_info.hw_pfc_map = hw_pfc_map;

	return ret;
}

/*
 * hns3_fc_enable - Enable MAC pause
 * @dev: pointer to ethernet device
 *
 * Configures the MAC pause settings.
 */
int
hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum hns3_fc_mode old_fc_mode = hw->requested_fc_mode;
	enum hns3_fc_status fc_status = hw->current_fc_status;
	uint16_t pause_time = pf->pause_time;
	int ret;

	pf->pause_time = fc_conf->pause_time;
	hns3_get_fc_mode(hw, fc_conf->mode);

	/*
	 * current_fc_status is HNS3_FC_STATUS_NONE when the flow control
	 * mode is configured to HNS3_FC_NONE.
	 */
	if (hw->requested_fc_mode == HNS3_FC_NONE)
		hw->current_fc_status = HNS3_FC_STATUS_NONE;
	else
		hw->current_fc_status = HNS3_FC_STATUS_MAC_PAUSE;

	ret = hns3_dcb_pause_setup_hw(hw);
	if (ret) {
		hns3_err(hw, "enable MAC Pause failed! ret = %d", ret);
		goto setup_fc_fail;
	}

	return 0;

setup_fc_fail:
	hw->requested_fc_mode = old_fc_mode;
	hw->current_fc_status = fc_status;
	pf->pause_time = pause_time;

	return ret;
}