1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020-2021 HiSilicon Limited.
3  */
4 
5 #include <rte_malloc.h>
6 
7 #include "hns3_common.h"
8 #include "hns3_dcb.h"
9 #include "hns3_logs.h"
10 #include "hns3_tm.h"
11 
12 static inline uint32_t
13 hns3_tm_max_tx_queues_get(struct rte_eth_dev *dev)
14 {
15 	/*
16 	 * This function is called at PCI device probe stage, when rte_eth_dev_info_get()
17 	 * cannot be used to get max_tx_queues (rte_eth_devices is not set up yet),
18 	 * so call hns3_dev_infos_get() directly.
19 	 */
20 	struct rte_eth_dev_info dev_info;
21 
22 	memset(&dev_info, 0, sizeof(dev_info));
23 	(void)hns3_dev_infos_get(dev, &dev_info);
24 	return RTE_MIN(dev_info.max_tx_queues, RTE_MAX_QUEUES_PER_PORT);
25 }
26 
27 void
28 hns3_tm_conf_init(struct rte_eth_dev *dev)
29 {
30 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
31 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
32 	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
33 
34 	if (!hns3_dev_get_support(hw, TM))
35 		return;
36 
37 	pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
38 	pf->tm_conf.nb_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
39 	pf->tm_conf.nb_shaper_profile_max = 1 + HNS3_MAX_TC_NUM;
40 
41 	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
42 	pf->tm_conf.nb_shaper_profile = 0;
43 
44 	pf->tm_conf.root = NULL;
45 	TAILQ_INIT(&pf->tm_conf.tc_list);
46 	TAILQ_INIT(&pf->tm_conf.queue_list);
47 	pf->tm_conf.nb_tc_node = 0;
48 	pf->tm_conf.nb_queue_node = 0;
49 
50 	pf->tm_conf.committed = false;
51 }
52 
53 void
54 hns3_tm_conf_uninit(struct rte_eth_dev *dev)
55 {
56 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
57 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
58 	struct hns3_tm_shaper_profile *shaper_profile;
59 	struct hns3_tm_node *tm_node;
60 
61 	if (!hns3_dev_get_support(hw, TM))
62 		return;
63 
64 	if (pf->tm_conf.nb_queue_node > 0) {
65 		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
66 			TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
67 			rte_free(tm_node);
68 		}
69 		pf->tm_conf.nb_queue_node = 0;
70 	}
71 
72 	if (pf->tm_conf.nb_tc_node > 0) {
73 		while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
74 			TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
75 			rte_free(tm_node);
76 		}
77 		pf->tm_conf.nb_tc_node = 0;
78 	}
79 
80 	if (pf->tm_conf.root != NULL) {
81 		rte_free(pf->tm_conf.root);
82 		pf->tm_conf.root = NULL;
83 	}
84 
85 	if (pf->tm_conf.nb_shaper_profile > 0) {
86 		while ((shaper_profile =
87 		       TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
88 			TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
89 				     shaper_profile, node);
90 			rte_free(shaper_profile);
91 		}
92 		pf->tm_conf.nb_shaper_profile = 0;
93 	}
94 
95 	pf->tm_conf.nb_leaf_nodes_max = 0;
96 	pf->tm_conf.nb_nodes_max = 0;
97 	pf->tm_conf.nb_shaper_profile_max = 0;
98 }
99 
100 static inline uint64_t
101 hns3_tm_rate_convert_firmware2tm(uint32_t firmware_rate)
102 {
103 #define FIRMWARE_TO_TM_RATE_SCALE	125000
104 	/* tm rate unit is Bps, firmware rate is Mbps */
105 	return ((uint64_t)firmware_rate) * FIRMWARE_TO_TM_RATE_SCALE;
106 }
107 
108 static inline uint32_t
109 hns3_tm_rate_convert_tm2firmware(uint64_t tm_rate)
110 {
111 #define TM_TO_FIRMWARE_RATE_SCALE	125000
112 	/* tm rate unit is Bps, firmware rate is Mbps */
113 	return (uint32_t)(tm_rate / TM_TO_FIRMWARE_RATE_SCALE);
114 }
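
/*
 * Worked example for the two helpers above: the TM API expresses rates in
 * bytes per second (Bps) while the firmware works in Mbps, and
 * 1 Mbps = 1000000 bit/s = 125000 byte/s, hence the 125000 scale factor.
 * For instance, a 10000 Mbps (10 Gbps) firmware rate is reported to the TM
 * API as 10000 * 125000 = 1250000000 Bps, and converts back to 10000.
 */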
115 
116 static int
117 hns3_tm_capabilities_get(struct rte_eth_dev *dev,
118 			 struct rte_tm_capabilities *cap,
119 			 struct rte_tm_error *error)
120 {
121 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
122 	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
123 
124 	if (cap == NULL || error == NULL)
125 		return -EINVAL;
126 
127 	error->type = RTE_TM_ERROR_TYPE_NONE;
128 
129 	memset(cap, 0, sizeof(struct rte_tm_capabilities));
130 
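	/* 1 port node + HNS3_MAX_TC_NUM TC nodes + 1 leaf node per Tx queue */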
131 	cap->n_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues;
132 	cap->n_levels_max = HNS3_TM_NODE_LEVEL_MAX;
133 	cap->non_leaf_nodes_identical = 1;
134 	cap->leaf_nodes_identical = 1;
135 	cap->shaper_n_max = 1 + HNS3_MAX_TC_NUM;
136 	cap->shaper_private_n_max = 1 + HNS3_MAX_TC_NUM;
137 	cap->shaper_private_rate_max =
138 		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
139 
140 	cap->sched_n_children_max = max_tx_queues;
141 	cap->sched_sp_n_priorities_max = 1;
142 	cap->sched_wfq_weight_max = 1;
143 
144 	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
145 	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
146 
147 	return 0;
148 }
149 
150 static struct hns3_tm_shaper_profile *
151 hns3_tm_shaper_profile_search(struct rte_eth_dev *dev,
152 			      uint32_t shaper_profile_id)
153 {
154 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
155 	struct hns3_shaper_profile_list *shaper_profile_list =
156 		&pf->tm_conf.shaper_profile_list;
157 	struct hns3_tm_shaper_profile *shaper_profile;
158 
159 	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
160 		if (shaper_profile_id == shaper_profile->shaper_profile_id)
161 			return shaper_profile;
162 	}
163 
164 	return NULL;
165 }
166 
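/*
 * Only the peak rate of a shaper profile is used by this driver; committed
 * rate/size, peak bucket size, packet length adjustment and packet mode are
 * all rejected.
 */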
167 static int
168 hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev,
169 				   const struct rte_tm_shaper_params *profile,
170 				   struct rte_tm_error *error)
171 {
172 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
173 
174 	if (profile->committed.rate) {
175 		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
176 		error->message = "committed rate not supported";
177 		return -EINVAL;
178 	}
179 
180 	if (profile->committed.size) {
181 		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
182 		error->message = "committed bucket size not supported";
183 		return -EINVAL;
184 	}
185 
186 	if (profile->peak.rate >
187 	    hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate)) {
188 		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
189 		error->message = "peak rate too large";
190 		return -EINVAL;
191 	}
192 
193 	if (profile->peak.rate < hns3_tm_rate_convert_firmware2tm(1)) {
194 		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
195 		error->message = "peak rate must be at least 1 Mbps";
196 		return -EINVAL;
197 	}
198 
199 	if (profile->peak.size) {
200 		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
201 		error->message = "peak bucket size not supported";
202 		return -EINVAL;
203 	}
204 
205 	if (profile->pkt_length_adjust) {
206 		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
207 		error->message = "packet length adjustment not supported";
208 		return -EINVAL;
209 	}
210 
211 	if (profile->packet_mode) {
212 		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE;
213 		error->message = "packet mode not supported";
214 		return -EINVAL;
215 	}
216 
217 	return 0;
218 }
219 
220 static int
221 hns3_tm_shaper_profile_add(struct rte_eth_dev *dev,
222 			   uint32_t shaper_profile_id,
223 			   const struct rte_tm_shaper_params *profile,
224 			   struct rte_tm_error *error)
225 {
226 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
227 	struct hns3_tm_shaper_profile *shaper_profile;
228 	int ret;
229 
230 	if (profile == NULL || error == NULL)
231 		return -EINVAL;
232 
233 	if (pf->tm_conf.nb_shaper_profile >=
234 	    pf->tm_conf.nb_shaper_profile_max) {
235 		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
236 		error->message = "too many profiles";
237 		return -EINVAL;
238 	}
239 
240 	ret = hns3_tm_shaper_profile_param_check(dev, profile, error);
241 	if (ret)
242 		return ret;
243 
244 	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
245 	if (shaper_profile) {
246 		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
247 		error->message = "profile ID already exists";
248 		return -EINVAL;
249 	}
250 
251 	shaper_profile = rte_zmalloc("hns3_tm_shaper_profile",
252 				     sizeof(struct hns3_tm_shaper_profile),
253 				     0);
254 	if (shaper_profile == NULL)
255 		return -ENOMEM;
256 
257 	shaper_profile->shaper_profile_id = shaper_profile_id;
258 	memcpy(&shaper_profile->profile, profile,
259 	       sizeof(struct rte_tm_shaper_params));
260 	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
261 			  shaper_profile, node);
262 	pf->tm_conf.nb_shaper_profile++;
263 
264 	return 0;
265 }
266 
267 static int
268 hns3_tm_shaper_profile_del(struct rte_eth_dev *dev,
269 			   uint32_t shaper_profile_id,
270 			   struct rte_tm_error *error)
271 {
272 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
273 	struct hns3_tm_shaper_profile *shaper_profile;
274 
275 	if (error == NULL)
276 		return -EINVAL;
277 
278 	shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
279 	if (shaper_profile == NULL) {
280 		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
281 		error->message = "profile ID does not exist";
282 		return -EINVAL;
283 	}
284 
285 	if (shaper_profile->reference_count) {
286 		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
287 		error->message = "profile in use";
288 		return -EINVAL;
289 	}
290 
291 	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
292 	rte_free(shaper_profile);
293 	pf->tm_conf.nb_shaper_profile--;
294 
295 	return 0;
296 }
297 
298 static struct hns3_tm_node *
299 hns3_tm_node_search(struct rte_eth_dev *dev,
300 		    uint32_t node_id,
301 		    enum hns3_tm_node_type *node_type)
302 {
303 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
304 	struct hns3_tm_node_list *queue_list = &pf->tm_conf.queue_list;
305 	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
306 	struct hns3_tm_node *tm_node;
307 
308 	if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
309 		*node_type = HNS3_TM_NODE_TYPE_PORT;
310 		return pf->tm_conf.root;
311 	}
312 
313 	TAILQ_FOREACH(tm_node, tc_list, node) {
314 		if (tm_node->id == node_id) {
315 			*node_type = HNS3_TM_NODE_TYPE_TC;
316 			return tm_node;
317 		}
318 	}
319 
320 	TAILQ_FOREACH(tm_node, queue_list, node) {
321 		if (tm_node->id == node_id) {
322 			*node_type = HNS3_TM_NODE_TYPE_QUEUE;
323 			return tm_node;
324 		}
325 	}
326 
327 	return NULL;
328 }
329 
330 static int
331 hns3_tm_nonleaf_node_param_check(struct rte_eth_dev *dev,
332 				 const struct rte_tm_node_params *params,
333 				 struct rte_tm_error *error)
334 {
335 	struct hns3_tm_shaper_profile *shaper_profile;
336 
337 	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
338 		shaper_profile = hns3_tm_shaper_profile_search(dev,
339 				 params->shaper_profile_id);
340 		if (shaper_profile == NULL) {
341 			error->type =
342 				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
343 			error->message = "shaper profile does not exist";
344 			return -EINVAL;
345 		}
346 	}
347 
348 	if (params->nonleaf.wfq_weight_mode) {
349 		error->type =
350 			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
351 		error->message = "WFQ not supported";
352 		return -EINVAL;
353 	}
354 
355 	if (params->nonleaf.n_sp_priorities != 1) {
356 		error->type =
357 			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
358 		error->message = "SP priority not supported";
359 		return -EINVAL;
360 	}
361 
362 	return 0;
363 }
364 
365 static int
366 hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev __rte_unused,
367 			      const struct rte_tm_node_params *params,
368 			      struct rte_tm_error *error)
369 
370 {
371 	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
372 		error->type =
373 			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
374 		error->message = "shaper not supported";
375 		return -EINVAL;
376 	}
377 
378 	if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP) {
379 		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
380 		error->message = "congestion management not supported";
381 		return -EINVAL;
382 	}
383 
384 	if (params->leaf.wred.wred_profile_id != RTE_TM_WRED_PROFILE_ID_NONE) {
385 		error->type =
386 			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
387 		error->message = "WRED not supported";
388 		return -EINVAL;
389 	}
390 
391 	if (params->leaf.wred.shared_wred_context_id) {
392 		error->type =
393 			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
394 		error->message = "WRED not supported";
395 		return -EINVAL;
396 	}
397 
398 	if (params->leaf.wred.n_shared_wred_contexts) {
399 		error->type =
400 			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
401 		error->message = "WRED not supported";
402 		return -EINVAL;
403 	}
404 
405 	return 0;
406 }
407 
408 static int
409 hns3_tm_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
410 			 uint32_t priority, uint32_t weight,
411 			 const struct rte_tm_node_params *params,
412 			 struct rte_tm_error *error)
413 {
414 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
415 	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
416 
417 	if (node_id == RTE_TM_NODE_ID_NULL) {
418 		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
419 		error->message = "invalid node id";
420 		return -EINVAL;
421 	}
422 
423 	if (hns3_tm_node_search(dev, node_id, &node_type)) {
424 		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
425 		error->message = "node id already used";
426 		return -EINVAL;
427 	}
428 
429 	if (priority) {
430 		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
431 		error->message = "priority should be 0";
432 		return -EINVAL;
433 	}
434 
435 	if (weight != 1) {
436 		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
437 		error->message = "weight must be 1";
438 		return -EINVAL;
439 	}
440 
441 	if (params->shared_shaper_id) {
442 		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
443 		error->message = "shared shaper not supported";
444 		return -EINVAL;
445 	}
446 	if (params->n_shared_shapers) {
447 		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
448 		error->message = "shared shaper not supported";
449 		return -EINVAL;
450 	}
451 
452 	if (node_id >= pf->tm_conf.nb_leaf_nodes_max)
453 		return hns3_tm_nonleaf_node_param_check(dev, params, error);
454 	else
455 		return hns3_tm_leaf_node_param_check(dev, params, error);
456 }
457 
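/*
 * Node ID layout used by this driver (enforced by hns3_tm_node_param_check()
 * and the node add functions below):
 *   [0, nb_leaf_nodes_max)                queue (leaf) nodes, ID == Tx queue index
 *   [nb_leaf_nodes_max, nb_nodes_max - 1) TC nodes
 *   nb_nodes_max - 1                      the single port (root) node
 */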
458 static int
459 hns3_tm_port_node_add(struct rte_eth_dev *dev, uint32_t node_id,
460 		      uint32_t level_id, const struct rte_tm_node_params *params,
461 		      struct rte_tm_error *error)
462 {
463 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
464 	struct hns3_tm_node *tm_node;
465 
466 	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
467 	    level_id != HNS3_TM_NODE_LEVEL_PORT) {
468 		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
469 		error->message = "wrong level";
470 		return -EINVAL;
471 	}
472 
473 	if (node_id != pf->tm_conf.nb_nodes_max - 1) {
474 		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
475 		error->message = "invalid port node ID";
476 		return -EINVAL;
477 	}
478 
479 	if (pf->tm_conf.root) {
480 		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
481 		error->message = "already have a root";
482 		return -EINVAL;
483 	}
484 
485 	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
486 	if (tm_node == NULL)
487 		return -ENOMEM;
488 
489 	tm_node->id = node_id;
490 	tm_node->reference_count = 0;
491 	tm_node->parent = NULL;
492 	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
493 				  params->shaper_profile_id);
494 	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
495 	pf->tm_conf.root = tm_node;
496 
497 	if (tm_node->shaper_profile)
498 		tm_node->shaper_profile->reference_count++;
499 
500 	return 0;
501 }
502 
503 static int
504 hns3_tm_tc_node_add(struct rte_eth_dev *dev, uint32_t node_id,
505 		    uint32_t level_id, struct hns3_tm_node *parent_node,
506 		    const struct rte_tm_node_params *params,
507 		    struct rte_tm_error *error)
508 {
509 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
510 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
511 	struct hns3_tm_node *tm_node;
512 
513 	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
514 	    level_id != HNS3_TM_NODE_LEVEL_TC) {
515 		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
516 		error->message = "wrong level";
517 		return -EINVAL;
518 	}
519 
520 	if (node_id >= pf->tm_conf.nb_nodes_max - 1 ||
521 	    node_id < pf->tm_conf.nb_leaf_nodes_max ||
522 	    hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id) >= hw->num_tc) {
523 		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
524 		error->message = "invalid tc node ID";
525 		return -EINVAL;
526 	}
527 
528 	if (pf->tm_conf.nb_tc_node >= hw->num_tc) {
529 		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
530 		error->message = "too many TCs";
531 		return -EINVAL;
532 	}
533 
534 	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
535 	if (tm_node == NULL)
536 		return -ENOMEM;
537 
538 	tm_node->id = node_id;
539 	tm_node->reference_count = 0;
540 	tm_node->parent = parent_node;
541 	tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev,
542 					params->shaper_profile_id);
543 	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
544 	TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list, tm_node, node);
545 	pf->tm_conf.nb_tc_node++;
546 	tm_node->parent->reference_count++;
547 
548 	if (tm_node->shaper_profile)
549 		tm_node->shaper_profile->reference_count++;
550 
551 	return 0;
552 }
553 
554 static int
555 hns3_tm_queue_node_add(struct rte_eth_dev *dev, uint32_t node_id,
556 		       uint32_t level_id, struct hns3_tm_node *parent_node,
557 		       const struct rte_tm_node_params *params,
558 		       struct rte_tm_error *error)
559 {
560 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
561 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
562 	struct hns3_tm_node *tm_node;
563 
564 	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
565 	    level_id != HNS3_TM_NODE_LEVEL_QUEUE) {
566 		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
567 		error->message = "wrong level";
568 		return -EINVAL;
569 	}
570 
571 	/* note: dev->data->nb_tx_queues <= max_tx_queues */
572 	if (node_id >= dev->data->nb_tx_queues) {
573 		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
574 		error->message = "invalid queue node ID";
575 		return -EINVAL;
576 	}
577 
578 	if (hns3_txq_mapped_tc_get(hw, node_id) !=
579 	    hns3_tm_calc_node_tc_no(&pf->tm_conf, parent_node->id)) {
580 		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
581 		error->message = "queue's TC does not match parent's TC";
582 		return -EINVAL;
583 	}
584 
585 	tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0);
586 	if (tm_node == NULL)
587 		return -ENOMEM;
588 
589 	tm_node->id = node_id;
590 	tm_node->reference_count = 0;
591 	tm_node->parent = parent_node;
592 	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
593 	TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list, tm_node, node);
594 	pf->tm_conf.nb_queue_node++;
595 	tm_node->parent->reference_count++;
596 
597 	return 0;
598 }
599 
600 static int
601 hns3_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
602 		 uint32_t parent_node_id, uint32_t priority,
603 		 uint32_t weight, uint32_t level_id,
604 		 const struct rte_tm_node_params *params,
605 		 struct rte_tm_error *error)
606 {
607 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
608 	enum hns3_tm_node_type parent_node_type = HNS3_TM_NODE_TYPE_MAX;
609 	struct hns3_tm_node *parent_node;
610 	int ret;
611 
612 	if (params == NULL || error == NULL)
613 		return -EINVAL;
614 
615 	if (pf->tm_conf.committed) {
616 		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
617 		error->message = "already committed";
618 		return -EINVAL;
619 	}
620 
621 	ret = hns3_tm_node_param_check(dev, node_id, priority, weight,
622 				       params, error);
623 	if (ret)
624 		return ret;
625 
626 	/* root node, which has no parent */
627 	if (parent_node_id == RTE_TM_NODE_ID_NULL)
628 		return hns3_tm_port_node_add(dev, node_id, level_id,
629 					     params, error);
630 
631 	parent_node = hns3_tm_node_search(dev, parent_node_id,
632 					  &parent_node_type);
633 	if (parent_node == NULL) {
634 		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
635 		error->message = "parent does not exist";
636 		return -EINVAL;
637 	}
638 
639 	if (parent_node_type != HNS3_TM_NODE_TYPE_PORT &&
640 	    parent_node_type != HNS3_TM_NODE_TYPE_TC) {
641 		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
642 		error->message = "parent is not port or TC";
643 		return -EINVAL;
644 	}
645 
646 	if (parent_node_type == HNS3_TM_NODE_TYPE_PORT)
647 		return hns3_tm_tc_node_add(dev, node_id, level_id,
648 					   parent_node, params, error);
649 	else
650 		return hns3_tm_queue_node_add(dev, node_id, level_id,
651 					      parent_node, params, error);
652 }
653 
654 static void
655 hns3_tm_node_do_delete(struct hns3_pf *pf,
656 		       enum hns3_tm_node_type node_type,
657 		       struct hns3_tm_node *tm_node)
658 {
659 	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
660 		if (tm_node->shaper_profile)
661 			tm_node->shaper_profile->reference_count--;
662 		rte_free(tm_node);
663 		pf->tm_conf.root = NULL;
664 		return;
665 	}
666 
667 	if (tm_node->shaper_profile)
668 		tm_node->shaper_profile->reference_count--;
669 	tm_node->parent->reference_count--;
670 	if (node_type == HNS3_TM_NODE_TYPE_TC) {
671 		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
672 		pf->tm_conf.nb_tc_node--;
673 	} else {
674 		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
675 		pf->tm_conf.nb_queue_node--;
676 	}
677 	rte_free(tm_node);
678 }
679 
680 static int
681 hns3_tm_node_delete(struct rte_eth_dev *dev,
682 		    uint32_t node_id,
683 		    struct rte_tm_error *error)
684 {
685 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
686 	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
687 	struct hns3_tm_node *tm_node;
688 
689 	if (error == NULL)
690 		return -EINVAL;
691 
692 	if (pf->tm_conf.committed) {
693 		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
694 		error->message = "already committed";
695 		return -EINVAL;
696 	}
697 
698 	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
699 	if (tm_node == NULL) {
700 		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
701 		error->message = "no such node";
702 		return -EINVAL;
703 	}
704 
705 	if (tm_node->reference_count) {
706 		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
707 		error->message = "cannot delete a node which has children";
708 		return -EINVAL;
709 	}
710 
711 	hns3_tm_node_do_delete(pf, node_type, tm_node);
712 
713 	return 0;
714 }
715 
716 static int
717 hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
718 		      int *is_leaf, struct rte_tm_error *error)
719 {
720 	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
721 	struct hns3_tm_node *tm_node;
722 
723 	if (is_leaf == NULL || error == NULL)
724 		return -EINVAL;
725 
726 	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
727 	if (tm_node == NULL) {
728 		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
729 		error->message = "no such node";
730 		return -EINVAL;
731 	}
732 
733 	if (node_type == HNS3_TM_NODE_TYPE_QUEUE)
734 		*is_leaf = true;
735 	else
736 		*is_leaf = false;
737 
738 	return 0;
739 }
740 
741 static void
742 hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev,
743 				       uint32_t level_id,
744 				       struct rte_tm_level_capabilities *cap)
745 {
746 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
747 	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
748 
749 	if (level_id == HNS3_TM_NODE_LEVEL_PORT) {
750 		cap->n_nodes_max = 1;
751 		cap->n_nodes_nonleaf_max = 1;
752 		cap->n_nodes_leaf_max = 0;
753 	} else {
754 		cap->n_nodes_max = HNS3_MAX_TC_NUM;
755 		cap->n_nodes_nonleaf_max = HNS3_MAX_TC_NUM;
756 		cap->n_nodes_leaf_max = 0;
757 	}
758 
759 	cap->non_leaf_nodes_identical = 1;
760 	cap->leaf_nodes_identical = 1;
761 
762 	cap->nonleaf.shaper_private_supported = true;
763 	cap->nonleaf.shaper_private_dual_rate_supported = false;
764 	cap->nonleaf.shaper_private_rate_min = 0;
765 	cap->nonleaf.shaper_private_rate_max =
766 		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
767 	cap->nonleaf.shaper_shared_n_max = 0;
768 	if (level_id == HNS3_TM_NODE_LEVEL_PORT)
769 		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
770 	else
771 		cap->nonleaf.sched_n_children_max = max_tx_queues;
772 	cap->nonleaf.sched_sp_n_priorities_max = 1;
773 	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
774 	cap->nonleaf.sched_wfq_n_groups_max = 0;
775 	cap->nonleaf.sched_wfq_weight_max = 1;
776 	cap->nonleaf.stats_mask = 0;
777 }
778 
779 static void
780 hns3_tm_leaf_level_capabilities_get(struct rte_eth_dev *dev,
781 				    struct rte_tm_level_capabilities *cap)
782 {
783 	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
784 
785 	cap->n_nodes_max = max_tx_queues;
786 	cap->n_nodes_nonleaf_max = 0;
787 	cap->n_nodes_leaf_max = max_tx_queues;
788 
789 	cap->non_leaf_nodes_identical = 1;
790 	cap->leaf_nodes_identical = 1;
791 
792 	cap->leaf.shaper_private_supported = false;
793 	cap->leaf.shaper_private_dual_rate_supported = false;
794 	cap->leaf.shaper_private_rate_min = 0;
795 	cap->leaf.shaper_private_rate_max = 0;
796 	cap->leaf.shaper_shared_n_max = 0;
797 	cap->leaf.cman_head_drop_supported = false;
798 	cap->leaf.cman_wred_context_private_supported = false;
799 	cap->leaf.cman_wred_context_shared_n_max = 0;
800 	cap->leaf.stats_mask = 0;
801 }
802 
803 static int
804 hns3_tm_level_capabilities_get(struct rte_eth_dev *dev,
805 			       uint32_t level_id,
806 			       struct rte_tm_level_capabilities *cap,
807 			       struct rte_tm_error *error)
808 {
809 	if (cap == NULL || error == NULL)
810 		return -EINVAL;
811 
812 	if (level_id >= HNS3_TM_NODE_LEVEL_MAX) {
813 		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
814 		error->message = "too deep level";
815 		return -EINVAL;
816 	}
817 
818 	memset(cap, 0, sizeof(struct rte_tm_level_capabilities));
819 
820 	if (level_id != HNS3_TM_NODE_LEVEL_QUEUE)
821 		hns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap);
822 	else
823 		hns3_tm_leaf_level_capabilities_get(dev, cap);
824 
825 	return 0;
826 }
827 
828 static void
829 hns3_tm_nonleaf_node_capabilities_get(struct rte_eth_dev *dev,
830 				      enum hns3_tm_node_type node_type,
831 				      struct rte_tm_node_capabilities *cap)
832 {
833 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
834 	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
835 
836 	cap->shaper_private_supported = true;
837 	cap->shaper_private_dual_rate_supported = false;
838 	cap->shaper_private_rate_min = 0;
839 	cap->shaper_private_rate_max =
840 		hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate);
841 	cap->shaper_shared_n_max = 0;
842 
843 	if (node_type == HNS3_TM_NODE_TYPE_PORT)
844 		cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM;
845 	else
846 		cap->nonleaf.sched_n_children_max = max_tx_queues;
847 	cap->nonleaf.sched_sp_n_priorities_max = 1;
848 	cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
849 	cap->nonleaf.sched_wfq_n_groups_max = 0;
850 	cap->nonleaf.sched_wfq_weight_max = 1;
851 
852 	cap->stats_mask = 0;
853 }
854 
855 static void
856 hns3_tm_leaf_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
857 				   struct rte_tm_node_capabilities *cap)
858 {
859 	cap->shaper_private_supported = false;
860 	cap->shaper_private_dual_rate_supported = false;
861 	cap->shaper_private_rate_min = 0;
862 	cap->shaper_private_rate_max = 0;
863 	cap->shaper_shared_n_max = 0;
864 
865 	cap->leaf.cman_head_drop_supported = false;
866 	cap->leaf.cman_wred_context_private_supported = false;
867 	cap->leaf.cman_wred_context_shared_n_max = 0;
868 
869 	cap->stats_mask = 0;
870 }
871 
872 static int
873 hns3_tm_node_capabilities_get(struct rte_eth_dev *dev,
874 			      uint32_t node_id,
875 			      struct rte_tm_node_capabilities *cap,
876 			      struct rte_tm_error *error)
877 {
878 	enum hns3_tm_node_type node_type;
879 	struct hns3_tm_node *tm_node;
880 
881 	if (cap == NULL || error == NULL)
882 		return -EINVAL;
883 
884 	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
885 	if (tm_node == NULL) {
886 		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
887 		error->message = "no such node";
888 		return -EINVAL;
889 	}
890 
891 	memset(cap, 0, sizeof(struct rte_tm_node_capabilities));
892 
893 	if (node_type != HNS3_TM_NODE_TYPE_QUEUE)
894 		hns3_tm_nonleaf_node_capabilities_get(dev, node_type, cap);
895 	else
896 		hns3_tm_leaf_node_capabilities_get(dev, cap);
897 
898 	return 0;
899 }
900 
901 static int
902 hns3_tm_config_port_rate(struct hns3_hw *hw,
903 			 struct hns3_tm_shaper_profile *shaper_profile)
904 {
905 	struct hns3_port_limit_rate_cmd *cfg;
906 	struct hns3_cmd_desc desc;
907 	uint32_t firmware_rate;
908 	uint64_t rate;
909 	int ret;
910 
911 	if (shaper_profile) {
912 		rate = shaper_profile->profile.peak.rate;
913 		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
914 	} else {
915 		firmware_rate = hw->max_tm_rate;
916 	}
917 
918 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_LIMIT_RATE, false);
919 	cfg = (struct hns3_port_limit_rate_cmd *)desc.data;
920 	cfg->speed = rte_cpu_to_le_32(firmware_rate);
921 
922 	ret = hns3_cmd_send(hw, &desc, 1);
923 	if (ret)
924 		hns3_err(hw, "failed to config port rate, ret = %d", ret);
925 
926 	return ret;
927 }
928 
929 static int
930 hns3_tm_config_tc_rate(struct hns3_hw *hw, uint8_t tc_no,
931 		       struct hns3_tm_shaper_profile *shaper_profile)
932 {
933 	struct hns3_tc_limit_rate_cmd *cfg;
934 	struct hns3_cmd_desc desc;
935 	uint32_t firmware_rate;
936 	uint64_t rate;
937 	int ret;
938 
939 	if (shaper_profile) {
940 		rate = shaper_profile->profile.peak.rate;
941 		firmware_rate = hns3_tm_rate_convert_tm2firmware(rate);
942 	} else {
943 		firmware_rate = hw->dcb_info.tc_info[tc_no].bw_limit;
944 	}
945 
946 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_TC_LIMIT_RATE, false);
947 	cfg = (struct hns3_tc_limit_rate_cmd *)desc.data;
948 	cfg->speed = rte_cpu_to_le_32(firmware_rate);
949 	cfg->tc_id = tc_no;
950 
951 	ret = hns3_cmd_send(hw, &desc, 1);
952 	if (ret)
953 		hns3_err(hw, "failed to config tc (%u) rate, ret = %d",
954 			 tc_no, ret);
955 
956 	return ret;
957 }
958 
959 static bool
960 hns3_tm_configure_check(struct hns3_hw *hw, struct rte_tm_error *error)
961 {
962 	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
963 	struct hns3_tm_conf *tm_conf = &pf->tm_conf;
964 	struct hns3_tm_node_list *tc_list = &tm_conf->tc_list;
965 	struct hns3_tm_node_list *queue_list = &tm_conf->queue_list;
966 	struct hns3_tm_node *tm_node;
967 
968 	/* TC */
969 	TAILQ_FOREACH(tm_node, tc_list, node) {
970 		if (!tm_node->reference_count) {
971 			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
972 			error->message = "TC without queue assigned";
973 			return false;
974 		}
975 
976 		if (hns3_tm_calc_node_tc_no(tm_conf, tm_node->id) >=
977 			hw->num_tc) {
978 			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
979 			error->message = "node's TC does not exist";
980 			return false;
981 		}
982 	}
983 
984 	/* Queue */
985 	TAILQ_FOREACH(tm_node, queue_list, node) {
986 		if (tm_node->id >= hw->data->nb_tx_queues) {
987 			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
988 			error->message = "node's queue invalid";
989 			return false;
990 		}
991 
992 		if (hns3_txq_mapped_tc_get(hw, tm_node->id) !=
993 		    hns3_tm_calc_node_tc_no(tm_conf, tm_node->parent->id)) {
994 			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
995 			error->message = "queue's TC does not match parent's TC";
996 			return false;
997 		}
998 	}
999 
1000 	return true;
1001 }
1002 
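/*
 * Committing the hierarchy only programs shaper rates into the firmware:
 * the port node's shaper (if any) and each TC node's shaper. Queue (leaf)
 * nodes carry no private shaper, so nothing is written for them.
 */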
1003 static int
1004 hns3_tm_hierarchy_do_commit(struct hns3_hw *hw,
1005 			    struct rte_tm_error *error)
1006 {
1007 	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
1008 	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
1009 	struct hns3_tm_node *tm_node;
1010 	uint8_t tc_no;
1011 	int ret;
1012 
1013 	/* port */
1014 	tm_node = pf->tm_conf.root;
1015 	if (tm_node->shaper_profile) {
1016 		ret = hns3_tm_config_port_rate(hw, tm_node->shaper_profile);
1017 		if (ret) {
1018 			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
1019 			error->message = "fail to set port peak rate";
1020 			return -EIO;
1021 		}
1022 	}
1023 
1024 	/* TC */
1025 	TAILQ_FOREACH(tm_node, tc_list, node) {
1026 		if (tm_node->shaper_profile == NULL)
1027 			continue;
1028 
1029 		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
1030 		ret = hns3_tm_config_tc_rate(hw, tc_no,
1031 					     tm_node->shaper_profile);
1032 		if (ret) {
1033 			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
1034 			error->message = "fail to set TC peak rate";
1035 			return -EIO;
1036 		}
1037 	}
1038 
1039 	return 0;
1040 }
1041 
1042 static int
1043 hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
1044 			 int clear_on_fail,
1045 			 struct rte_tm_error *error)
1046 {
1047 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1048 	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1049 	int ret;
1050 
1051 	if (error == NULL)
1052 		return -EINVAL;
1053 
1054 	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
1055 		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1056 		error->message = "device is resetting";
1057 		/* don't goto fail_clear, the user may retry later */
1058 		return -EBUSY;
1059 	}
1060 
1061 	if (pf->tm_conf.root == NULL)
1062 		goto done;
1063 
1064 	/* validate the configuration before committing it to hardware */
1065 	if (!hns3_tm_configure_check(hw, error))
1066 		goto fail_clear;
1067 
1068 	ret = hns3_tm_hierarchy_do_commit(hw, error);
1069 	if (ret)
1070 		goto fail_clear;
1071 
1072 done:
1073 	pf->tm_conf.committed = true;
1074 	return 0;
1075 
1076 fail_clear:
1077 	if (clear_on_fail) {
1078 		hns3_tm_conf_uninit(dev);
1079 		hns3_tm_conf_init(dev);
1080 	}
1081 	return -EINVAL;
1082 }
1083 
1084 static int
1085 hns3_tm_node_shaper_do_update(struct hns3_hw *hw,
1086 			      uint32_t node_id,
1087 			      enum hns3_tm_node_type node_type,
1088 			      struct hns3_tm_shaper_profile *shaper_profile,
1089 			      struct rte_tm_error *error)
1090 {
1091 	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
1092 	uint8_t tc_no;
1093 	int ret;
1094 
1095 	if (node_type == HNS3_TM_NODE_TYPE_QUEUE) {
1096 		if (shaper_profile != NULL) {
1097 			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
1098 			error->message = "queue node shaper not supported";
1099 			return -EINVAL;
1100 		}
1101 		return 0;
1102 	}
1103 
1104 	if (!pf->tm_conf.committed)
1105 		return 0;
1106 
1107 	if (node_type == HNS3_TM_NODE_TYPE_PORT) {
1108 		ret = hns3_tm_config_port_rate(hw, shaper_profile);
1109 		if (ret) {
1110 			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
1111 			error->message = "fail to update port peak rate";
1112 		}
1113 
1114 		return ret;
1115 	}
1116 
1117 	/* update the TC's shaper */
1120 	tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id);
1121 	ret = hns3_tm_config_tc_rate(hw, tc_no, shaper_profile);
1122 	if (ret) {
1123 		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
1124 		error->message = "fail to update TC peak rate";
1125 	}
1126 
1127 	return ret;
1128 }
1129 
1130 static int
1131 hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
1132 			   uint32_t node_id,
1133 			   uint32_t shaper_profile_id,
1134 			   struct rte_tm_error *error)
1135 {
1136 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1137 	enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX;
1138 	struct hns3_tm_shaper_profile *profile = NULL;
1139 	struct hns3_tm_node *tm_node;
1140 
1141 	if (error == NULL)
1142 		return -EINVAL;
1143 
1144 	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
1145 		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1146 		error->message = "device is resetting";
1147 		return -EBUSY;
1148 	}
1149 
1150 	tm_node = hns3_tm_node_search(dev, node_id, &node_type);
1151 	if (tm_node == NULL) {
1152 		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
1153 		error->message = "no such node";
1154 		return -EINVAL;
1155 	}
1156 
1157 	if (shaper_profile_id == tm_node->params.shaper_profile_id)
1158 		return 0;
1159 
1160 	if (shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
1161 		profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id);
1162 		if (profile == NULL) {
1163 			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
1164 			error->message = "profile ID does not exist";
1165 			return -EINVAL;
1166 		}
1167 	}
1168 
1169 	if (hns3_tm_node_shaper_do_update(hw, node_id, node_type,
1170 					  profile, error))
1171 		return -EINVAL;
1172 
1173 	if (tm_node->shaper_profile)
1174 		tm_node->shaper_profile->reference_count--;
1175 	tm_node->shaper_profile = profile;
1176 	tm_node->params.shaper_profile_id = shaper_profile_id;
1177 	if (profile != NULL)
1178 		profile->reference_count++;
1179 
1180 	return 0;
1181 }
1182 
1183 static int
1184 hns3_tm_capabilities_get_wrap(struct rte_eth_dev *dev,
1185 			      struct rte_tm_capabilities *cap,
1186 			      struct rte_tm_error *error)
1187 {
1188 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1189 	int ret;
1190 
1191 	rte_spinlock_lock(&hw->lock);
1192 	ret = hns3_tm_capabilities_get(dev, cap, error);
1193 	rte_spinlock_unlock(&hw->lock);
1194 
1195 	return ret;
1196 }
1197 
1198 static int
1199 hns3_tm_shaper_profile_add_wrap(struct rte_eth_dev *dev,
1200 				uint32_t shaper_profile_id,
1201 				const struct rte_tm_shaper_params *profile,
1202 				struct rte_tm_error *error)
1203 {
1204 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1205 	int ret;
1206 
1207 	rte_spinlock_lock(&hw->lock);
1208 	ret = hns3_tm_shaper_profile_add(dev, shaper_profile_id, profile, error);
1209 	rte_spinlock_unlock(&hw->lock);
1210 
1211 	return ret;
1212 }
1213 
1214 static int
1215 hns3_tm_shaper_profile_del_wrap(struct rte_eth_dev *dev,
1216 				uint32_t shaper_profile_id,
1217 				struct rte_tm_error *error)
1218 {
1219 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1220 	int ret;
1221 
1222 	rte_spinlock_lock(&hw->lock);
1223 	ret = hns3_tm_shaper_profile_del(dev, shaper_profile_id, error);
1224 	rte_spinlock_unlock(&hw->lock);
1225 
1226 	return ret;
1227 }
1228 
1229 static int
1230 hns3_tm_node_add_wrap(struct rte_eth_dev *dev, uint32_t node_id,
1231 		      uint32_t parent_node_id, uint32_t priority,
1232 		      uint32_t weight, uint32_t level_id,
1233 		      const struct rte_tm_node_params *params,
1234 		      struct rte_tm_error *error)
1235 {
1236 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1237 	int ret;
1238 
1239 	rte_spinlock_lock(&hw->lock);
1240 	ret = hns3_tm_node_add(dev, node_id, parent_node_id, priority,
1241 			       weight, level_id, params, error);
1242 	rte_spinlock_unlock(&hw->lock);
1243 
1244 	return ret;
1245 }
1246 
1247 static int
1248 hns3_tm_node_delete_wrap(struct rte_eth_dev *dev,
1249 			 uint32_t node_id,
1250 			 struct rte_tm_error *error)
1251 {
1252 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1253 	int ret;
1254 
1255 	rte_spinlock_lock(&hw->lock);
1256 	ret = hns3_tm_node_delete(dev, node_id, error);
1257 	rte_spinlock_unlock(&hw->lock);
1258 
1259 	return ret;
1260 }
1261 
1262 static int
1263 hns3_tm_node_type_get_wrap(struct rte_eth_dev *dev,
1264 			   uint32_t node_id,
1265 			   int *is_leaf,
1266 			   struct rte_tm_error *error)
1267 {
1268 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1269 	int ret;
1270 
1271 	rte_spinlock_lock(&hw->lock);
1272 	ret = hns3_tm_node_type_get(dev, node_id, is_leaf, error);
1273 	rte_spinlock_unlock(&hw->lock);
1274 
1275 	return ret;
1276 }
1277 
1278 static int
1279 hns3_tm_level_capabilities_get_wrap(struct rte_eth_dev *dev,
1280 				    uint32_t level_id,
1281 				    struct rte_tm_level_capabilities *cap,
1282 				    struct rte_tm_error *error)
1283 {
1284 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1285 	int ret;
1286 
1287 	rte_spinlock_lock(&hw->lock);
1288 	ret = hns3_tm_level_capabilities_get(dev, level_id, cap, error);
1289 	rte_spinlock_unlock(&hw->lock);
1290 
1291 	return ret;
1292 }
1293 
1294 static int
1295 hns3_tm_node_capabilities_get_wrap(struct rte_eth_dev *dev,
1296 				   uint32_t node_id,
1297 				   struct rte_tm_node_capabilities *cap,
1298 				   struct rte_tm_error *error)
1299 {
1300 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1301 	int ret;
1302 
1303 	rte_spinlock_lock(&hw->lock);
1304 	ret = hns3_tm_node_capabilities_get(dev, node_id, cap, error);
1305 	rte_spinlock_unlock(&hw->lock);
1306 
1307 	return ret;
1308 }
1309 
1310 static int
1311 hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
1312 			      int clear_on_fail,
1313 			      struct rte_tm_error *error)
1314 {
1315 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1316 	int ret;
1317 
1318 	rte_spinlock_lock(&hw->lock);
1319 	ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
1320 	rte_spinlock_unlock(&hw->lock);
1321 
1322 	return ret;
1323 }
1324 
1325 static int
1326 hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
1327 				uint32_t node_id,
1328 				uint32_t shaper_profile_id,
1329 				struct rte_tm_error *error)
1330 {
1331 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1332 	int ret;
1333 
1334 	rte_spinlock_lock(&hw->lock);
1335 	ret = hns3_tm_node_shaper_update(dev, node_id,
1336 					 shaper_profile_id, error);
1337 	rte_spinlock_unlock(&hw->lock);
1338 
1339 	return ret;
1340 }
1341 
1342 static const struct rte_tm_ops hns3_tm_ops = {
1343 	.capabilities_get       = hns3_tm_capabilities_get_wrap,
1344 	.shaper_profile_add     = hns3_tm_shaper_profile_add_wrap,
1345 	.shaper_profile_delete  = hns3_tm_shaper_profile_del_wrap,
1346 	.node_add               = hns3_tm_node_add_wrap,
1347 	.node_delete            = hns3_tm_node_delete_wrap,
1348 	.node_type_get          = hns3_tm_node_type_get_wrap,
1349 	.level_capabilities_get = hns3_tm_level_capabilities_get_wrap,
1350 	.node_capabilities_get  = hns3_tm_node_capabilities_get_wrap,
1351 	.hierarchy_commit       = hns3_tm_hierarchy_commit_wrap,
1352 	.node_shaper_update     = hns3_tm_node_shaper_update_wrap,
1353 };
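
/*
 * Illustrative sketch only (not taken from this file): an application is
 * expected to drive the callbacks above through the generic rte_tm API,
 * roughly as follows. 'port_id' and 'port_node_id' are placeholders; the
 * latter must be nb_nodes_max - 1 per the node ID layout described earlier.
 *
 *	struct rte_tm_error err;
 *	struct rte_tm_shaper_params sp = { .peak = { .rate = 125000000 } };
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *		.nonleaf = { .n_sp_priorities = 1 },
 *	};
 *	rte_tm_shaper_profile_add(port_id, 0, &sp, &err);
 *	rte_tm_node_add(port_id, port_node_id, RTE_TM_NODE_ID_NULL, 0, 1,
 *			HNS3_TM_NODE_LEVEL_PORT, &np, &err);
 *	... add one TC node per configured TC and one leaf node per Tx queue,
 *	    optionally attaching shaper profiles to the port/TC nodes ...
 *	rte_tm_hierarchy_commit(port_id, 1, &err);
 */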
1354 
1355 int
1356 hns3_tm_ops_get(struct rte_eth_dev *dev, void *arg)
1357 {
1358 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1359 
1360 	if (arg == NULL)
1361 		return -EINVAL;
1362 
1363 	if (!hns3_dev_get_support(hw, TM))
1364 		return -EOPNOTSUPP;
1365 
1366 	*(const void **)arg = &hns3_tm_ops;
1367 
1368 	return 0;
1369 }
1370 
1371 void
1372 hns3_tm_dev_start_proc(struct hns3_hw *hw)
1373 {
1374 	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
1375 
1376 	if (!hns3_dev_get_support(hw, TM))
1377 		return;
1378 
1379 	if (pf->tm_conf.root && !pf->tm_conf.committed)
1380 		hns3_warn(hw,
1381 		    "please call hierarchy_commit() before starting the port.");
1382 }
1383 
1384 /*
1385  * Clear the tm_conf committed flag when the device stops so that the user
1386  * can modify the TM configuration (e.g. add or delete nodes).
1387  *
1388  * If the user then does not call hierarchy commit before the device starts
1389  * again, the port/TC shaper rates would still be the previously committed ones.
1390  *
1391  * To avoid that, also restore the port/TC shaper rates to their defaults when
1392  * the device stops.
1393  */
1394 void
1395 hns3_tm_dev_stop_proc(struct hns3_hw *hw)
1396 {
1397 	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
1398 	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
1399 	struct hns3_tm_node *tm_node;
1400 	uint8_t tc_no;
1401 
1402 	if (!pf->tm_conf.committed)
1403 		return;
1404 
1405 	tm_node = pf->tm_conf.root;
1406 	if (tm_node != NULL && tm_node->shaper_profile)
1407 		(void)hns3_tm_config_port_rate(hw, NULL);
1408 
1409 	TAILQ_FOREACH(tm_node, tc_list, node) {
1410 		if (tm_node->shaper_profile == NULL)
1411 			continue;
1412 		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
1413 		(void)hns3_tm_config_tc_rate(hw, tc_no, NULL);
1414 	}
1415 
1416 	pf->tm_conf.committed = false;
1417 }
1418 
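/*
 * Re-apply the committed port/TC shaper configuration to the firmware; this
 * is expected to be used when the hardware state has been rebuilt (e.g. after
 * a device reset), assuming the software tm_conf is still valid.
 */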
1419 int
1420 hns3_tm_conf_update(struct hns3_hw *hw)
1421 {
1422 	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
1423 	struct rte_tm_error error;
1424 
1425 	if (!hns3_dev_get_support(hw, TM))
1426 		return 0;
1427 
1428 	if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)
1429 		return 0;
1430 
1431 	memset(&error, 0, sizeof(struct rte_tm_error));
1432 	return hns3_tm_hierarchy_do_commit(hw, &error);
1433 }
1434