1c09c7847SChengwen Feng /* SPDX-License-Identifier: BSD-3-Clause 253e6f86cSMin Hu (Connor) * Copyright(c) 2020-2021 HiSilicon Limited. 3c09c7847SChengwen Feng */ 4c09c7847SChengwen Feng 5c09c7847SChengwen Feng #include <rte_malloc.h> 6c09c7847SChengwen Feng 7247f0ce2SChengwen Feng #include "hns3_common.h" 8c09c7847SChengwen Feng #include "hns3_dcb.h" 9c09c7847SChengwen Feng #include "hns3_logs.h" 10c09c7847SChengwen Feng #include "hns3_tm.h" 11c09c7847SChengwen Feng 12c09c7847SChengwen Feng static inline uint32_t 13c09c7847SChengwen Feng hns3_tm_max_tx_queues_get(struct rte_eth_dev *dev) 14c09c7847SChengwen Feng { 15c09c7847SChengwen Feng /* 16c09c7847SChengwen Feng * This API will called in pci device probe stage, we can't call 17c09c7847SChengwen Feng * rte_eth_dev_info_get to get max_tx_queues (due to rte_eth_devices 18c09c7847SChengwen Feng * not setup), so we call the hns3_dev_infos_get. 19c09c7847SChengwen Feng */ 20c09c7847SChengwen Feng struct rte_eth_dev_info dev_info; 21c09c7847SChengwen Feng 22c09c7847SChengwen Feng memset(&dev_info, 0, sizeof(dev_info)); 23c09c7847SChengwen Feng (void)hns3_dev_infos_get(dev, &dev_info); 24c09c7847SChengwen Feng return RTE_MIN(dev_info.max_tx_queues, RTE_MAX_QUEUES_PER_PORT); 25c09c7847SChengwen Feng } 26c09c7847SChengwen Feng 27c09c7847SChengwen Feng void 28c09c7847SChengwen Feng hns3_tm_conf_init(struct rte_eth_dev *dev) 29c09c7847SChengwen Feng { 30c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 31fc18d1b4SHuisong Li struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 32c09c7847SChengwen Feng uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev); 33c09c7847SChengwen Feng 34efcaa81eSChengchang Tang if (!hns3_dev_get_support(hw, TM)) 35fc18d1b4SHuisong Li return; 36fc18d1b4SHuisong Li 37c09c7847SChengwen Feng pf->tm_conf.nb_leaf_nodes_max = max_tx_queues; 38c09c7847SChengwen Feng pf->tm_conf.nb_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues; 39c09c7847SChengwen Feng pf->tm_conf.nb_shaper_profile_max = 1 + HNS3_MAX_TC_NUM; 40c09c7847SChengwen Feng 41c09c7847SChengwen Feng TAILQ_INIT(&pf->tm_conf.shaper_profile_list); 42c09c7847SChengwen Feng pf->tm_conf.nb_shaper_profile = 0; 43c09c7847SChengwen Feng 44c09c7847SChengwen Feng pf->tm_conf.root = NULL; 45c09c7847SChengwen Feng TAILQ_INIT(&pf->tm_conf.tc_list); 46c09c7847SChengwen Feng TAILQ_INIT(&pf->tm_conf.queue_list); 47c09c7847SChengwen Feng pf->tm_conf.nb_tc_node = 0; 48c09c7847SChengwen Feng pf->tm_conf.nb_queue_node = 0; 49c09c7847SChengwen Feng 50c09c7847SChengwen Feng pf->tm_conf.committed = false; 51c09c7847SChengwen Feng } 52c09c7847SChengwen Feng 53c09c7847SChengwen Feng void 54c09c7847SChengwen Feng hns3_tm_conf_uninit(struct rte_eth_dev *dev) 55c09c7847SChengwen Feng { 56c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 57fc18d1b4SHuisong Li struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 58c09c7847SChengwen Feng struct hns3_tm_shaper_profile *shaper_profile; 59c09c7847SChengwen Feng struct hns3_tm_node *tm_node; 60c09c7847SChengwen Feng 61efcaa81eSChengchang Tang if (!hns3_dev_get_support(hw, TM)) 62fc18d1b4SHuisong Li return; 63fc18d1b4SHuisong Li 64c09c7847SChengwen Feng if (pf->tm_conf.nb_queue_node > 0) { 65c09c7847SChengwen Feng while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) { 66c09c7847SChengwen Feng TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node); 67c09c7847SChengwen Feng rte_free(tm_node); 68c09c7847SChengwen Feng } 69c09c7847SChengwen 
Feng pf->tm_conf.nb_queue_node = 0; 70c09c7847SChengwen Feng } 71c09c7847SChengwen Feng 72c09c7847SChengwen Feng if (pf->tm_conf.nb_tc_node > 0) { 73c09c7847SChengwen Feng while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) { 74c09c7847SChengwen Feng TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node); 75c09c7847SChengwen Feng rte_free(tm_node); 76c09c7847SChengwen Feng } 77c09c7847SChengwen Feng pf->tm_conf.nb_tc_node = 0; 78c09c7847SChengwen Feng } 79c09c7847SChengwen Feng 80c09c7847SChengwen Feng if (pf->tm_conf.root != NULL) { 81c09c7847SChengwen Feng rte_free(pf->tm_conf.root); 82c09c7847SChengwen Feng pf->tm_conf.root = NULL; 83c09c7847SChengwen Feng } 84c09c7847SChengwen Feng 85c09c7847SChengwen Feng if (pf->tm_conf.nb_shaper_profile > 0) { 86c09c7847SChengwen Feng while ((shaper_profile = 87c09c7847SChengwen Feng TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) { 88c09c7847SChengwen Feng TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, 89c09c7847SChengwen Feng shaper_profile, node); 90c09c7847SChengwen Feng rte_free(shaper_profile); 91c09c7847SChengwen Feng } 92c09c7847SChengwen Feng pf->tm_conf.nb_shaper_profile = 0; 93c09c7847SChengwen Feng } 94c09c7847SChengwen Feng 95c09c7847SChengwen Feng pf->tm_conf.nb_leaf_nodes_max = 0; 96c09c7847SChengwen Feng pf->tm_conf.nb_nodes_max = 0; 97c09c7847SChengwen Feng pf->tm_conf.nb_shaper_profile_max = 0; 98c09c7847SChengwen Feng } 99c09c7847SChengwen Feng 100c09c7847SChengwen Feng static inline uint64_t 101c09c7847SChengwen Feng hns3_tm_rate_convert_firmware2tm(uint32_t firmware_rate) 102c09c7847SChengwen Feng { 103c09c7847SChengwen Feng #define FIRMWARE_TO_TM_RATE_SCALE 125000 104c09c7847SChengwen Feng /* tm rate unit is Bps, firmware rate is Mbps */ 105c09c7847SChengwen Feng return ((uint64_t)firmware_rate) * FIRMWARE_TO_TM_RATE_SCALE; 106c09c7847SChengwen Feng } 107c09c7847SChengwen Feng 108c09c7847SChengwen Feng static inline uint32_t 109c09c7847SChengwen Feng hns3_tm_rate_convert_tm2firmware(uint64_t tm_rate) 110c09c7847SChengwen Feng { 111c09c7847SChengwen Feng #define TM_TO_FIRMWARE_RATE_SCALE 125000 112c09c7847SChengwen Feng /* tm rate unit is Bps, firmware rate is Mbps */ 113c09c7847SChengwen Feng return (uint32_t)(tm_rate / TM_TO_FIRMWARE_RATE_SCALE); 114c09c7847SChengwen Feng } 115c09c7847SChengwen Feng 116c09c7847SChengwen Feng static int 117c09c7847SChengwen Feng hns3_tm_capabilities_get(struct rte_eth_dev *dev, 118c09c7847SChengwen Feng struct rte_tm_capabilities *cap, 119c09c7847SChengwen Feng struct rte_tm_error *error) 120c09c7847SChengwen Feng { 121c09c7847SChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 122c09c7847SChengwen Feng uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev); 123c09c7847SChengwen Feng 124c09c7847SChengwen Feng if (cap == NULL || error == NULL) 125c09c7847SChengwen Feng return -EINVAL; 126c09c7847SChengwen Feng 127c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NONE; 128c09c7847SChengwen Feng 129c09c7847SChengwen Feng memset(cap, 0, sizeof(struct rte_tm_capabilities)); 130c09c7847SChengwen Feng 131c09c7847SChengwen Feng cap->n_nodes_max = 1 + HNS3_MAX_TC_NUM + max_tx_queues; 132c09c7847SChengwen Feng cap->n_levels_max = HNS3_TM_NODE_LEVEL_MAX; 133c09c7847SChengwen Feng cap->non_leaf_nodes_identical = 1; 134c09c7847SChengwen Feng cap->leaf_nodes_identical = 1; 135c09c7847SChengwen Feng cap->shaper_n_max = 1 + HNS3_MAX_TC_NUM; 136c09c7847SChengwen Feng cap->shaper_private_n_max = 1 + HNS3_MAX_TC_NUM; 137c09c7847SChengwen Feng cap->shaper_private_rate_max = 
138c09c7847SChengwen Feng hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate); 139c09c7847SChengwen Feng 140c09c7847SChengwen Feng cap->sched_n_children_max = max_tx_queues; 141c09c7847SChengwen Feng cap->sched_sp_n_priorities_max = 1; 142c09c7847SChengwen Feng cap->sched_wfq_weight_max = 1; 143c09c7847SChengwen Feng 144c09c7847SChengwen Feng cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD; 145c09c7847SChengwen Feng cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; 146c09c7847SChengwen Feng 147c09c7847SChengwen Feng return 0; 148c09c7847SChengwen Feng } 149c09c7847SChengwen Feng 150c09c7847SChengwen Feng static struct hns3_tm_shaper_profile * 151c09c7847SChengwen Feng hns3_tm_shaper_profile_search(struct rte_eth_dev *dev, 152c09c7847SChengwen Feng uint32_t shaper_profile_id) 153c09c7847SChengwen Feng { 154c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 155c09c7847SChengwen Feng struct hns3_shaper_profile_list *shaper_profile_list = 156c09c7847SChengwen Feng &pf->tm_conf.shaper_profile_list; 157c09c7847SChengwen Feng struct hns3_tm_shaper_profile *shaper_profile; 158c09c7847SChengwen Feng 159c09c7847SChengwen Feng TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) { 160c09c7847SChengwen Feng if (shaper_profile_id == shaper_profile->shaper_profile_id) 161c09c7847SChengwen Feng return shaper_profile; 162c09c7847SChengwen Feng } 163c09c7847SChengwen Feng 164c09c7847SChengwen Feng return NULL; 165c09c7847SChengwen Feng } 166c09c7847SChengwen Feng 167c09c7847SChengwen Feng static int 168c09c7847SChengwen Feng hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev, 169*39533238SBruce Richardson const struct rte_tm_shaper_params *profile, 170c09c7847SChengwen Feng struct rte_tm_error *error) 171c09c7847SChengwen Feng { 172c09c7847SChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 173c09c7847SChengwen Feng 174c09c7847SChengwen Feng if (profile->committed.rate) { 175c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE; 176c09c7847SChengwen Feng error->message = "committed rate not supported"; 177c09c7847SChengwen Feng return -EINVAL; 178c09c7847SChengwen Feng } 179c09c7847SChengwen Feng 180c09c7847SChengwen Feng if (profile->committed.size) { 181c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE; 182c09c7847SChengwen Feng error->message = "committed bucket size not supported"; 183c09c7847SChengwen Feng return -EINVAL; 184c09c7847SChengwen Feng } 185c09c7847SChengwen Feng 186c09c7847SChengwen Feng if (profile->peak.rate > 187c09c7847SChengwen Feng hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate)) { 188c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE; 189c09c7847SChengwen Feng error->message = "peak rate too large"; 190c09c7847SChengwen Feng return -EINVAL; 191c09c7847SChengwen Feng } 192c09c7847SChengwen Feng 1933e5c397cSChengwen Feng if (profile->peak.rate < hns3_tm_rate_convert_firmware2tm(1)) { 1943e5c397cSChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE; 1953e5c397cSChengwen Feng error->message = "peak rate must be at least 1Mbps"; 1963e5c397cSChengwen Feng return -EINVAL; 1973e5c397cSChengwen Feng } 1983e5c397cSChengwen Feng 199c09c7847SChengwen Feng if (profile->peak.size) { 200c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE; 201c09c7847SChengwen Feng error->message = "peak bucket size not supported"; 202c09c7847SChengwen Feng 
return -EINVAL; 203c09c7847SChengwen Feng } 204c09c7847SChengwen Feng 205c09c7847SChengwen Feng if (profile->pkt_length_adjust) { 206c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN; 207c09c7847SChengwen Feng error->message = "packet length adjustment not supported"; 208c09c7847SChengwen Feng return -EINVAL; 209c09c7847SChengwen Feng } 210c09c7847SChengwen Feng 211c09c7847SChengwen Feng if (profile->packet_mode) { 212c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE; 213c09c7847SChengwen Feng error->message = "packet mode not supported"; 214c09c7847SChengwen Feng return -EINVAL; 215c09c7847SChengwen Feng } 216c09c7847SChengwen Feng 217c09c7847SChengwen Feng return 0; 218c09c7847SChengwen Feng } 219c09c7847SChengwen Feng 220c09c7847SChengwen Feng static int 221c09c7847SChengwen Feng hns3_tm_shaper_profile_add(struct rte_eth_dev *dev, 222c09c7847SChengwen Feng uint32_t shaper_profile_id, 223*39533238SBruce Richardson const struct rte_tm_shaper_params *profile, 224c09c7847SChengwen Feng struct rte_tm_error *error) 225c09c7847SChengwen Feng { 226c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 227c09c7847SChengwen Feng struct hns3_tm_shaper_profile *shaper_profile; 228c09c7847SChengwen Feng int ret; 229c09c7847SChengwen Feng 230c09c7847SChengwen Feng if (profile == NULL || error == NULL) 231c09c7847SChengwen Feng return -EINVAL; 232c09c7847SChengwen Feng 233c09c7847SChengwen Feng if (pf->tm_conf.nb_shaper_profile >= 234c09c7847SChengwen Feng pf->tm_conf.nb_shaper_profile_max) { 235c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; 236c09c7847SChengwen Feng error->message = "too much profiles"; 237c09c7847SChengwen Feng return -EINVAL; 238c09c7847SChengwen Feng } 239c09c7847SChengwen Feng 240c09c7847SChengwen Feng ret = hns3_tm_shaper_profile_param_check(dev, profile, error); 241c09c7847SChengwen Feng if (ret) 242c09c7847SChengwen Feng return ret; 243c09c7847SChengwen Feng 244c09c7847SChengwen Feng shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id); 245c09c7847SChengwen Feng if (shaper_profile) { 246c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; 247c09c7847SChengwen Feng error->message = "profile ID exist"; 248c09c7847SChengwen Feng return -EINVAL; 249c09c7847SChengwen Feng } 250c09c7847SChengwen Feng 251c09c7847SChengwen Feng shaper_profile = rte_zmalloc("hns3_tm_shaper_profile", 252c09c7847SChengwen Feng sizeof(struct hns3_tm_shaper_profile), 253c09c7847SChengwen Feng 0); 254c09c7847SChengwen Feng if (shaper_profile == NULL) 255c09c7847SChengwen Feng return -ENOMEM; 256c09c7847SChengwen Feng 257c09c7847SChengwen Feng shaper_profile->shaper_profile_id = shaper_profile_id; 258c09c7847SChengwen Feng memcpy(&shaper_profile->profile, profile, 259c09c7847SChengwen Feng sizeof(struct rte_tm_shaper_params)); 260c09c7847SChengwen Feng TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list, 261c09c7847SChengwen Feng shaper_profile, node); 262c09c7847SChengwen Feng pf->tm_conf.nb_shaper_profile++; 263c09c7847SChengwen Feng 264c09c7847SChengwen Feng return 0; 265c09c7847SChengwen Feng } 266c09c7847SChengwen Feng 267c09c7847SChengwen Feng static int 268c09c7847SChengwen Feng hns3_tm_shaper_profile_del(struct rte_eth_dev *dev, 269c09c7847SChengwen Feng uint32_t shaper_profile_id, 270c09c7847SChengwen Feng struct rte_tm_error *error) 271c09c7847SChengwen Feng { 272c09c7847SChengwen Feng struct hns3_pf *pf = 
HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 273c09c7847SChengwen Feng struct hns3_tm_shaper_profile *shaper_profile; 274c09c7847SChengwen Feng 275c09c7847SChengwen Feng if (error == NULL) 276c09c7847SChengwen Feng return -EINVAL; 277c09c7847SChengwen Feng 278c09c7847SChengwen Feng shaper_profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id); 279c09c7847SChengwen Feng if (shaper_profile == NULL) { 280c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; 281c09c7847SChengwen Feng error->message = "profile ID not exist"; 282c09c7847SChengwen Feng return -EINVAL; 283c09c7847SChengwen Feng } 284c09c7847SChengwen Feng 285c09c7847SChengwen Feng if (shaper_profile->reference_count) { 286c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; 287c09c7847SChengwen Feng error->message = "profile in use"; 288c09c7847SChengwen Feng return -EINVAL; 289c09c7847SChengwen Feng } 290c09c7847SChengwen Feng 291c09c7847SChengwen Feng TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node); 292c09c7847SChengwen Feng rte_free(shaper_profile); 293c09c7847SChengwen Feng pf->tm_conf.nb_shaper_profile--; 294c09c7847SChengwen Feng 295c09c7847SChengwen Feng return 0; 296c09c7847SChengwen Feng } 297c09c7847SChengwen Feng 298c09c7847SChengwen Feng static struct hns3_tm_node * 299c09c7847SChengwen Feng hns3_tm_node_search(struct rte_eth_dev *dev, 300c09c7847SChengwen Feng uint32_t node_id, 301c09c7847SChengwen Feng enum hns3_tm_node_type *node_type) 302c09c7847SChengwen Feng { 303c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 304c09c7847SChengwen Feng struct hns3_tm_node_list *queue_list = &pf->tm_conf.queue_list; 305c09c7847SChengwen Feng struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list; 306c09c7847SChengwen Feng struct hns3_tm_node *tm_node; 307c09c7847SChengwen Feng 308c09c7847SChengwen Feng if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) { 309c09c7847SChengwen Feng *node_type = HNS3_TM_NODE_TYPE_PORT; 310c09c7847SChengwen Feng return pf->tm_conf.root; 311c09c7847SChengwen Feng } 312c09c7847SChengwen Feng 313c09c7847SChengwen Feng TAILQ_FOREACH(tm_node, tc_list, node) { 314c09c7847SChengwen Feng if (tm_node->id == node_id) { 315c09c7847SChengwen Feng *node_type = HNS3_TM_NODE_TYPE_TC; 316c09c7847SChengwen Feng return tm_node; 317c09c7847SChengwen Feng } 318c09c7847SChengwen Feng } 319c09c7847SChengwen Feng 320c09c7847SChengwen Feng TAILQ_FOREACH(tm_node, queue_list, node) { 321c09c7847SChengwen Feng if (tm_node->id == node_id) { 322c09c7847SChengwen Feng *node_type = HNS3_TM_NODE_TYPE_QUEUE; 323c09c7847SChengwen Feng return tm_node; 324c09c7847SChengwen Feng } 325c09c7847SChengwen Feng } 326c09c7847SChengwen Feng 327c09c7847SChengwen Feng return NULL; 328c09c7847SChengwen Feng } 329c09c7847SChengwen Feng 330c09c7847SChengwen Feng static int 331c09c7847SChengwen Feng hns3_tm_nonleaf_node_param_check(struct rte_eth_dev *dev, 3325d49af62SBruce Richardson const struct rte_tm_node_params *params, 333c09c7847SChengwen Feng struct rte_tm_error *error) 334c09c7847SChengwen Feng { 335c09c7847SChengwen Feng struct hns3_tm_shaper_profile *shaper_profile; 336c09c7847SChengwen Feng 337c09c7847SChengwen Feng if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) { 338c09c7847SChengwen Feng shaper_profile = hns3_tm_shaper_profile_search(dev, 339c09c7847SChengwen Feng params->shaper_profile_id); 340c09c7847SChengwen Feng if (shaper_profile == NULL) { 341c09c7847SChengwen Feng error->type = 
342c09c7847SChengwen Feng RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID; 343c09c7847SChengwen Feng error->message = "shaper profile not exist"; 344c09c7847SChengwen Feng return -EINVAL; 345c09c7847SChengwen Feng } 346c09c7847SChengwen Feng } 347c09c7847SChengwen Feng 348c09c7847SChengwen Feng if (params->nonleaf.wfq_weight_mode) { 349c09c7847SChengwen Feng error->type = 350c09c7847SChengwen Feng RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE; 351c09c7847SChengwen Feng error->message = "WFQ not supported"; 352c09c7847SChengwen Feng return -EINVAL; 353c09c7847SChengwen Feng } 354c09c7847SChengwen Feng 355c09c7847SChengwen Feng if (params->nonleaf.n_sp_priorities != 1) { 356c09c7847SChengwen Feng error->type = 357c09c7847SChengwen Feng RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES; 358c09c7847SChengwen Feng error->message = "SP priority not supported"; 359c09c7847SChengwen Feng return -EINVAL; 360c09c7847SChengwen Feng } 361c09c7847SChengwen Feng 362c09c7847SChengwen Feng return 0; 363c09c7847SChengwen Feng } 364c09c7847SChengwen Feng 365c09c7847SChengwen Feng static int 366c09c7847SChengwen Feng hns3_tm_leaf_node_param_check(struct rte_eth_dev *dev __rte_unused, 3675d49af62SBruce Richardson const struct rte_tm_node_params *params, 368c09c7847SChengwen Feng struct rte_tm_error *error) 369c09c7847SChengwen Feng 370c09c7847SChengwen Feng { 371c09c7847SChengwen Feng if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) { 372c09c7847SChengwen Feng error->type = 373c09c7847SChengwen Feng RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID; 374c09c7847SChengwen Feng error->message = "shaper not supported"; 375c09c7847SChengwen Feng return -EINVAL; 376c09c7847SChengwen Feng } 377c09c7847SChengwen Feng 378281b62f7SMin Hu (Connor) if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP) { 379c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN; 380c09c7847SChengwen Feng error->message = "congestion management not supported"; 381c09c7847SChengwen Feng return -EINVAL; 382c09c7847SChengwen Feng } 383c09c7847SChengwen Feng 384c09c7847SChengwen Feng if (params->leaf.wred.wred_profile_id != RTE_TM_WRED_PROFILE_ID_NONE) { 385c09c7847SChengwen Feng error->type = 386c09c7847SChengwen Feng RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID; 387c09c7847SChengwen Feng error->message = "WRED not supported"; 388c09c7847SChengwen Feng return -EINVAL; 389c09c7847SChengwen Feng } 390c09c7847SChengwen Feng 391c09c7847SChengwen Feng if (params->leaf.wred.shared_wred_context_id) { 392c09c7847SChengwen Feng error->type = 393c09c7847SChengwen Feng RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID; 394c09c7847SChengwen Feng error->message = "WRED not supported"; 395c09c7847SChengwen Feng return -EINVAL; 396c09c7847SChengwen Feng } 397c09c7847SChengwen Feng 398c09c7847SChengwen Feng if (params->leaf.wred.n_shared_wred_contexts) { 399c09c7847SChengwen Feng error->type = 400c09c7847SChengwen Feng RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS; 401c09c7847SChengwen Feng error->message = "WRED not supported"; 402c09c7847SChengwen Feng return -EINVAL; 403c09c7847SChengwen Feng } 404c09c7847SChengwen Feng 405c09c7847SChengwen Feng return 0; 406c09c7847SChengwen Feng } 407c09c7847SChengwen Feng 408c09c7847SChengwen Feng static int 409c09c7847SChengwen Feng hns3_tm_node_param_check(struct rte_eth_dev *dev, uint32_t node_id, 410c09c7847SChengwen Feng uint32_t priority, uint32_t weight, 4115d49af62SBruce Richardson const struct rte_tm_node_params *params, 412c09c7847SChengwen Feng struct rte_tm_error *error) 
413c09c7847SChengwen Feng { 414c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 415c09c7847SChengwen Feng enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX; 416c09c7847SChengwen Feng 417c09c7847SChengwen Feng if (node_id == RTE_TM_NODE_ID_NULL) { 418c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 419c09c7847SChengwen Feng error->message = "invalid node id"; 420c09c7847SChengwen Feng return -EINVAL; 421c09c7847SChengwen Feng } 422c09c7847SChengwen Feng 423c09c7847SChengwen Feng if (hns3_tm_node_search(dev, node_id, &node_type)) { 424c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 425c09c7847SChengwen Feng error->message = "node id already used"; 426c09c7847SChengwen Feng return -EINVAL; 427c09c7847SChengwen Feng } 428c09c7847SChengwen Feng 429c09c7847SChengwen Feng if (priority) { 430c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY; 431c09c7847SChengwen Feng error->message = "priority should be 0"; 432c09c7847SChengwen Feng return -EINVAL; 433c09c7847SChengwen Feng } 434c09c7847SChengwen Feng 435c09c7847SChengwen Feng if (weight != 1) { 436c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT; 437c09c7847SChengwen Feng error->message = "weight must be 1"; 438c09c7847SChengwen Feng return -EINVAL; 439c09c7847SChengwen Feng } 440c09c7847SChengwen Feng 441c09c7847SChengwen Feng if (params->shared_shaper_id) { 442c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID; 443c09c7847SChengwen Feng error->message = "shared shaper not supported"; 444c09c7847SChengwen Feng return -EINVAL; 445c09c7847SChengwen Feng } 446c09c7847SChengwen Feng if (params->n_shared_shapers) { 447c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS; 448c09c7847SChengwen Feng error->message = "shared shaper not supported"; 449c09c7847SChengwen Feng return -EINVAL; 450c09c7847SChengwen Feng } 451c09c7847SChengwen Feng 452c09c7847SChengwen Feng if (node_id >= pf->tm_conf.nb_leaf_nodes_max) 453c09c7847SChengwen Feng return hns3_tm_nonleaf_node_param_check(dev, params, error); 454c09c7847SChengwen Feng else 455c09c7847SChengwen Feng return hns3_tm_leaf_node_param_check(dev, params, error); 456c09c7847SChengwen Feng } 457c09c7847SChengwen Feng 458c09c7847SChengwen Feng static int 459c09c7847SChengwen Feng hns3_tm_port_node_add(struct rte_eth_dev *dev, uint32_t node_id, 4605d49af62SBruce Richardson uint32_t level_id, const struct rte_tm_node_params *params, 461c09c7847SChengwen Feng struct rte_tm_error *error) 462c09c7847SChengwen Feng { 463c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 464c09c7847SChengwen Feng struct hns3_tm_node *tm_node; 465c09c7847SChengwen Feng 466c09c7847SChengwen Feng if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && 467c09c7847SChengwen Feng level_id != HNS3_TM_NODE_LEVEL_PORT) { 468c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; 469c09c7847SChengwen Feng error->message = "wrong level"; 470c09c7847SChengwen Feng return -EINVAL; 471c09c7847SChengwen Feng } 472c09c7847SChengwen Feng 473c09c7847SChengwen Feng if (node_id != pf->tm_conf.nb_nodes_max - 1) { 474c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 475c09c7847SChengwen Feng error->message = "invalid port node ID"; 476c09c7847SChengwen Feng return -EINVAL; 477c09c7847SChengwen Feng } 478c09c7847SChengwen Feng 479c09c7847SChengwen Feng if (pf->tm_conf.root) { 480c09c7847SChengwen Feng error->type = 
RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; 481c09c7847SChengwen Feng error->message = "already have a root"; 482c09c7847SChengwen Feng return -EINVAL; 483c09c7847SChengwen Feng } 484c09c7847SChengwen Feng 485c09c7847SChengwen Feng tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0); 486c09c7847SChengwen Feng if (tm_node == NULL) 487c09c7847SChengwen Feng return -ENOMEM; 488c09c7847SChengwen Feng 489c09c7847SChengwen Feng tm_node->id = node_id; 490c09c7847SChengwen Feng tm_node->reference_count = 0; 491c09c7847SChengwen Feng tm_node->parent = NULL; 492c09c7847SChengwen Feng tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev, 493c09c7847SChengwen Feng params->shaper_profile_id); 494c09c7847SChengwen Feng memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params)); 495c09c7847SChengwen Feng pf->tm_conf.root = tm_node; 496c09c7847SChengwen Feng 497c09c7847SChengwen Feng if (tm_node->shaper_profile) 498c09c7847SChengwen Feng tm_node->shaper_profile->reference_count++; 499c09c7847SChengwen Feng 500c09c7847SChengwen Feng return 0; 501c09c7847SChengwen Feng } 502c09c7847SChengwen Feng 503c09c7847SChengwen Feng static int 504c09c7847SChengwen Feng hns3_tm_tc_node_add(struct rte_eth_dev *dev, uint32_t node_id, 505c09c7847SChengwen Feng uint32_t level_id, struct hns3_tm_node *parent_node, 5065d49af62SBruce Richardson const struct rte_tm_node_params *params, 507c09c7847SChengwen Feng struct rte_tm_error *error) 508c09c7847SChengwen Feng { 509c09c7847SChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 510c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 511c09c7847SChengwen Feng struct hns3_tm_node *tm_node; 512c09c7847SChengwen Feng 513c09c7847SChengwen Feng if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && 514c09c7847SChengwen Feng level_id != HNS3_TM_NODE_LEVEL_TC) { 515c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; 516c09c7847SChengwen Feng error->message = "wrong level"; 517c09c7847SChengwen Feng return -EINVAL; 518c09c7847SChengwen Feng } 519c09c7847SChengwen Feng 520c09c7847SChengwen Feng if (node_id >= pf->tm_conf.nb_nodes_max - 1 || 521c09c7847SChengwen Feng node_id < pf->tm_conf.nb_leaf_nodes_max || 522c09c7847SChengwen Feng hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id) >= hw->num_tc) { 523c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 524c09c7847SChengwen Feng error->message = "invalid tc node ID"; 525c09c7847SChengwen Feng return -EINVAL; 526c09c7847SChengwen Feng } 527c09c7847SChengwen Feng 528c09c7847SChengwen Feng if (pf->tm_conf.nb_tc_node >= hw->num_tc) { 529c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 530c09c7847SChengwen Feng error->message = "too many TCs"; 531c09c7847SChengwen Feng return -EINVAL; 532c09c7847SChengwen Feng } 533c09c7847SChengwen Feng 534c09c7847SChengwen Feng tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0); 535c09c7847SChengwen Feng if (tm_node == NULL) 536c09c7847SChengwen Feng return -ENOMEM; 537c09c7847SChengwen Feng 538c09c7847SChengwen Feng tm_node->id = node_id; 539c09c7847SChengwen Feng tm_node->reference_count = 0; 540c09c7847SChengwen Feng tm_node->parent = parent_node; 541c09c7847SChengwen Feng tm_node->shaper_profile = hns3_tm_shaper_profile_search(dev, 542c09c7847SChengwen Feng params->shaper_profile_id); 543c09c7847SChengwen Feng memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params)); 544c09c7847SChengwen Feng TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list, tm_node, 
node); 545c09c7847SChengwen Feng pf->tm_conf.nb_tc_node++; 546c09c7847SChengwen Feng tm_node->parent->reference_count++; 547c09c7847SChengwen Feng 548c09c7847SChengwen Feng if (tm_node->shaper_profile) 549c09c7847SChengwen Feng tm_node->shaper_profile->reference_count++; 550c09c7847SChengwen Feng 551c09c7847SChengwen Feng return 0; 552c09c7847SChengwen Feng } 553c09c7847SChengwen Feng 554c09c7847SChengwen Feng static int 555c09c7847SChengwen Feng hns3_tm_queue_node_add(struct rte_eth_dev *dev, uint32_t node_id, 556c09c7847SChengwen Feng uint32_t level_id, struct hns3_tm_node *parent_node, 5575d49af62SBruce Richardson const struct rte_tm_node_params *params, 558c09c7847SChengwen Feng struct rte_tm_error *error) 559c09c7847SChengwen Feng { 560c09c7847SChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 561c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 562c09c7847SChengwen Feng struct hns3_tm_node *tm_node; 563c09c7847SChengwen Feng 564c09c7847SChengwen Feng if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && 565c09c7847SChengwen Feng level_id != HNS3_TM_NODE_LEVEL_QUEUE) { 566c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; 567c09c7847SChengwen Feng error->message = "wrong level"; 568c09c7847SChengwen Feng return -EINVAL; 569c09c7847SChengwen Feng } 570c09c7847SChengwen Feng 571c09c7847SChengwen Feng /* note: dev->data->nb_tx_queues <= max_tx_queues */ 572c09c7847SChengwen Feng if (node_id >= dev->data->nb_tx_queues) { 573c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 574c09c7847SChengwen Feng error->message = "invalid queue node ID"; 575c09c7847SChengwen Feng return -EINVAL; 576c09c7847SChengwen Feng } 577c09c7847SChengwen Feng 578c09c7847SChengwen Feng if (hns3_txq_mapped_tc_get(hw, node_id) != 579c09c7847SChengwen Feng hns3_tm_calc_node_tc_no(&pf->tm_conf, parent_node->id)) { 580c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 581c09c7847SChengwen Feng error->message = "queue's TC not match parent's TC"; 582c09c7847SChengwen Feng return -EINVAL; 583c09c7847SChengwen Feng } 584c09c7847SChengwen Feng 585c09c7847SChengwen Feng tm_node = rte_zmalloc("hns3_tm_node", sizeof(struct hns3_tm_node), 0); 586c09c7847SChengwen Feng if (tm_node == NULL) 587c09c7847SChengwen Feng return -ENOMEM; 588c09c7847SChengwen Feng 589c09c7847SChengwen Feng tm_node->id = node_id; 590c09c7847SChengwen Feng tm_node->reference_count = 0; 591c09c7847SChengwen Feng tm_node->parent = parent_node; 592c09c7847SChengwen Feng memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params)); 593c09c7847SChengwen Feng TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list, tm_node, node); 594c09c7847SChengwen Feng pf->tm_conf.nb_queue_node++; 595c09c7847SChengwen Feng tm_node->parent->reference_count++; 596c09c7847SChengwen Feng 597c09c7847SChengwen Feng return 0; 598c09c7847SChengwen Feng } 599c09c7847SChengwen Feng 600c09c7847SChengwen Feng static int 601c09c7847SChengwen Feng hns3_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id, 602c09c7847SChengwen Feng uint32_t parent_node_id, uint32_t priority, 603c09c7847SChengwen Feng uint32_t weight, uint32_t level_id, 6045d49af62SBruce Richardson const struct rte_tm_node_params *params, 605c09c7847SChengwen Feng struct rte_tm_error *error) 606c09c7847SChengwen Feng { 607c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 608c09c7847SChengwen Feng enum hns3_tm_node_type parent_node_type = HNS3_TM_NODE_TYPE_MAX; 609c09c7847SChengwen 
Feng struct hns3_tm_node *parent_node; 610c09c7847SChengwen Feng int ret; 611c09c7847SChengwen Feng 612c09c7847SChengwen Feng if (params == NULL || error == NULL) 613c09c7847SChengwen Feng return -EINVAL; 614c09c7847SChengwen Feng 615c09c7847SChengwen Feng if (pf->tm_conf.committed) { 616c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; 617c09c7847SChengwen Feng error->message = "already committed"; 618c09c7847SChengwen Feng return -EINVAL; 619c09c7847SChengwen Feng } 620c09c7847SChengwen Feng 621c09c7847SChengwen Feng ret = hns3_tm_node_param_check(dev, node_id, priority, weight, 622c09c7847SChengwen Feng params, error); 623c09c7847SChengwen Feng if (ret) 624c09c7847SChengwen Feng return ret; 625c09c7847SChengwen Feng 626c09c7847SChengwen Feng /* root node who don't have a parent */ 627c09c7847SChengwen Feng if (parent_node_id == RTE_TM_NODE_ID_NULL) 628c09c7847SChengwen Feng return hns3_tm_port_node_add(dev, node_id, level_id, 629c09c7847SChengwen Feng params, error); 630c09c7847SChengwen Feng 631c09c7847SChengwen Feng parent_node = hns3_tm_node_search(dev, parent_node_id, 632c09c7847SChengwen Feng &parent_node_type); 633c09c7847SChengwen Feng if (parent_node == NULL) { 634c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; 635c09c7847SChengwen Feng error->message = "parent not exist"; 636c09c7847SChengwen Feng return -EINVAL; 637c09c7847SChengwen Feng } 638c09c7847SChengwen Feng 639c09c7847SChengwen Feng if (parent_node_type != HNS3_TM_NODE_TYPE_PORT && 640c09c7847SChengwen Feng parent_node_type != HNS3_TM_NODE_TYPE_TC) { 641c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; 642c09c7847SChengwen Feng error->message = "parent is not port or TC"; 643c09c7847SChengwen Feng return -EINVAL; 644c09c7847SChengwen Feng } 645c09c7847SChengwen Feng 646c09c7847SChengwen Feng if (parent_node_type == HNS3_TM_NODE_TYPE_PORT) 647c09c7847SChengwen Feng return hns3_tm_tc_node_add(dev, node_id, level_id, 648c09c7847SChengwen Feng parent_node, params, error); 649c09c7847SChengwen Feng else 650c09c7847SChengwen Feng return hns3_tm_queue_node_add(dev, node_id, level_id, 651c09c7847SChengwen Feng parent_node, params, error); 652c09c7847SChengwen Feng } 653c09c7847SChengwen Feng 654c09c7847SChengwen Feng static void 655c09c7847SChengwen Feng hns3_tm_node_do_delete(struct hns3_pf *pf, 656c09c7847SChengwen Feng enum hns3_tm_node_type node_type, 657c09c7847SChengwen Feng struct hns3_tm_node *tm_node) 658c09c7847SChengwen Feng { 659c09c7847SChengwen Feng if (node_type == HNS3_TM_NODE_TYPE_PORT) { 660c09c7847SChengwen Feng if (tm_node->shaper_profile) 661c09c7847SChengwen Feng tm_node->shaper_profile->reference_count--; 662c09c7847SChengwen Feng rte_free(tm_node); 663c09c7847SChengwen Feng pf->tm_conf.root = NULL; 664c09c7847SChengwen Feng return; 665c09c7847SChengwen Feng } 666c09c7847SChengwen Feng 667c09c7847SChengwen Feng if (tm_node->shaper_profile) 668c09c7847SChengwen Feng tm_node->shaper_profile->reference_count--; 669c09c7847SChengwen Feng tm_node->parent->reference_count--; 670c09c7847SChengwen Feng if (node_type == HNS3_TM_NODE_TYPE_TC) { 671c09c7847SChengwen Feng TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node); 672c09c7847SChengwen Feng pf->tm_conf.nb_tc_node--; 673c09c7847SChengwen Feng } else { 674c09c7847SChengwen Feng TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node); 675c09c7847SChengwen Feng pf->tm_conf.nb_queue_node--; 676c09c7847SChengwen Feng } 677c09c7847SChengwen Feng rte_free(tm_node); 678c09c7847SChengwen Feng } 
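/*
 * Editor's note -- illustrative, application-side sketch only, not part of
 * this driver.  It shows how the node-id layout enforced above is typically
 * exercised through the generic rte_tm API: queue (leaf) nodes use ids
 * [0, max_tx_queues), TC nodes sit directly above them, and the port (root)
 * node takes the largest id, i.e. cap.n_nodes_max - 1.  Assumptions: the
 * port is already configured with at least 4 Tx queues, a single TC (the
 * driver default) so every queue maps to TC 0, shaper profile id 1 is free,
 * and <rte_ethdev.h>/<rte_tm.h> are available; the helper name below is
 * hypothetical and error handling is trimmed.
 */
static int
hns3_tm_usage_sketch(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_tm_capabilities cap;
	struct rte_tm_shaper_params sp;
	struct rte_tm_node_params np;
	struct rte_tm_error err;
	uint32_t leaf_max, tc_node, port_node, qid;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	ret = rte_tm_capabilities_get(port_id, &cap, &err);
	if (ret != 0)
		return ret;

	leaf_max = RTE_MIN(dev_info.max_tx_queues, RTE_MAX_QUEUES_PER_PORT);
	tc_node = leaf_max;              /* first TC node id */
	port_node = cap.n_nodes_max - 1; /* root (port) node id */

	/* 10 Gbit/s port shaper: rte_tm rates are in bytes per second. */
	memset(&sp, 0, sizeof(sp));
	sp.peak.rate = 10000ULL * 125000ULL;
	ret = rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
	if (ret != 0)
		return ret;

	/* Root node: priority 0 and weight 1 are mandatory in this driver. */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = 1;
	np.nonleaf.n_sp_priorities = 1;
	ret = rte_tm_node_add(port_id, port_node, RTE_TM_NODE_ID_NULL,
			      0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret != 0)
		return ret;

	/* One TC node under the port, without a private shaper. */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
	np.nonleaf.n_sp_priorities = 1;
	ret = rte_tm_node_add(port_id, tc_node, port_node,
			      0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret != 0)
		return ret;

	/* Queue (leaf) nodes under the TC they are mapped to. */
	for (qid = 0; qid < 4; qid++) {
		memset(&np, 0, sizeof(np));
		np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
		np.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
		np.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
		ret = rte_tm_node_add(port_id, qid, tc_node,
				      0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
		if (ret != 0)
			return ret;
	}

	/* Apply; ask the driver to clear the hierarchy if commit fails. */
	return rte_tm_hierarchy_commit(port_id, 1, &err);
}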
679c09c7847SChengwen Feng 680c09c7847SChengwen Feng static int 681c09c7847SChengwen Feng hns3_tm_node_delete(struct rte_eth_dev *dev, 682c09c7847SChengwen Feng uint32_t node_id, 683c09c7847SChengwen Feng struct rte_tm_error *error) 684c09c7847SChengwen Feng { 685c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 686c09c7847SChengwen Feng enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX; 687c09c7847SChengwen Feng struct hns3_tm_node *tm_node; 688c09c7847SChengwen Feng 689c09c7847SChengwen Feng if (error == NULL) 690c09c7847SChengwen Feng return -EINVAL; 691c09c7847SChengwen Feng 692c09c7847SChengwen Feng if (pf->tm_conf.committed) { 693c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; 694c09c7847SChengwen Feng error->message = "already committed"; 695c09c7847SChengwen Feng return -EINVAL; 696c09c7847SChengwen Feng } 697c09c7847SChengwen Feng 698c09c7847SChengwen Feng tm_node = hns3_tm_node_search(dev, node_id, &node_type); 699c09c7847SChengwen Feng if (tm_node == NULL) { 700c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 701c09c7847SChengwen Feng error->message = "no such node"; 702c09c7847SChengwen Feng return -EINVAL; 703c09c7847SChengwen Feng } 704c09c7847SChengwen Feng 705c09c7847SChengwen Feng if (tm_node->reference_count) { 706c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 707c09c7847SChengwen Feng error->message = "cannot delete a node which has children"; 708c09c7847SChengwen Feng return -EINVAL; 709c09c7847SChengwen Feng } 710c09c7847SChengwen Feng 711c09c7847SChengwen Feng hns3_tm_node_do_delete(pf, node_type, tm_node); 712c09c7847SChengwen Feng 713c09c7847SChengwen Feng return 0; 714c09c7847SChengwen Feng } 715c09c7847SChengwen Feng 716c09c7847SChengwen Feng static int 717c09c7847SChengwen Feng hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, 718c09c7847SChengwen Feng int *is_leaf, struct rte_tm_error *error) 719c09c7847SChengwen Feng { 720c09c7847SChengwen Feng enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX; 721c09c7847SChengwen Feng struct hns3_tm_node *tm_node; 722c09c7847SChengwen Feng 723c09c7847SChengwen Feng if (is_leaf == NULL || error == NULL) 724c09c7847SChengwen Feng return -EINVAL; 725c09c7847SChengwen Feng 726c09c7847SChengwen Feng tm_node = hns3_tm_node_search(dev, node_id, &node_type); 727c09c7847SChengwen Feng if (tm_node == NULL) { 728c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 729c09c7847SChengwen Feng error->message = "no such node"; 730c09c7847SChengwen Feng return -EINVAL; 731c09c7847SChengwen Feng } 732c09c7847SChengwen Feng 733c09c7847SChengwen Feng if (node_type == HNS3_TM_NODE_TYPE_QUEUE) 734c09c7847SChengwen Feng *is_leaf = true; 735c09c7847SChengwen Feng else 736c09c7847SChengwen Feng *is_leaf = false; 737c09c7847SChengwen Feng 738c09c7847SChengwen Feng return 0; 739c09c7847SChengwen Feng } 740c09c7847SChengwen Feng 741c09c7847SChengwen Feng static void 74228ad38ddSJie Hai hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev, 743c09c7847SChengwen Feng uint32_t level_id, 744c09c7847SChengwen Feng struct rte_tm_level_capabilities *cap) 745c09c7847SChengwen Feng { 746c09c7847SChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 747c09c7847SChengwen Feng uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev); 748c09c7847SChengwen Feng 749c09c7847SChengwen Feng if (level_id == HNS3_TM_NODE_LEVEL_PORT) { 750c09c7847SChengwen Feng cap->n_nodes_max = 1; 751c09c7847SChengwen 
Feng cap->n_nodes_nonleaf_max = 1; 752c09c7847SChengwen Feng cap->n_nodes_leaf_max = 0; 753c09c7847SChengwen Feng } else { 754c09c7847SChengwen Feng cap->n_nodes_max = HNS3_MAX_TC_NUM; 755c09c7847SChengwen Feng cap->n_nodes_nonleaf_max = HNS3_MAX_TC_NUM; 756c09c7847SChengwen Feng cap->n_nodes_leaf_max = 0; 757c09c7847SChengwen Feng } 758c09c7847SChengwen Feng 759c09c7847SChengwen Feng cap->non_leaf_nodes_identical = 1; 760c09c7847SChengwen Feng cap->leaf_nodes_identical = 1; 761c09c7847SChengwen Feng 762c09c7847SChengwen Feng cap->nonleaf.shaper_private_supported = true; 763c09c7847SChengwen Feng cap->nonleaf.shaper_private_dual_rate_supported = false; 764c09c7847SChengwen Feng cap->nonleaf.shaper_private_rate_min = 0; 765c09c7847SChengwen Feng cap->nonleaf.shaper_private_rate_max = 766c09c7847SChengwen Feng hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate); 767c09c7847SChengwen Feng cap->nonleaf.shaper_shared_n_max = 0; 768c09c7847SChengwen Feng if (level_id == HNS3_TM_NODE_LEVEL_PORT) 769c09c7847SChengwen Feng cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM; 770c09c7847SChengwen Feng else 771c09c7847SChengwen Feng cap->nonleaf.sched_n_children_max = max_tx_queues; 772c09c7847SChengwen Feng cap->nonleaf.sched_sp_n_priorities_max = 1; 773c09c7847SChengwen Feng cap->nonleaf.sched_wfq_n_children_per_group_max = 0; 774c09c7847SChengwen Feng cap->nonleaf.sched_wfq_n_groups_max = 0; 775c09c7847SChengwen Feng cap->nonleaf.sched_wfq_weight_max = 1; 776c09c7847SChengwen Feng cap->nonleaf.stats_mask = 0; 777c09c7847SChengwen Feng } 778c09c7847SChengwen Feng 779c09c7847SChengwen Feng static void 780c09c7847SChengwen Feng hns3_tm_leaf_level_capabilities_get(struct rte_eth_dev *dev, 781c09c7847SChengwen Feng struct rte_tm_level_capabilities *cap) 782c09c7847SChengwen Feng { 783c09c7847SChengwen Feng uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev); 784c09c7847SChengwen Feng 785c09c7847SChengwen Feng cap->n_nodes_max = max_tx_queues; 786c09c7847SChengwen Feng cap->n_nodes_nonleaf_max = 0; 787c09c7847SChengwen Feng cap->n_nodes_leaf_max = max_tx_queues; 788c09c7847SChengwen Feng 789c09c7847SChengwen Feng cap->non_leaf_nodes_identical = 1; 790c09c7847SChengwen Feng cap->leaf_nodes_identical = 1; 791c09c7847SChengwen Feng 792c09c7847SChengwen Feng cap->leaf.shaper_private_supported = false; 793c09c7847SChengwen Feng cap->leaf.shaper_private_dual_rate_supported = false; 794c09c7847SChengwen Feng cap->leaf.shaper_private_rate_min = 0; 795c09c7847SChengwen Feng cap->leaf.shaper_private_rate_max = 0; 796c09c7847SChengwen Feng cap->leaf.shaper_shared_n_max = 0; 797c09c7847SChengwen Feng cap->leaf.cman_head_drop_supported = false; 798c09c7847SChengwen Feng cap->leaf.cman_wred_context_private_supported = false; 799c09c7847SChengwen Feng cap->leaf.cman_wred_context_shared_n_max = 0; 800c09c7847SChengwen Feng cap->leaf.stats_mask = 0; 801c09c7847SChengwen Feng } 802c09c7847SChengwen Feng 803c09c7847SChengwen Feng static int 804c09c7847SChengwen Feng hns3_tm_level_capabilities_get(struct rte_eth_dev *dev, 805c09c7847SChengwen Feng uint32_t level_id, 806c09c7847SChengwen Feng struct rte_tm_level_capabilities *cap, 807c09c7847SChengwen Feng struct rte_tm_error *error) 808c09c7847SChengwen Feng { 809c09c7847SChengwen Feng if (cap == NULL || error == NULL) 810c09c7847SChengwen Feng return -EINVAL; 811c09c7847SChengwen Feng 812c09c7847SChengwen Feng if (level_id >= HNS3_TM_NODE_LEVEL_MAX) { 813c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_LEVEL_ID; 814c09c7847SChengwen Feng error->message = "too deep 
level"; 815c09c7847SChengwen Feng return -EINVAL; 816c09c7847SChengwen Feng } 817c09c7847SChengwen Feng 818c09c7847SChengwen Feng memset(cap, 0, sizeof(struct rte_tm_level_capabilities)); 819c09c7847SChengwen Feng 820c09c7847SChengwen Feng if (level_id != HNS3_TM_NODE_LEVEL_QUEUE) 82128ad38ddSJie Hai hns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap); 822c09c7847SChengwen Feng else 823c09c7847SChengwen Feng hns3_tm_leaf_level_capabilities_get(dev, cap); 824c09c7847SChengwen Feng 825c09c7847SChengwen Feng return 0; 826c09c7847SChengwen Feng } 827c09c7847SChengwen Feng 828c09c7847SChengwen Feng static void 829c09c7847SChengwen Feng hns3_tm_nonleaf_node_capabilities_get(struct rte_eth_dev *dev, 830c09c7847SChengwen Feng enum hns3_tm_node_type node_type, 831c09c7847SChengwen Feng struct rte_tm_node_capabilities *cap) 832c09c7847SChengwen Feng { 833c09c7847SChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 834c09c7847SChengwen Feng uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev); 835c09c7847SChengwen Feng 836c09c7847SChengwen Feng cap->shaper_private_supported = true; 837c09c7847SChengwen Feng cap->shaper_private_dual_rate_supported = false; 838c09c7847SChengwen Feng cap->shaper_private_rate_min = 0; 839c09c7847SChengwen Feng cap->shaper_private_rate_max = 840c09c7847SChengwen Feng hns3_tm_rate_convert_firmware2tm(hw->max_tm_rate); 841c09c7847SChengwen Feng cap->shaper_shared_n_max = 0; 842c09c7847SChengwen Feng 843c09c7847SChengwen Feng if (node_type == HNS3_TM_NODE_TYPE_PORT) 844c09c7847SChengwen Feng cap->nonleaf.sched_n_children_max = HNS3_MAX_TC_NUM; 845c09c7847SChengwen Feng else 846c09c7847SChengwen Feng cap->nonleaf.sched_n_children_max = max_tx_queues; 847c09c7847SChengwen Feng cap->nonleaf.sched_sp_n_priorities_max = 1; 848c09c7847SChengwen Feng cap->nonleaf.sched_wfq_n_children_per_group_max = 0; 849c09c7847SChengwen Feng cap->nonleaf.sched_wfq_n_groups_max = 0; 850c09c7847SChengwen Feng cap->nonleaf.sched_wfq_weight_max = 1; 851c09c7847SChengwen Feng 852c09c7847SChengwen Feng cap->stats_mask = 0; 853c09c7847SChengwen Feng } 854c09c7847SChengwen Feng 855c09c7847SChengwen Feng static void 856c09c7847SChengwen Feng hns3_tm_leaf_node_capabilities_get(struct rte_eth_dev *dev __rte_unused, 857c09c7847SChengwen Feng struct rte_tm_node_capabilities *cap) 858c09c7847SChengwen Feng { 859c09c7847SChengwen Feng cap->shaper_private_supported = false; 860c09c7847SChengwen Feng cap->shaper_private_dual_rate_supported = false; 861c09c7847SChengwen Feng cap->shaper_private_rate_min = 0; 862c09c7847SChengwen Feng cap->shaper_private_rate_max = 0; 863c09c7847SChengwen Feng cap->shaper_shared_n_max = 0; 864c09c7847SChengwen Feng 865c09c7847SChengwen Feng cap->leaf.cman_head_drop_supported = false; 866c09c7847SChengwen Feng cap->leaf.cman_wred_context_private_supported = false; 867c09c7847SChengwen Feng cap->leaf.cman_wred_context_shared_n_max = 0; 868c09c7847SChengwen Feng 869c09c7847SChengwen Feng cap->stats_mask = 0; 870c09c7847SChengwen Feng } 871c09c7847SChengwen Feng 872c09c7847SChengwen Feng static int 873c09c7847SChengwen Feng hns3_tm_node_capabilities_get(struct rte_eth_dev *dev, 874c09c7847SChengwen Feng uint32_t node_id, 875c09c7847SChengwen Feng struct rte_tm_node_capabilities *cap, 876c09c7847SChengwen Feng struct rte_tm_error *error) 877c09c7847SChengwen Feng { 878c09c7847SChengwen Feng enum hns3_tm_node_type node_type; 879c09c7847SChengwen Feng struct hns3_tm_node *tm_node; 880c09c7847SChengwen Feng 881c09c7847SChengwen Feng if (cap == NULL 
|| error == NULL) 882c09c7847SChengwen Feng return -EINVAL; 883c09c7847SChengwen Feng 884c09c7847SChengwen Feng tm_node = hns3_tm_node_search(dev, node_id, &node_type); 885c09c7847SChengwen Feng if (tm_node == NULL) { 886c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 887c09c7847SChengwen Feng error->message = "no such node"; 888c09c7847SChengwen Feng return -EINVAL; 889c09c7847SChengwen Feng } 890c09c7847SChengwen Feng 891c09c7847SChengwen Feng memset(cap, 0, sizeof(struct rte_tm_node_capabilities)); 892c09c7847SChengwen Feng 893c09c7847SChengwen Feng if (node_type != HNS3_TM_NODE_TYPE_QUEUE) 894c09c7847SChengwen Feng hns3_tm_nonleaf_node_capabilities_get(dev, node_type, cap); 895c09c7847SChengwen Feng else 896c09c7847SChengwen Feng hns3_tm_leaf_node_capabilities_get(dev, cap); 897c09c7847SChengwen Feng 898c09c7847SChengwen Feng return 0; 899c09c7847SChengwen Feng } 900c09c7847SChengwen Feng 901c09c7847SChengwen Feng static int 902c09c7847SChengwen Feng hns3_tm_config_port_rate(struct hns3_hw *hw, 903c09c7847SChengwen Feng struct hns3_tm_shaper_profile *shaper_profile) 904c09c7847SChengwen Feng { 905fc18d1b4SHuisong Li struct hns3_port_limit_rate_cmd *cfg; 906fc18d1b4SHuisong Li struct hns3_cmd_desc desc; 907c09c7847SChengwen Feng uint32_t firmware_rate; 908c09c7847SChengwen Feng uint64_t rate; 909fc18d1b4SHuisong Li int ret; 910c09c7847SChengwen Feng 911c09c7847SChengwen Feng if (shaper_profile) { 912c09c7847SChengwen Feng rate = shaper_profile->profile.peak.rate; 913c09c7847SChengwen Feng firmware_rate = hns3_tm_rate_convert_tm2firmware(rate); 914c09c7847SChengwen Feng } else { 915fc18d1b4SHuisong Li firmware_rate = hw->max_tm_rate; 916c09c7847SChengwen Feng } 917c09c7847SChengwen Feng 918fc18d1b4SHuisong Li hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_PORT_LIMIT_RATE, false); 919fc18d1b4SHuisong Li cfg = (struct hns3_port_limit_rate_cmd *)desc.data; 920fc18d1b4SHuisong Li cfg->speed = rte_cpu_to_le_32(firmware_rate); 921fc18d1b4SHuisong Li 922fc18d1b4SHuisong Li ret = hns3_cmd_send(hw, &desc, 1); 923fc18d1b4SHuisong Li if (ret) 924fc18d1b4SHuisong Li hns3_err(hw, "failed to config port rate, ret = %d", ret); 925fc18d1b4SHuisong Li 926fc18d1b4SHuisong Li return ret; 927c09c7847SChengwen Feng } 928c09c7847SChengwen Feng 929c09c7847SChengwen Feng static int 930fc18d1b4SHuisong Li hns3_tm_config_tc_rate(struct hns3_hw *hw, uint8_t tc_no, 931c09c7847SChengwen Feng struct hns3_tm_shaper_profile *shaper_profile) 932c09c7847SChengwen Feng { 933fc18d1b4SHuisong Li struct hns3_tc_limit_rate_cmd *cfg; 934fc18d1b4SHuisong Li struct hns3_cmd_desc desc; 935c09c7847SChengwen Feng uint32_t firmware_rate; 936c09c7847SChengwen Feng uint64_t rate; 937fc18d1b4SHuisong Li int ret; 938c09c7847SChengwen Feng 939c09c7847SChengwen Feng if (shaper_profile) { 940c09c7847SChengwen Feng rate = shaper_profile->profile.peak.rate; 941c09c7847SChengwen Feng firmware_rate = hns3_tm_rate_convert_tm2firmware(rate); 942c09c7847SChengwen Feng } else { 943c09c7847SChengwen Feng firmware_rate = hw->dcb_info.tc_info[tc_no].bw_limit; 944c09c7847SChengwen Feng } 945c09c7847SChengwen Feng 946fc18d1b4SHuisong Li hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_TC_LIMIT_RATE, false); 947fc18d1b4SHuisong Li cfg = (struct hns3_tc_limit_rate_cmd *)desc.data; 948fc18d1b4SHuisong Li cfg->speed = rte_cpu_to_le_32(firmware_rate); 949fc18d1b4SHuisong Li cfg->tc_id = tc_no; 950fc18d1b4SHuisong Li 951fc18d1b4SHuisong Li ret = hns3_cmd_send(hw, &desc, 1); 952fc18d1b4SHuisong Li if (ret) 953fc18d1b4SHuisong Li hns3_err(hw, "failed 
to config tc (%u) rate, ret = %d", 954fc18d1b4SHuisong Li tc_no, ret); 955fc18d1b4SHuisong Li 956fc18d1b4SHuisong Li return ret; 957c09c7847SChengwen Feng } 958c09c7847SChengwen Feng 959c09c7847SChengwen Feng static bool 960c09c7847SChengwen Feng hns3_tm_configure_check(struct hns3_hw *hw, struct rte_tm_error *error) 961c09c7847SChengwen Feng { 962c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 963c09c7847SChengwen Feng struct hns3_tm_conf *tm_conf = &pf->tm_conf; 964c09c7847SChengwen Feng struct hns3_tm_node_list *tc_list = &tm_conf->tc_list; 965c09c7847SChengwen Feng struct hns3_tm_node_list *queue_list = &tm_conf->queue_list; 966c09c7847SChengwen Feng struct hns3_tm_node *tm_node; 967c09c7847SChengwen Feng 968c09c7847SChengwen Feng /* TC */ 969c09c7847SChengwen Feng TAILQ_FOREACH(tm_node, tc_list, node) { 970c09c7847SChengwen Feng if (!tm_node->reference_count) { 971c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; 972c09c7847SChengwen Feng error->message = "TC without queue assigned"; 973c09c7847SChengwen Feng return false; 974c09c7847SChengwen Feng } 975c09c7847SChengwen Feng 976c09c7847SChengwen Feng if (hns3_tm_calc_node_tc_no(tm_conf, tm_node->id) >= 977c09c7847SChengwen Feng hw->num_tc) { 978c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 979c09c7847SChengwen Feng error->message = "node's TC not exist"; 980c09c7847SChengwen Feng return false; 981c09c7847SChengwen Feng } 982c09c7847SChengwen Feng } 983c09c7847SChengwen Feng 984c09c7847SChengwen Feng /* Queue */ 985c09c7847SChengwen Feng TAILQ_FOREACH(tm_node, queue_list, node) { 986c09c7847SChengwen Feng if (tm_node->id >= hw->data->nb_tx_queues) { 987c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 988c09c7847SChengwen Feng error->message = "node's queue invalid"; 989c09c7847SChengwen Feng return false; 990c09c7847SChengwen Feng } 991c09c7847SChengwen Feng 992c09c7847SChengwen Feng if (hns3_txq_mapped_tc_get(hw, tm_node->id) != 993c09c7847SChengwen Feng hns3_tm_calc_node_tc_no(tm_conf, tm_node->parent->id)) { 994c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 995c09c7847SChengwen Feng error->message = "queue's TC not match parent's TC"; 996c09c7847SChengwen Feng return false; 997c09c7847SChengwen Feng } 998c09c7847SChengwen Feng } 999c09c7847SChengwen Feng 1000c09c7847SChengwen Feng return true; 1001c09c7847SChengwen Feng } 1002c09c7847SChengwen Feng 1003c09c7847SChengwen Feng static int 1004c09c7847SChengwen Feng hns3_tm_hierarchy_do_commit(struct hns3_hw *hw, 1005c09c7847SChengwen Feng struct rte_tm_error *error) 1006c09c7847SChengwen Feng { 1007c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 1008c09c7847SChengwen Feng struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list; 1009c09c7847SChengwen Feng struct hns3_tm_node *tm_node; 1010c09c7847SChengwen Feng uint8_t tc_no; 1011c09c7847SChengwen Feng int ret; 1012c09c7847SChengwen Feng 1013c09c7847SChengwen Feng /* port */ 1014c09c7847SChengwen Feng tm_node = pf->tm_conf.root; 1015c09c7847SChengwen Feng if (tm_node->shaper_profile) { 1016c09c7847SChengwen Feng ret = hns3_tm_config_port_rate(hw, tm_node->shaper_profile); 1017c09c7847SChengwen Feng if (ret) { 1018c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; 1019c09c7847SChengwen Feng error->message = "fail to set port peak rate"; 1020c09c7847SChengwen Feng return -EIO; 1021c09c7847SChengwen Feng } 1022c09c7847SChengwen Feng } 1023c09c7847SChengwen Feng 1024c09c7847SChengwen Feng /* TC */ 
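	/*
	 * Editor's note: peak.rate arrives from the rte_tm API in bytes per
	 * second, while the firmware takes Mbit/s, hence the divide by
	 * 125000 in hns3_tm_rate_convert_tm2firmware().  A 10 Gbit/s profile,
	 * for instance, is passed in as 1250000000 Bps and programmed as
	 * 1250000000 / 125000 = 10000 Mbit/s.
	 */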
1025c09c7847SChengwen Feng TAILQ_FOREACH(tm_node, tc_list, node) { 1026c09c7847SChengwen Feng if (tm_node->shaper_profile == NULL) 1027c09c7847SChengwen Feng continue; 1028c09c7847SChengwen Feng 1029c09c7847SChengwen Feng tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id); 1030c09c7847SChengwen Feng ret = hns3_tm_config_tc_rate(hw, tc_no, 1031c09c7847SChengwen Feng tm_node->shaper_profile); 1032c09c7847SChengwen Feng if (ret) { 1033c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; 1034c09c7847SChengwen Feng error->message = "fail to set TC peak rate"; 1035c09c7847SChengwen Feng return -EIO; 1036c09c7847SChengwen Feng } 1037c09c7847SChengwen Feng } 1038c09c7847SChengwen Feng 1039c09c7847SChengwen Feng return 0; 1040c09c7847SChengwen Feng } 1041c09c7847SChengwen Feng 1042c09c7847SChengwen Feng static int 1043c09c7847SChengwen Feng hns3_tm_hierarchy_commit(struct rte_eth_dev *dev, 1044c09c7847SChengwen Feng int clear_on_fail, 1045c09c7847SChengwen Feng struct rte_tm_error *error) 1046c09c7847SChengwen Feng { 1047c09c7847SChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1048c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1049c09c7847SChengwen Feng int ret; 1050c09c7847SChengwen Feng 1051c09c7847SChengwen Feng if (error == NULL) 1052c09c7847SChengwen Feng return -EINVAL; 1053c09c7847SChengwen Feng 1054e12a0166STyler Retzlaff if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) { 1055c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; 1056c09c7847SChengwen Feng error->message = "device is resetting"; 1057c09c7847SChengwen Feng /* don't goto fail_clear, user may try later */ 1058c09c7847SChengwen Feng return -EBUSY; 1059c09c7847SChengwen Feng } 1060c09c7847SChengwen Feng 1061c09c7847SChengwen Feng if (pf->tm_conf.root == NULL) 1062c09c7847SChengwen Feng goto done; 1063c09c7847SChengwen Feng 1064c09c7847SChengwen Feng /* check configure before commit make sure key configure not violated */ 1065c09c7847SChengwen Feng if (!hns3_tm_configure_check(hw, error)) 1066c09c7847SChengwen Feng goto fail_clear; 1067c09c7847SChengwen Feng 1068c09c7847SChengwen Feng ret = hns3_tm_hierarchy_do_commit(hw, error); 1069c09c7847SChengwen Feng if (ret) 1070c09c7847SChengwen Feng goto fail_clear; 1071c09c7847SChengwen Feng 1072c09c7847SChengwen Feng done: 1073c09c7847SChengwen Feng pf->tm_conf.committed = true; 1074c09c7847SChengwen Feng return 0; 1075c09c7847SChengwen Feng 1076c09c7847SChengwen Feng fail_clear: 1077c09c7847SChengwen Feng if (clear_on_fail) { 1078c09c7847SChengwen Feng hns3_tm_conf_uninit(dev); 1079c09c7847SChengwen Feng hns3_tm_conf_init(dev); 1080c09c7847SChengwen Feng } 1081c09c7847SChengwen Feng return -EINVAL; 1082c09c7847SChengwen Feng } 1083c09c7847SChengwen Feng 1084c09c7847SChengwen Feng static int 1085c09c7847SChengwen Feng hns3_tm_node_shaper_do_update(struct hns3_hw *hw, 1086c09c7847SChengwen Feng uint32_t node_id, 1087c09c7847SChengwen Feng enum hns3_tm_node_type node_type, 1088c09c7847SChengwen Feng struct hns3_tm_shaper_profile *shaper_profile, 1089c09c7847SChengwen Feng struct rte_tm_error *error) 1090c09c7847SChengwen Feng { 1091c09c7847SChengwen Feng struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); 1092c09c7847SChengwen Feng uint8_t tc_no; 1093c09c7847SChengwen Feng int ret; 1094c09c7847SChengwen Feng 1095c09c7847SChengwen Feng if (node_type == HNS3_TM_NODE_TYPE_QUEUE) { 1096c09c7847SChengwen Feng if (shaper_profile != NULL) { 
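			/*
			 * Leaf (queue) nodes have no private shaper in this
			 * driver: attaching any profile to a queue node is
			 * rejected here, while a NULL profile (detach) falls
			 * through to the return below and succeeds as a no-op.
			 */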
1097c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; 1098c09c7847SChengwen Feng error->message = "queue node shaper not supported"; 1099c09c7847SChengwen Feng return -EINVAL; 1100c09c7847SChengwen Feng } 1101c09c7847SChengwen Feng return 0; 1102c09c7847SChengwen Feng } 1103c09c7847SChengwen Feng 1104c09c7847SChengwen Feng if (!pf->tm_conf.committed) 1105c09c7847SChengwen Feng return 0; 1106c09c7847SChengwen Feng 1107c09c7847SChengwen Feng if (node_type == HNS3_TM_NODE_TYPE_PORT) { 1108c09c7847SChengwen Feng ret = hns3_tm_config_port_rate(hw, shaper_profile); 1109c09c7847SChengwen Feng if (ret) { 1110c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; 1111c09c7847SChengwen Feng error->message = "fail to update port peak rate"; 1112c09c7847SChengwen Feng } 1113c09c7847SChengwen Feng 1114c09c7847SChengwen Feng return ret; 1115c09c7847SChengwen Feng } 1116c09c7847SChengwen Feng 1117c09c7847SChengwen Feng /* 1118c09c7847SChengwen Feng * update TC's shaper 1119c09c7847SChengwen Feng */ 1120c09c7847SChengwen Feng tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, node_id); 1121c09c7847SChengwen Feng ret = hns3_tm_config_tc_rate(hw, tc_no, shaper_profile); 1122c09c7847SChengwen Feng if (ret) { 1123c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE; 1124c09c7847SChengwen Feng error->message = "fail to update TC peak rate"; 1125c09c7847SChengwen Feng } 1126c09c7847SChengwen Feng 1127c09c7847SChengwen Feng return ret; 1128c09c7847SChengwen Feng } 1129c09c7847SChengwen Feng 1130c09c7847SChengwen Feng static int 1131c09c7847SChengwen Feng hns3_tm_node_shaper_update(struct rte_eth_dev *dev, 1132c09c7847SChengwen Feng uint32_t node_id, 1133c09c7847SChengwen Feng uint32_t shaper_profile_id, 1134c09c7847SChengwen Feng struct rte_tm_error *error) 1135c09c7847SChengwen Feng { 1136c09c7847SChengwen Feng struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1137c09c7847SChengwen Feng enum hns3_tm_node_type node_type = HNS3_TM_NODE_TYPE_MAX; 1138c09c7847SChengwen Feng struct hns3_tm_shaper_profile *profile = NULL; 1139c09c7847SChengwen Feng struct hns3_tm_node *tm_node; 1140c09c7847SChengwen Feng 1141c09c7847SChengwen Feng if (error == NULL) 1142c09c7847SChengwen Feng return -EINVAL; 1143c09c7847SChengwen Feng 1144e12a0166STyler Retzlaff if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) { 1145c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; 1146c09c7847SChengwen Feng error->message = "device is resetting"; 1147c09c7847SChengwen Feng return -EBUSY; 1148c09c7847SChengwen Feng } 1149c09c7847SChengwen Feng 1150c09c7847SChengwen Feng tm_node = hns3_tm_node_search(dev, node_id, &node_type); 1151c09c7847SChengwen Feng if (tm_node == NULL) { 1152c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_NODE_ID; 1153c09c7847SChengwen Feng error->message = "no such node"; 1154c09c7847SChengwen Feng return -EINVAL; 1155c09c7847SChengwen Feng } 1156c09c7847SChengwen Feng 1157c09c7847SChengwen Feng if (shaper_profile_id == tm_node->params.shaper_profile_id) 1158c09c7847SChengwen Feng return 0; 1159c09c7847SChengwen Feng 1160c09c7847SChengwen Feng if (shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) { 1161c09c7847SChengwen Feng profile = hns3_tm_shaper_profile_search(dev, shaper_profile_id); 1162c09c7847SChengwen Feng if (profile == NULL) { 1163c09c7847SChengwen Feng error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID; 1164c09c7847SChengwen Feng error->message = "profile ID not exist"; 1165c09c7847SChengwen 
static int
hns3_tm_capabilities_get_wrap(struct rte_eth_dev *dev,
			      struct rte_tm_capabilities *cap,
			      struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_capabilities_get(dev, cap, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_tm_shaper_profile_add_wrap(struct rte_eth_dev *dev,
				uint32_t shaper_profile_id,
				const struct rte_tm_shaper_params *profile,
				struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_shaper_profile_add(dev, shaper_profile_id, profile, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_tm_shaper_profile_del_wrap(struct rte_eth_dev *dev,
				uint32_t shaper_profile_id,
				struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_shaper_profile_del(dev, shaper_profile_id, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

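/*
 * Usage sketch (illustrative only): a shaper profile is described with the
 * generic rte_tm token-bucket parameters before being attached to a node.
 * The ID and rate/size values below are arbitrary assumptions (bytes per
 * second and bytes, respectively, since packet_mode is not set).
 *
 *	struct rte_tm_shaper_params sp = {
 *		.peak = { .rate = 1250000000ULL, .size = 4096 },
 *	};
 *	struct rte_tm_error err = { 0 };
 *
 *	rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
 */
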
static int
hns3_tm_node_add_wrap(struct rte_eth_dev *dev, uint32_t node_id,
		      uint32_t parent_node_id, uint32_t priority,
		      uint32_t weight, uint32_t level_id,
		      const struct rte_tm_node_params *params,
		      struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_node_add(dev, node_id, parent_node_id, priority,
			       weight, level_id, params, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_tm_node_delete_wrap(struct rte_eth_dev *dev,
			 uint32_t node_id,
			 struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_node_delete(dev, node_id, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_tm_node_type_get_wrap(struct rte_eth_dev *dev,
			   uint32_t node_id,
			   int *is_leaf,
			   struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_node_type_get(dev, node_id, is_leaf, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_tm_level_capabilities_get_wrap(struct rte_eth_dev *dev,
				    uint32_t level_id,
				    struct rte_tm_level_capabilities *cap,
				    struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_level_capabilities_get(dev, level_id, cap, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

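/*
 * Usage sketch (illustrative only): the hierarchy handled here is
 * port -> TC -> queue, so an application typically adds one root node, one
 * node per TC and one leaf node per Tx queue before committing. port_id, the
 * node IDs and the profile attachment points below are assumptions; the
 * driver enforces its own ID ranges and level constraints.
 *
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *	};
 *	struct rte_tm_error err = { 0 };
 *
 *	rte_tm_node_add(port_id, root_id, RTE_TM_NODE_ID_NULL, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *	rte_tm_node_add(port_id, tc0_id, root_id, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *	rte_tm_node_add(port_id, queue0_id, tc0_id, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 */
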
static int
hns3_tm_node_capabilities_get_wrap(struct rte_eth_dev *dev,
				   uint32_t node_id,
				   struct rte_tm_node_capabilities *cap,
				   struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_node_capabilities_get(dev, node_id, cap, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
			      int clear_on_fail,
			      struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static int
hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
				uint32_t node_id,
				uint32_t shaper_profile_id,
				struct rte_tm_error *error)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&hw->lock);
	ret = hns3_tm_node_shaper_update(dev, node_id,
					 shaper_profile_id, error);
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

static const struct rte_tm_ops hns3_tm_ops = {
	.capabilities_get = hns3_tm_capabilities_get_wrap,
	.shaper_profile_add = hns3_tm_shaper_profile_add_wrap,
	.shaper_profile_delete = hns3_tm_shaper_profile_del_wrap,
	.node_add = hns3_tm_node_add_wrap,
	.node_delete = hns3_tm_node_delete_wrap,
	.node_type_get = hns3_tm_node_type_get_wrap,
	.level_capabilities_get = hns3_tm_level_capabilities_get_wrap,
	.node_capabilities_get = hns3_tm_node_capabilities_get_wrap,
	.hierarchy_commit = hns3_tm_hierarchy_commit_wrap,
	.node_shaper_update = hns3_tm_node_shaper_update_wrap,
};

int
hns3_tm_ops_get(struct rte_eth_dev *dev, void *arg)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (arg == NULL)
		return -EINVAL;

	if (!hns3_dev_get_support(hw, TM))
		return -EOPNOTSUPP;

	*(const void **)arg = &hns3_tm_ops;

	return 0;
}

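/*
 * Usage sketch (illustrative only): hns3_tm_ops_get() is expected to be
 * registered as the .tm_ops_get callback of the driver's eth_dev_ops, so the
 * generic rte_tm_* functions resolve to the *_wrap callbacks above, e.g.:
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error err = { 0 };
 *
 *	memset(&cap, 0, sizeof(cap));
 *	rte_tm_capabilities_get(port_id, &cap, &err);
 */
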
void
hns3_tm_dev_start_proc(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);

	if (!hns3_dev_get_support(hw, TM))
		return;

	if (pf->tm_conf.root && !pf->tm_conf.committed)
		hns3_warn(hw,
			  "please call hierarchy_commit() before starting the port.");
}

/*
 * Clear the tm_conf committed flag when the device is stopped so that the
 * user can modify the TM configuration (e.g. add or delete a node).
 *
 * If the user did not call hierarchy commit before the device is started
 * again, the port/TC shaper rates would otherwise remain the ones committed
 * previously.
 *
 * To avoid that, restore the default port/TC shaper rates when the device is
 * stopped.
 */
void
hns3_tm_dev_stop_proc(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct hns3_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct hns3_tm_node *tm_node;
	uint8_t tc_no;

	if (!pf->tm_conf.committed)
		return;

	tm_node = pf->tm_conf.root;
	if (tm_node != NULL && tm_node->shaper_profile)
		(void)hns3_tm_config_port_rate(hw, NULL);

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->shaper_profile == NULL)
			continue;
		tc_no = hns3_tm_calc_node_tc_no(&pf->tm_conf, tm_node->id);
		(void)hns3_tm_config_tc_rate(hw, tc_no, NULL);
	}

	pf->tm_conf.committed = false;
}

int
hns3_tm_conf_update(struct hns3_hw *hw)
{
	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
	struct rte_tm_error error;

	if (!hns3_dev_get_support(hw, TM))
		return 0;

	if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)
		return 0;

	memset(&error, 0, sizeof(struct rte_tm_error));
	return hns3_tm_hierarchy_do_commit(hw, &error);
}
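
/*
 * Usage sketch (illustrative only): because the committed flag is cleared on
 * stop, a typical reconfiguration sequence from the application side would be
 * (port_id and node IDs are assumptions, err/np declared as in the sketches
 * above):
 *
 *	rte_eth_dev_stop(port_id);
 *	rte_tm_node_delete(port_id, queue0_id, &err);
 *	rte_tm_node_add(port_id, queue0_id, tc1_id, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *	rte_tm_hierarchy_commit(port_id, 1, &err);
 *	rte_eth_dev_start(port_id);
 */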