/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */
#include <rte_ethdev.h>
#include <rte_tm_driver.h>

#include "ice_ethdev.h"
#include "ice_rxtx.h"

static int ice_hierarchy_commit(struct rte_eth_dev *dev,
				int clear_on_fail,
				struct rte_tm_error *error);
static int ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
			   uint32_t parent_node_id, uint32_t priority,
			   uint32_t weight, uint32_t level_id,
			   const struct rte_tm_node_params *params,
			   struct rte_tm_error *error);
static int ice_node_query(const struct rte_eth_dev *dev, uint32_t node_id,
			  uint32_t *parent_node_id, uint32_t *priority,
			  uint32_t *weight, uint32_t *level_id,
			  struct rte_tm_node_params *params,
			  struct rte_tm_error *error);
static int ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
			      struct rte_tm_error *error);
static int ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
			     int *is_leaf, struct rte_tm_error *error);
static int ice_shaper_profile_add(struct rte_eth_dev *dev,
				  uint32_t shaper_profile_id,
				  const struct rte_tm_shaper_params *profile,
				  struct rte_tm_error *error);
static int ice_shaper_profile_del(struct rte_eth_dev *dev,
				  uint32_t shaper_profile_id,
				  struct rte_tm_error *error);

const struct rte_tm_ops ice_tm_ops = {
	.shaper_profile_add = ice_shaper_profile_add,
	.shaper_profile_delete = ice_shaper_profile_del,
	.node_add = ice_tm_node_add,
	.node_delete = ice_tm_node_delete,
	.node_type_get = ice_node_type_get,
	.node_query = ice_node_query,
	.hierarchy_commit = ice_hierarchy_commit,
};

void
ice_tm_conf_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* initialize node configuration */
	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
	pf->tm_conf.root = NULL;
	pf->tm_conf.committed = false;
	pf->tm_conf.clear_on_fail = false;
}

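/* Recursively free a software TM node and all of its children.
 * reference_count holds the number of populated entries in the node's
 * children array.
 */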
static void free_node(struct ice_tm_node *root)
{
	uint32_t i;

	if (root == NULL)
		return;

	for (i = 0; i < root->reference_count; i++)
		free_node(root->children[i]);

	rte_free(root);
}

void
ice_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_tm_shaper_profile *shaper_profile;

	/* clear profiles */
	while ((shaper_profile = TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
		TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
		rte_free(shaper_profile);
	}

	free_node(pf->tm_conf.root);
	pf->tm_conf.root = NULL;
}

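/* Validate the parameters of a node add request, rejecting anything the
 * scheduler cannot express: priorities above 7, weights outside [1, 200],
 * shared shapers, WFQ weight modes, congestion management and WRED.
 */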
static int
ice_node_param_check(uint32_t node_id,
		     uint32_t priority, uint32_t weight,
		     const struct rte_tm_node_params *params,
		     bool is_leaf,
		     struct rte_tm_error *error)
{
	/* check all the unsupported parameters */
	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (priority >= 8) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be less than 8";
		return -EINVAL;
	}

	if (weight > 200 || weight < 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be between 1 and 200";
		return -EINVAL;
	}

	/* shared shapers are not supported */
	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	/* for non-leaf node */
	if (!is_leaf) {
		if (params->nonleaf.wfq_weight_mode) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ not supported";
			return -EINVAL;
		}
		if (params->nonleaf.n_sp_priorities != 1) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
			error->message = "SP priority not supported";
			return -EINVAL;
		} else if (params->nonleaf.wfq_weight_mode &&
			   !(*params->nonleaf.wfq_weight_mode)) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ should be byte mode";
			return -EINVAL;
		}

		return 0;
	}

	/* for leaf node */
	if (node_id >= RTE_MAX_QUEUES_PER_PORT) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "Node ID out of range for a leaf node.";
		return -EINVAL;
	}
	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "Congestion management not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.wred_profile_id !=
	    RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}

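/* Depth-first search of the software hierarchy for the node with the given
 * ID. Returns NULL if no such node exists.
 */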
static struct ice_tm_node *
find_node(struct ice_tm_node *root, uint32_t id)
{
	uint32_t i;

	if (root == NULL || root->id == id)
		return root;

	for (i = 0; i < root->reference_count; i++) {
		struct ice_tm_node *node = find_node(root->children[i], id);

		if (node)
			return node;
	}

	return NULL;
}

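/* The leaf (queue) level of the exposed hierarchy: the number of hardware Tx
 * scheduler layers, minus any layers hidden from the user, minus one since
 * levels are counted from zero.
 */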
static inline uint8_t
ice_get_leaf_level(const struct ice_pf *pf)
{
	const struct ice_hw *hw = ICE_PF_TO_HW(pf);
	return hw->num_tx_sched_layers - pf->tm_conf.hidden_layers - 1;
}

static int
ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		  int *is_leaf, struct rte_tm_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_tm_node *tm_node;

	if (!is_leaf || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = find_node(pf->tm_conf.root, node_id);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (tm_node->level == ice_get_leaf_level(pf))
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}

static int
ice_node_query(const struct rte_eth_dev *dev, uint32_t node_id,
	       uint32_t *parent_node_id, uint32_t *priority,
	       uint32_t *weight, uint32_t *level_id,
	       struct rte_tm_node_params *params,
	       struct rte_tm_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_tm_node *tm_node;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = find_node(pf->tm_conf.root, node_id);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (parent_node_id != NULL) {
		if (tm_node->parent != NULL)
			*parent_node_id = tm_node->parent->id;
		else
			*parent_node_id = RTE_TM_NODE_ID_NULL;
	}

	if (priority != NULL)
		*priority = tm_node->priority;

	if (weight != NULL)
		*weight = tm_node->weight;

	if (level_id != NULL)
		*level_id = tm_node->level;

	if (params != NULL)
		*params = tm_node->params;

	return 0;
}

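/* Look up a shaper profile by ID in the per-port profile list; returns NULL
 * if the profile has not been added.
 */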
static inline struct ice_tm_shaper_profile *
ice_shaper_profile_search(struct rte_eth_dev *dev,
			  uint32_t shaper_profile_id)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_shaper_profile_list *shaper_profile_list =
		&pf->tm_conf.shaper_profile_list;
	struct ice_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}

static int
ice_shaper_profile_param_check(const struct rte_tm_shaper_params *profile,
			       struct rte_tm_error *error)
{
	/* min bucket size not supported */
	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}
	/* max bucket size not supported */
	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}
	/* length adjustment not supported */
	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	return 0;
}

static int
ice_shaper_profile_add(struct rte_eth_dev *dev,
		       uint32_t shaper_profile_id,
		       const struct rte_tm_shaper_params *profile,
		       struct rte_tm_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_tm_shaper_profile *shaper_profile;
	int ret;

	if (!profile || !error)
		return -EINVAL;

	ret = ice_shaper_profile_param_check(profile, error);
	if (ret)
		return ret;

	shaper_profile = ice_shaper_profile_search(dev, shaper_profile_id);

	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("ice_tm_shaper_profile",
				     sizeof(struct ice_tm_shaper_profile),
				     0);
	if (!shaper_profile)
		return -ENOMEM;
	shaper_profile->shaper_profile_id = shaper_profile_id;
	rte_memcpy(&shaper_profile->profile, profile,
		   sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
			  shaper_profile, node);

	return 0;
}

static int
ice_shaper_profile_del(struct rte_eth_dev *dev,
		       uint32_t shaper_profile_id,
		       struct rte_tm_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_tm_shaper_profile *shaper_profile;

	if (!error)
		return -EINVAL;

	shaper_profile = ice_shaper_profile_search(dev, shaper_profile_id);

	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID does not exist";
		return -EINVAL;
	}

	/* don't delete a profile if it's used by one or several nodes */
	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);

	return 0;
}

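/* Add a node to the software hierarchy. The children pointer array is
 * co-allocated with the node itself, sized for the maximum number of children
 * the hardware allows at the corresponding scheduler layer, so no separate
 * allocation or resizing is needed as children are attached.
 */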
static int
ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		uint32_t parent_node_id, uint32_t priority,
		uint32_t weight, uint32_t level_id,
		const struct rte_tm_node_params *params,
		struct rte_tm_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_tm_shaper_profile *shaper_profile = NULL;
	struct ice_tm_node *tm_node;
	struct ice_tm_node *parent_node = NULL;
	uint8_t layer_offset = pf->tm_conf.hidden_layers;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* check the shaper profile id */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		shaper_profile = ice_shaper_profile_search(dev, params->shaper_profile_id);
		if (!shaper_profile) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
			error->message = "shaper profile does not exist";
			return -EINVAL;
		}
	}

	/* the node is the root if it has no parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level */
		if (level_id != 0) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level, root node (NULL parent) must be at level 0";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (pf->tm_conf.root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		ret = ice_node_param_check(node_id, priority, weight, params, false, error);
		if (ret)
			return ret;

		/* add the root node */
		tm_node = rte_zmalloc(NULL,
				      sizeof(struct ice_tm_node) +
				      sizeof(struct ice_tm_node *) * hw->max_children[layer_offset],
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->level = 0;
		tm_node->parent = NULL;
		tm_node->reference_count = 0;
		tm_node->shaper_profile = shaper_profile;
		tm_node->children = RTE_PTR_ADD(tm_node, sizeof(struct ice_tm_node));
		tm_node->params = *params;
		pf->tm_conf.root = tm_node;
		return 0;
	}

	parent_node = find_node(pf->tm_conf.root, parent_node_id);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent does not exist";
		return -EINVAL;
	}

	/* check level */
	if (level_id == RTE_TM_NODE_LEVEL_ID_ANY)
		level_id = parent_node->level + 1;
	else if (level_id != parent_node->level + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	ret = ice_node_param_check(node_id, priority, weight,
				   params, level_id == ice_get_leaf_level(pf), error);
	if (ret)
		return ret;

	/* check if the node already exists */
	if (find_node(pf->tm_conf.root, node_id)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* check the parent node */
	/* for n-level hierarchy, level n-1 is leaf, so last level with children is n-2 */
	if ((int)parent_node->level > hw->num_tx_sched_layers - 2) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not valid";
		return -EINVAL;
	}

	/* check the max children allowed at this level */
	if (parent_node->reference_count >= hw->max_children[parent_node->level]) {
		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
		error->message = "insufficient number of child nodes supported";
		return -EINVAL;
	}

	tm_node = rte_zmalloc(NULL,
			      sizeof(struct ice_tm_node) +
			      sizeof(struct ice_tm_node *) * hw->max_children[level_id + layer_offset],
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->level = level_id;
	tm_node->shaper_profile = shaper_profile;
	tm_node->children = RTE_PTR_ADD(tm_node, sizeof(struct ice_tm_node));
	tm_node->parent->children[tm_node->parent->reference_count++] = tm_node;
	tm_node->params = *params;

	if (tm_node->priority != 0)
		PMD_DRV_LOG(WARNING, "priority != 0 not supported in level %d", level_id);

	if (tm_node->weight != 1 && level_id == 0)
		PMD_DRV_LOG(WARNING, "weight != 1 not supported in level %d", level_id);

	return 0;
}

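/* Delete a childless node from the software hierarchy, closing the gap in the
 * parent's children array by shifting the remaining entries down.
 */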
static int
ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		   struct rte_tm_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_tm_node *tm_node;
	uint32_t i, j;

	if (!error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = find_node(pf->tm_conf.root, node_id);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	/* the node should have no child */
	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message =
			"cannot delete a node which has children";
		return -EINVAL;
	}

	/* root node */
	if (tm_node->level == 0) {
		rte_free(tm_node);
		pf->tm_conf.root = NULL;
		return 0;
	}

	/* queue group or queue node */
	for (i = 0; i < tm_node->parent->reference_count; i++)
		if (tm_node->parent->children[i] == tm_node)
			break;

	for (j = i; j < tm_node->parent->reference_count - 1; j++)
		tm_node->parent->children[j] = tm_node->parent->children[j + 1];

	tm_node->parent->reference_count--;
	rte_free(tm_node);

	return 0;
}

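/* Program the min (committed) and max (peak) bandwidth limits of a scheduler
 * node. Profile rates are given in bytes per second (per the rte_tm API),
 * while the scheduler expects Kbps, hence the divide by 1000 and multiply by
 * BITS_PER_BYTE. A zero rate, or a node with no shaper profile, resets the
 * limit to the scheduler default (ICE_SCHED_DFLT_BW).
 */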
static int ice_set_node_rate(struct ice_hw *hw,
			     struct ice_tm_node *tm_node,
			     struct ice_sched_node *sched_node)
{
	bool reset = false;
	uint32_t peak = 0;
	uint32_t committed = 0;
	uint32_t rate;
	int status;

	if (tm_node == NULL || tm_node->shaper_profile == NULL) {
		reset = true;
	} else {
		peak = (uint32_t)tm_node->shaper_profile->profile.peak.rate;
		committed = (uint32_t)tm_node->shaper_profile->profile.committed.rate;
	}

	if (reset || peak == 0)
		rate = ICE_SCHED_DFLT_BW;
	else
		rate = peak / 1000 * BITS_PER_BYTE;

	status = ice_sched_set_node_bw_lmt(hw->port_info,
					   sched_node,
					   ICE_MAX_BW,
					   rate);
	if (status)
		return -EINVAL;

	if (reset || committed == 0)
		rate = ICE_SCHED_DFLT_BW;
	else
		rate = committed / 1000 * BITS_PER_BYTE;

	status = ice_sched_set_node_bw_lmt(hw->port_info,
					   sched_node,
					   ICE_MIN_BW,
					   rate);
	if (status)
		return -EINVAL;

	return 0;
}

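/* Apply a software node's rate limits, priority and weight to the matching
 * hardware scheduler node. The rte_tm priority (0-7) is inverted with
 * "7 - priority" to map it onto the hardware's sibling-priority convention.
 * A NULL tm_node programs defaults: priority value 0 and weight 4.
 */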
static int ice_cfg_hw_node(struct ice_hw *hw,
			   struct ice_tm_node *tm_node,
			   struct ice_sched_node *sched_node)
{
	uint8_t priority;
	uint16_t weight;
	int status, ret;

	ret = ice_set_node_rate(hw, tm_node, sched_node);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "configure node %u bandwidth failed",
			    sched_node->info.node_teid);
		return ret;
	}

	priority = tm_node ? (7 - tm_node->priority) : 0;
	status = ice_sched_cfg_sibl_node_prio(hw->port_info,
					      sched_node,
					      priority);
	if (status) {
		PMD_DRV_LOG(ERR, "configure node %u priority %u failed",
			    sched_node->info.node_teid,
			    priority);
		return -EINVAL;
	}

	weight = tm_node ? (uint16_t)tm_node->weight : 4;

	status = ice_sched_cfg_node_bw_alloc(hw, sched_node,
					     ICE_MAX_BW,
					     weight);
	if (status) {
		PMD_DRV_LOG(ERR, "configure node %u weight %u failed",
			    sched_node->info.node_teid,
			    weight);
		return -EINVAL;
	}

	return 0;
}

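/* Bind the hardware scheduler node of a just-started Tx queue (identified by
 * its TEID) to the corresponding leaf in the software hierarchy. If the
 * queue's hardware node sits under the wrong parent, move it with an
 * admin-queue command and patch the local ice_sched_node tree to match, then
 * apply the leaf's scheduling configuration.
 */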
int
ice_tm_setup_txq_node(struct ice_pf *pf, struct ice_hw *hw, uint16_t qid, uint32_t teid)
{
	struct ice_sched_node *hw_node = ice_sched_find_node_by_teid(hw->port_info->root, teid);
	struct ice_tm_node *sw_node = find_node(pf->tm_conf.root, qid);

	/* bad node teid passed */
	if (hw_node == NULL)
		return -ENOENT;

	/* not configured in hierarchy */
	if (sw_node == NULL)
		return 0;

	sw_node->sched_node = hw_node;

	/* if the queue node has been put in the wrong place in hierarchy */
	if (hw_node->parent != sw_node->parent->sched_node) {
		struct ice_aqc_move_txqs_data *buf;
		uint8_t txqs_moved = 0;
		uint16_t buf_size = ice_struct_size(buf, txqs, 1);

		buf = ice_malloc(hw, buf_size);
		if (buf == NULL)
			return -ENOMEM;

		struct ice_sched_node *parent = hw_node->parent;
		struct ice_sched_node *new_parent = sw_node->parent->sched_node;
		buf->src_teid = parent->info.node_teid;
		buf->dest_teid = new_parent->info.node_teid;
		buf->txqs[0].q_teid = hw_node->info.node_teid;
		buf->txqs[0].txq_id = qid;

		int ret = ice_aq_move_recfg_lan_txq(hw, 1, true, false, false, false, 50,
						    NULL, buf, buf_size, &txqs_moved, NULL);
		if (ret || txqs_moved == 0) {
			PMD_DRV_LOG(ERR, "move lan queue %u failed", qid);
			ice_free(hw, buf);
			return ICE_ERR_PARAM;
		}
		/* command buffer is no longer needed once the move has completed */
		ice_free(hw, buf);

		/* now update the ice_sched_nodes to match physical layout */
		new_parent->children[new_parent->num_children++] = hw_node;
		hw_node->parent = new_parent;
		ice_sched_query_elem(hw, hw_node->info.node_teid, &hw_node->info);
		for (uint16_t i = 0; i < parent->num_children; i++)
			if (parent->children[i] == hw_node) {
				/* to remove, just overwrite the old node slot with the last ptr */
				parent->children[i] = parent->children[--parent->num_children];
				break;
			}
	}

	return ice_cfg_hw_node(hw, sw_node, hw_node);
}

/* From a given node, recursively delete all the nodes that belong to that VSI.
 * Any nodes which can't be deleted because they have children belonging to a
 * different VSI are instead reassigned to that other VSI.
 */
static int
free_sched_node_recursive(struct ice_port_info *pi, const struct ice_sched_node *root,
			  struct ice_sched_node *node, uint8_t vsi_id)
{
	uint16_t i = 0;

	while (i < node->num_children) {
		if (node->children[i]->vsi_handle != vsi_id) {
			i++;
			continue;
		}
		free_sched_node_recursive(pi, root, node->children[i], vsi_id);
	}

	if (node != root) {
		if (node->num_children == 0)
			ice_free_sched_node(pi, node);
		else
			node->vsi_handle = node->children[0]->vsi_handle;
	}

	return 0;
}

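/* Recreate the committed software hierarchy in hardware: first instantiate
 * and configure all direct children of sw_node via ice_sched_add_elems, then
 * recurse into each child. Leaf (queue) nodes are not created here; that is
 * deferred until the individual queues are started. The created array counts
 * the nodes added per hardware scheduler layer.
 */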
static int
create_sched_node_recursive(struct ice_pf *pf, struct ice_port_info *pi,
			    struct ice_tm_node *sw_node, struct ice_sched_node *hw_root,
			    uint16_t *created)
{
	struct ice_sched_node *parent = sw_node->sched_node;
	uint32_t teid;
	uint16_t added;

	/* first create all child nodes */
	for (uint16_t i = 0; i < sw_node->reference_count; i++) {
		struct ice_tm_node *tm_node = sw_node->children[i];
		int res = ice_sched_add_elems(pi, hw_root,
					      parent, parent->tx_sched_layer + 1,
					      1 /* num nodes */, &added, &teid,
					      NULL /* no pre-alloc */);
		if (res != 0) {
			PMD_DRV_LOG(ERR, "Error with ice_sched_add_elems, adding child node to teid %u",
				    parent->info.node_teid);
			return -1;
		}
		struct ice_sched_node *hw_node = ice_sched_find_node_by_teid(parent, teid);
		if (ice_cfg_hw_node(pi->hw, tm_node, hw_node) != 0) {
			PMD_DRV_LOG(ERR, "Error configuring node %u at layer %u",
				    teid, parent->tx_sched_layer + 1);
			return -1;
		}
		tm_node->sched_node = hw_node;
		created[hw_node->tx_sched_layer]++;
	}

	/* if we have just created the child nodes in the q-group, i.e. last non-leaf layer,
	 * then just return, rather than trying to create leaf nodes.
	 * That is done later at queue start.
	 */
	if (sw_node->level + 2 == ice_get_leaf_level(pf))
		return 0;

	for (uint16_t i = 0; i < sw_node->reference_count; i++) {
		if (sw_node->children[i]->reference_count == 0)
			continue;

		if (create_sched_node_recursive(pf, pi, sw_node->children[i], hw_root, created) < 0)
			return -1;
	}
	return 0;
}

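/* Replace the default scheduler topology under the main VSI with the
 * user-supplied hierarchy: locate the hardware node that will act as the new
 * VSI root, prune everything below it that belongs to this VSI, rebuild the
 * tree from the software configuration, and finally size the queue context
 * array for the number of queues the new topology can support.
 */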
static int
commit_new_hierarchy(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_port_info *pi = hw->port_info;
	struct ice_tm_node *sw_root = pf->tm_conf.root;
	const uint16_t new_root_level = pf->tm_conf.hidden_layers;
	/* count nodes per hw level, not per logical */
	uint16_t nodes_created_per_level[ICE_TM_MAX_LAYERS] = {0};
	uint8_t q_lvl = ice_get_leaf_level(pf);
	uint8_t qg_lvl = q_lvl - 1;

	struct ice_sched_node *new_vsi_root = hw->vsi_ctx[pf->main_vsi->idx]->sched.vsi_node[0];
	while (new_vsi_root->tx_sched_layer > new_root_level)
		new_vsi_root = new_vsi_root->parent;

	free_sched_node_recursive(pi, new_vsi_root, new_vsi_root, new_vsi_root->vsi_handle);

	sw_root->sched_node = new_vsi_root;
	if (create_sched_node_recursive(pf, pi, sw_root, new_vsi_root, nodes_created_per_level) < 0)
		return -1;
	for (uint16_t i = 0; i < RTE_DIM(nodes_created_per_level); i++)
		PMD_DRV_LOG(DEBUG, "Created %u nodes at level %u",
			    nodes_created_per_level[i], i);
	hw->vsi_ctx[pf->main_vsi->idx]->sched.vsi_node[0] = new_vsi_root;

	pf->main_vsi->nb_qps =
		RTE_MIN(nodes_created_per_level[qg_lvl] * hw->max_children[qg_lvl],
			hw->layer_info[q_lvl].max_device_nodes);

	pf->tm_conf.committed = true; /* set flag to be checked on queue start */

	return ice_alloc_lan_q_ctx(hw, 0, 0, pf->main_vsi->nb_qps);
}

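/* Commit the software hierarchy to hardware. This is the rte_tm_ops
 * hierarchy_commit callback; a commit must run on a stopped port, so a
 * running port is stopped first and restarted afterwards. On failure, the
 * software configuration is reset if the caller requested clear_on_fail.
 *
 * A minimal usage sketch from an application's point of view (hypothetical
 * node IDs, no shapers, error handling elided), assuming the exposed
 * hierarchy has just root, queue-group and queue levels and that the leaf
 * node ID must equal the Tx queue ID:
 *
 *	struct rte_tm_error err;
 *	struct rte_tm_node_params gp = {     // non-leaf (group) params
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *		.nonleaf = { .n_sp_priorities = 1 },
 *	};
 *	struct rte_tm_node_params qp = {     // leaf (queue) params
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *		.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE,
 *	};
 *	// root at level 0, one queue group below it, then Tx queue 0
 *	rte_tm_node_add(port_id, 100, RTE_TM_NODE_ID_NULL, 0, 1, 0, &gp, &err);
 *	rte_tm_node_add(port_id, 200, 100, 0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &gp, &err);
 *	rte_tm_node_add(port_id, 0, 200, 0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &qp, &err);
 *	rte_tm_hierarchy_commit(port_id, 1, &err);   // 1 == clear_on_fail
 */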
static int
ice_hierarchy_commit(struct rte_eth_dev *dev,
		     int clear_on_fail,
		     struct rte_tm_error *error)
{
	bool restart = false;

	/* commit can only be applied to the topology before the port starts.
	 * If the port is already started, stop it and then restart it when done.
	 */
	if (dev->data->dev_started) {
		if (rte_eth_dev_stop(dev->data->port_id) != 0) {
			error->message = "Device failed to stop";
			return -1;
		}
		restart = true;
	}

	int ret = commit_new_hierarchy(dev);
	if (ret < 0 && clear_on_fail) {
		ice_tm_conf_uninit(dev);
		ice_tm_conf_init(dev);
	}

	if (restart) {
		if (rte_eth_dev_start(dev->data->port_id) != 0) {
			error->message = "Device failed to start";
			return -1;
		}
	}
	return ret;
}