/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <rte_tm_driver.h>

#include "base/ice_sched.h"
#include "ice_dcf_ethdev.h"

/* Forward declarations of the rte_tm_ops callbacks implemented below. */
static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
				__rte_unused int clear_on_fail,
				__rte_unused struct rte_tm_error *error);
static int ice_dcf_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	      uint32_t parent_node_id, uint32_t priority,
	      uint32_t weight, uint32_t level_id,
	      const struct rte_tm_node_params *params,
	      struct rte_tm_error *error);
static int ice_dcf_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
			    struct rte_tm_error *error);
static int ice_dcf_shaper_profile_add(struct rte_eth_dev *dev,
			uint32_t shaper_profile_id,
			const struct rte_tm_shaper_params *profile,
			struct rte_tm_error *error);
static int ice_dcf_shaper_profile_del(struct rte_eth_dev *dev,
			uint32_t shaper_profile_id,
			struct rte_tm_error *error);

/* Traffic-manager ops table exported to the generic rte_tm layer. */
const struct rte_tm_ops ice_dcf_tm_ops = {
	.shaper_profile_add = ice_dcf_shaper_profile_add,
	.shaper_profile_delete = ice_dcf_shaper_profile_del,
	.hierarchy_commit = ice_dcf_hierarchy_commit,
	.node_add = ice_dcf_node_add,
	.node_delete = ice_dcf_node_delete,
};

/* Pseudo VF id used as the qos_bw_cfg[] key for the TC-level config. */
#define ICE_DCF_SCHED_TC_NODE 0xffff
/* The DCF itself always runs as VF 0. */
#define ICE_DCF_VFID 0

/*
 * Reset the software traffic-manager state: empty shaper-profile list
 * and an empty port/TC/VSI node hierarchy, nothing committed yet.
 */
void
ice_dcf_tm_conf_init(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;

	/* initialize shaper profile list */
	TAILQ_INIT(&hw->tm_conf.shaper_profile_list);

	/* initialize node configuration */
	hw->tm_conf.root = NULL;
	TAILQ_INIT(&hw->tm_conf.tc_list);
	TAILQ_INIT(&hw->tm_conf.vsi_list);
	hw->tm_conf.nb_tc_node = 0;
	hw->tm_conf.nb_vsi_node = 0;
	hw->tm_conf.committed = false;
}

/*
 * Free every node (VSI, TC and root) and every shaper profile held in
 * the software TM configuration.  Children (VSI) are drained before
 * TCs and the root so no freed node is still reachable from a list.
 */
void
ice_dcf_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	struct ice_dcf_tm_shaper_profile *shaper_profile;
	struct ice_dcf_tm_node *tm_node;

	/* clear node configuration */
	while ((tm_node = TAILQ_FIRST(&hw->tm_conf.vsi_list))) {
		TAILQ_REMOVE(&hw->tm_conf.vsi_list, tm_node, node);
		rte_free(tm_node);
	}
	hw->tm_conf.nb_vsi_node = 0;
	while ((tm_node = TAILQ_FIRST(&hw->tm_conf.tc_list))) {
		TAILQ_REMOVE(&hw->tm_conf.tc_list, tm_node, node);
		rte_free(tm_node);
	}
	hw->tm_conf.nb_tc_node = 0;
	if (hw->tm_conf.root) {
		rte_free(hw->tm_conf.root);
		hw->tm_conf.root = NULL;
	}

	/* Remove all shaper profiles */
	while ((shaper_profile =
	       TAILQ_FIRST(&hw->tm_conf.shaper_profile_list))) {
		TAILQ_REMOVE(&hw->tm_conf.shaper_profile_list,
			     shaper_profile, node);
		rte_free(shaper_profile);
	}
}

/*
 * Look up a TM node by id across the three hierarchy levels.
 * On a hit, *node_type is set to the level the node was found at
 * (port, TC or VSI); returns NULL when the id is unknown.
 */
static inline struct ice_dcf_tm_node *
ice_dcf_tm_node_search(struct rte_eth_dev *dev,
		       uint32_t node_id, enum ice_dcf_tm_node_type *node_type)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	struct ice_dcf_tm_node_list *vsi_list = &hw->tm_conf.vsi_list;
	struct ice_dcf_tm_node_list *tc_list = &hw->tm_conf.tc_list;
	struct ice_dcf_tm_node *tm_node;

	if (hw->tm_conf.root && hw->tm_conf.root->id == node_id) {
		*node_type = ICE_DCF_TM_NODE_TYPE_PORT;
		return hw->tm_conf.root;
	}

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = ICE_DCF_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, vsi_list, node) {
		if (tm_node->id == node_id) {
			*node_type = ICE_DCF_TM_NODE_TYPE_VSI;
			return tm_node;
		}
	}

	return NULL;
}

/* Find a shaper profile by id; NULL when not present. */
static inline struct ice_dcf_tm_shaper_profile *
ice_dcf_shaper_profile_search(struct rte_eth_dev *dev,
			      uint32_t shaper_profile_id)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	struct ice_dcf_shaper_profile_list *shaper_profile_list =
		&hw->tm_conf.shaper_profile_list;
	struct ice_dcf_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}

/*
 * Reject every rte_tm node parameter the DCF scheduler does not
 * support: only priority 0 and weight 1 are accepted, shared shapers
 * are unsupported, non-leaf nodes may not use WFQ or multiple SP
 * priorities, and leaf nodes may not use congestion management / WRED.
 * Returns 0 when acceptable, -EINVAL with *error filled in otherwise.
 */
static int
ice_dcf_node_param_check(struct ice_dcf_hw *hw, uint32_t node_id,
			 uint32_t priority, uint32_t weight,
			 const struct rte_tm_node_params *params,
			 struct rte_tm_error *error)
{
	/* checked all the unsupported parameter */
	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	/* not support shared shaper */
	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	/* for non-leaf node: ids at or above the leaf id range are
	 * treated as non-leaf (8 presumably = max TC count — TODO confirm)
	 */
	if (node_id >= 8 * hw->num_vfs) {
		if (params->nonleaf.wfq_weight_mode) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ not supported";
			return -EINVAL;
		}
		if (params->nonleaf.n_sp_priorities != 1) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
			error->message = "SP priority not supported";
		/* NOTE(review): this branch is unreachable — a non-NULL
		 * wfq_weight_mode already returned -EINVAL above
		 */
			return -EINVAL;
		} else if (params->nonleaf.wfq_weight_mode &&
			   !(*params->nonleaf.wfq_weight_mode)) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFP should be byte mode";
			return -EINVAL;
		}

		return 0;
	}

	/* for leaf node */
	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "Congestion management not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.wred_profile_id !=
	    RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}

/*
 * rte_tm node_add callback.  Builds the three-level software hierarchy
 * (port root -> TC -> VF VSI).  Only allowed while the port is stopped.
 * The node is stored in software only; nothing is sent to the PF until
 * hierarchy_commit.
 */
static int
ice_dcf_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		 uint32_t parent_node_id, uint32_t priority,
		 uint32_t weight, uint32_t level_id,
		 const struct rte_tm_node_params *params,
		 struct rte_tm_error *error)
{
	enum ice_dcf_tm_node_type parent_node_type = ICE_DCF_TM_NODE_TYPE_MAX;
	enum ice_dcf_tm_node_type node_type = ICE_DCF_TM_NODE_TYPE_MAX;
	struct ice_dcf_tm_shaper_profile *shaper_profile = NULL;
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_adapter *ad = &adapter->parent;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	struct ice_dcf_tm_node *parent_node;
	struct ice_dcf_tm_node *tm_node;
	uint16_t tc_nb = 1;
	int i, ret;

	if (!params || !error)
		return -EINVAL;

	/* if port is running */
	if (!ad->pf.adapter_stopped) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "port is running";
		return -EINVAL;
	}

	ret = ice_dcf_node_param_check(hw, node_id,
				       priority, weight,
				       params, error);
	if (ret)
		return ret;

	/* count the enabled TCs; TC0 is always accounted for */
	for (i = 1; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		if (hw->ets_config->tc_valid_bits & (1 << i))
			tc_nb++;
	}

	/* check if the node is already existed */
	if (ice_dcf_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* check the shaper profile id */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		shaper_profile = ice_dcf_shaper_profile_search(dev,
			params->shaper_profile_id);
		if (!shaper_profile) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
			error->message = "shaper profile not exist";
			return -EINVAL;
		}
	}

	/* add root node if not have a parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level */
		if (level_id != ICE_DCF_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (hw->tm_conf.root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node
		 * NOTE(review): a shaper profile resolved above is not
		 * attached to the root node — confirm this is intended
		 */
		tm_node = rte_zmalloc("ice_dcf_tm_node",
				      sizeof(struct ice_dcf_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->parent = NULL;
		tm_node->reference_count = 0;
		rte_memcpy(&tm_node->params, params,
			   sizeof(struct rte_tm_node_params));
		hw->tm_conf.root = tm_node;

		return 0;
	}

	/* TC or vsi node */
	/* check the parent node */
	parent_node = ice_dcf_tm_node_search(dev, parent_node_id,
					     &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent not exist";
		return -EINVAL;
	}
	if (parent_node_type != ICE_DCF_TM_NODE_TYPE_PORT &&
	    parent_node_type != ICE_DCF_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}
	/* check level: a child must sit exactly one level below its parent */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != (uint32_t)(parent_node_type + 1)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the TC node number */
	if (parent_node_type == ICE_DCF_TM_NODE_TYPE_PORT) {
		/* check the TC number */
		if (hw->tm_conf.nb_tc_node >= tc_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check the vsi node number */
		if (parent_node->reference_count >= hw->num_vfs) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many VSI for one TC";
			return -EINVAL;
		}
		/* check the vsi node id */
		if (node_id > (uint32_t)(tc_nb * hw->num_vfs)) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too large VSI id";
			return -EINVAL;
		}
	}

	/* add the TC or vsi node */
	tm_node = rte_zmalloc("ice_dcf_tm_node",
			      sizeof(struct ice_dcf_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->shaper_profile = shaper_profile;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	rte_memcpy(&tm_node->params, params,
		   sizeof(struct rte_tm_node_params));
	if (parent_node_type == ICE_DCF_TM_NODE_TYPE_PORT) {
		TAILQ_INSERT_TAIL(&hw->tm_conf.tc_list,
				  tm_node, node);
		/* TC index is assigned in creation order */
		tm_node->tc = hw->tm_conf.nb_tc_node;
		hw->tm_conf.nb_tc_node++;
	} else {
		TAILQ_INSERT_TAIL(&hw->tm_conf.vsi_list,
				  tm_node, node);
		/* a VSI node inherits the TC index of its parent */
		tm_node->tc = parent_node->tc;
		hw->tm_conf.nb_vsi_node++;
	}
	tm_node->parent->reference_count++;

	/* increase the reference counter of the shaper profile */
	if (shaper_profile)
		shaper_profile->reference_count++;

	return 0;
}

/*
 * rte_tm node_delete callback.  Only allowed while the port is stopped
 * and for nodes without children; releases the node and drops the
 * references it held on its parent and on its shaper profile.
 */
static int
ice_dcf_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		    struct rte_tm_error *error)
{
	enum ice_dcf_tm_node_type node_type = ICE_DCF_TM_NODE_TYPE_MAX;
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_adapter *ad = &adapter->parent;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	struct ice_dcf_tm_node *tm_node;

	if (!error)
		return -EINVAL;

	/* if port is running */
	if (!ad->pf.adapter_stopped) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "port is running";
		return -EINVAL;
	}

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = ice_dcf_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	/* the node should have no child */
	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message =
			"cannot delete a node which has children";
		return -EINVAL;
	}

	/* root node */
	if (node_type == ICE_DCF_TM_NODE_TYPE_PORT) {
		if (tm_node->shaper_profile)
			tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		hw->tm_conf.root = NULL;
		return 0;
	}

	/* TC or VSI node */
	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == ICE_DCF_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&hw->tm_conf.tc_list, tm_node, node);
		hw->tm_conf.nb_tc_node--;
	} else {
		TAILQ_REMOVE(&hw->tm_conf.vsi_list, tm_node, node);
		hw->tm_conf.nb_vsi_node--;
	}
	rte_free(tm_node);

	return 0;
}

/*
 * Validate a shaper profile against DCF capabilities: committed/peak
 * bucket sizes and packet length adjustment are not supported.
 */
static int
ice_dcf_shaper_profile_param_check(const struct rte_tm_shaper_params *profile,
				   struct rte_tm_error *error)
{
	/* min bucket size not supported */
	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}
	/* max bucket size not supported */
	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}
	/* length adjustment not supported */
	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	return 0;
}

/*
 * rte_tm shaper_profile_add callback: validate the parameters, reject
 * duplicate ids, then store a copy of the profile in the profile list.
 */
static int
ice_dcf_shaper_profile_add(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id,
			   const struct rte_tm_shaper_params *profile,
			   struct rte_tm_error *error)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	struct ice_dcf_tm_shaper_profile *shaper_profile;
	int ret;

	if (!profile || !error)
		return -EINVAL;

	ret = ice_dcf_shaper_profile_param_check(profile, error);
	if (ret)
		return ret;

	shaper_profile = ice_dcf_shaper_profile_search(dev, shaper_profile_id);

	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID exist";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("ice_dcf_tm_shaper_profile",
				     sizeof(struct ice_dcf_tm_shaper_profile),
				     0);
	if (!shaper_profile)
		return -ENOMEM;
	shaper_profile->shaper_profile_id = shaper_profile_id;
	rte_memcpy(&shaper_profile->profile, profile,
		   sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&hw->tm_conf.shaper_profile_list,
			  shaper_profile, node);

	return 0;
}

/*
 * rte_tm shaper_profile_delete callback: a profile may only be removed
 * once no node references it any more.
 */
static int
ice_dcf_shaper_profile_del(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id,
			   struct rte_tm_error *error)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	struct ice_dcf_tm_shaper_profile *shaper_profile;

	if (!error)
		return -EINVAL;

	shaper_profile = ice_dcf_shaper_profile_search(dev, shaper_profile_id);

	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID not exist";
		return -EINVAL;
	}

	/* don't delete a profile if it's used by one or several nodes */
	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&hw->tm_conf.shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);

	return 0;
}

/*
 * Send one VIRTCHNL_OP_DCF_CONFIG_BW message carrying @vf_bw (@len
 * bytes) to the PF.  Returns the virtchnl execution result.
 */
static int
ice_dcf_set_vf_bw(struct ice_dcf_hw *hw,
		  struct virtchnl_dcf_bw_cfg_list *vf_bw,
		  uint16_t len)
{
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_DCF_CONFIG_BW;
	args.req_msg = (uint8_t *)vf_bw;
	args.req_msglen = len;
	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command %s",
			    "VIRTCHNL_OP_DCF_CONFIG_BW");
	return err;
}

/*
 * Validate the aggregated per-TC shaper configuration against the port
 * bandwidth (all rates in Kbps) before committing it to the PF.
 */
static int
ice_dcf_validate_tc_bw(struct virtchnl_dcf_bw_cfg_list *tc_bw,
		       uint32_t port_bw)
{
	struct virtchnl_dcf_bw_cfg *cfg;
	bool lowest_cir_mark = false;
	u32 total_peak, rest_peak;
	u32 committed, peak;
	int i;

	/* sum of the max (peak) bandwidth of every TC */
	total_peak = 0;
	for (i = 0; i < tc_bw->num_elem; i++)
		total_peak += tc_bw->cfg[i].shaper.peak;

	for (i = 0; i < tc_bw->num_elem; i++) {
		cfg = &tc_bw->cfg[i];
		peak = cfg->shaper.peak;
		committed = cfg->shaper.committed;
601 rest_peak = total_peak - peak; 602 603 if (lowest_cir_mark && peak == 0) { 604 PMD_DRV_LOG(ERR, "Max bandwidth must be configured for TC%u", 605 cfg->tc_num); 606 return -EINVAL; 607 } 608 609 if (!lowest_cir_mark && committed) 610 lowest_cir_mark = true; 611 612 if (committed && committed + rest_peak > port_bw) { 613 PMD_DRV_LOG(ERR, "Total value of TC%u min bandwidth and other TCs' max bandwidth %ukbps should be less than port link speed %ukbps", 614 cfg->tc_num, committed + rest_peak, port_bw); 615 return -EINVAL; 616 } 617 618 if (committed && committed < ICE_SCHED_MIN_BW) { 619 PMD_DRV_LOG(ERR, "If TC%u min Tx bandwidth is set, it cannot be less than 500Kbps", 620 cfg->tc_num); 621 return -EINVAL; 622 } 623 624 if (peak && committed > peak) { 625 PMD_DRV_LOG(ERR, "TC%u Min Tx bandwidth cannot be greater than max Tx bandwidth", 626 cfg->tc_num); 627 return -EINVAL; 628 } 629 630 if (peak > port_bw) { 631 PMD_DRV_LOG(ERR, "TC%u max Tx bandwidth %uKbps is greater than current link speed %uKbps", 632 cfg->tc_num, peak, port_bw); 633 return -EINVAL; 634 } 635 } 636 637 return 0; 638 } 639 640 static int ice_dcf_commit_check(struct ice_dcf_hw *hw) 641 { 642 struct ice_dcf_tm_node_list *tc_list = &hw->tm_conf.tc_list; 643 struct ice_dcf_tm_node_list *vsi_list = &hw->tm_conf.vsi_list; 644 struct ice_dcf_tm_node *tm_node; 645 646 if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)) { 647 PMD_DRV_LOG(ERR, "Configure VF bandwidth is not supported"); 648 return ICE_ERR_NOT_SUPPORTED; 649 } 650 651 /* check if all TC nodes are set */ 652 if (BIT(hw->tm_conf.nb_tc_node) & hw->ets_config->tc_valid_bits) { 653 PMD_DRV_LOG(ERR, "Not all enabled TC nodes are set"); 654 return ICE_ERR_PARAM; 655 } 656 657 /* check if all VF vsi nodes are binded to all TCs */ 658 TAILQ_FOREACH(tm_node, tc_list, node) { 659 if (tm_node->reference_count != hw->num_vfs) { 660 PMD_DRV_LOG(ERR, "Not all VFs are binded to TC%u", 661 tm_node->tc); 662 return ICE_ERR_PARAM; 663 } 664 } 665 666 
/* check if VF vsi node id start with 0 */ 667 tm_node = TAILQ_FIRST(vsi_list); 668 if (tm_node->id != 0) { 669 PMD_DRV_LOG(ERR, "VF vsi node id must start with 0"); 670 return ICE_ERR_PARAM; 671 } 672 673 return ICE_SUCCESS; 674 } 675 676 int 677 ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id) 678 { 679 struct ice_aqc_port_ets_elem old_ets_config; 680 struct ice_dcf_adapter *adapter; 681 struct ice_hw *parent_hw; 682 int ret, size; 683 684 adapter = hw->eth_dev->data->dev_private; 685 parent_hw = &adapter->parent.hw; 686 687 /* store the old ets config */ 688 old_ets_config = *hw->ets_config; 689 690 ice_memset(hw->ets_config, 0, sizeof(*hw->ets_config), ICE_NONDMA_MEM); 691 ret = ice_aq_query_port_ets(parent_hw->port_info, 692 hw->ets_config, sizeof(*hw->ets_config), 693 NULL); 694 if (ret) { 695 PMD_DRV_LOG(ERR, "DCF Query Port ETS failed"); 696 return ret; 697 } 698 699 if (memcmp(&old_ets_config, hw->ets_config, sizeof(old_ets_config))) { 700 PMD_DRV_LOG(DEBUG, "ETS config changes, do not replay BW"); 701 return ICE_SUCCESS; 702 } 703 704 size = sizeof(struct virtchnl_dcf_bw_cfg_list) + 705 sizeof(struct virtchnl_dcf_bw_cfg) * 706 (hw->tm_conf.nb_tc_node - 1); 707 708 ret = ice_dcf_set_vf_bw(hw, hw->qos_bw_cfg[vf_id], size); 709 if (ret) { 710 PMD_DRV_LOG(DEBUG, "VF %u BW replay failed", vf_id); 711 return ICE_ERR_CFG; 712 } 713 714 return ICE_SUCCESS; 715 } 716 717 int 718 ice_dcf_clear_bw(struct ice_dcf_hw *hw) 719 { 720 uint16_t vf_id; 721 uint32_t tc; 722 int ret, size; 723 724 size = sizeof(struct virtchnl_dcf_bw_cfg_list) + 725 sizeof(struct virtchnl_dcf_bw_cfg) * 726 (hw->tm_conf.nb_tc_node - 1); 727 728 for (vf_id = 0; vf_id < hw->num_vfs; vf_id++) { 729 for (tc = 0; tc < hw->tm_conf.nb_tc_node; tc++) { 730 hw->qos_bw_cfg[vf_id]->cfg[tc].shaper.peak = 0; 731 hw->qos_bw_cfg[vf_id]->cfg[tc].shaper.committed = 0; 732 } 733 ret = ice_dcf_set_vf_bw(hw, hw->qos_bw_cfg[vf_id], size); 734 if (ret) { 735 PMD_DRV_LOG(DEBUG, "VF %u BW clear failed", 
vf_id); 736 return ICE_ERR_CFG; 737 } 738 } 739 740 return ICE_SUCCESS; 741 } 742 743 static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev, 744 int clear_on_fail, 745 __rte_unused struct rte_tm_error *error) 746 { 747 struct ice_dcf_adapter *adapter = dev->data->dev_private; 748 struct ice_dcf_hw *hw = &adapter->real_hw; 749 struct virtchnl_dcf_bw_cfg_list *vf_bw; 750 struct virtchnl_dcf_bw_cfg_list *tc_bw; 751 struct ice_dcf_tm_node_list *vsi_list = &hw->tm_conf.vsi_list; 752 struct rte_tm_shaper_params *profile; 753 struct ice_dcf_tm_node *tm_node; 754 uint32_t port_bw, cir_total; 755 uint16_t size, vf_id; 756 uint8_t num_elem = 0; 757 int i, ret_val; 758 759 /* check if port is stopped */ 760 if (!adapter->parent.pf.adapter_stopped) { 761 PMD_DRV_LOG(ERR, "Please stop port first"); 762 ret_val = ICE_ERR_NOT_READY; 763 goto err; 764 } 765 766 ret_val = ice_dcf_commit_check(hw); 767 if (ret_val) 768 goto fail_clear; 769 770 size = sizeof(struct virtchnl_dcf_bw_cfg_list) + 771 sizeof(struct virtchnl_dcf_bw_cfg) * 772 (hw->tm_conf.nb_tc_node - 1); 773 vf_bw = rte_zmalloc("vf_bw", size, 0); 774 if (!vf_bw) { 775 ret_val = ICE_ERR_NO_MEMORY; 776 goto fail_clear; 777 } 778 tc_bw = rte_zmalloc("tc_bw", size, 0); 779 if (!tc_bw) { 780 ret_val = ICE_ERR_NO_MEMORY; 781 goto fail_clear; 782 } 783 784 /* port bandwidth (Kbps) */ 785 port_bw = hw->link_speed * 1000; 786 cir_total = 0; 787 788 /* init tc bw configuration */ 789 tc_bw->vf_id = ICE_DCF_SCHED_TC_NODE; 790 tc_bw->node_type = VIRTCHNL_DCF_TARGET_TC_BW; 791 tc_bw->num_elem = hw->tm_conf.nb_tc_node; 792 for (i = 0; i < tc_bw->num_elem; i++) { 793 tc_bw->cfg[i].tc_num = i; 794 tc_bw->cfg[i].type = VIRTCHNL_BW_SHAPER; 795 tc_bw->cfg[i].bw_type |= 796 VIRTCHNL_DCF_BW_PIR | VIRTCHNL_DCF_BW_CIR; 797 } 798 799 /* start with VF1, skip VF0 since DCF does not need to configure 800 * bandwidth for itself 801 */ 802 for (vf_id = 1; vf_id < hw->num_vfs; vf_id++) { 803 num_elem = 0; 804 vf_bw->vf_id = vf_id; 805 
vf_bw->node_type = VIRTCHNL_DCF_TARGET_VF_BW; 806 TAILQ_FOREACH(tm_node, vsi_list, node) { 807 /* scan the nodes belong to one VSI */ 808 if (tm_node->id - hw->num_vfs * tm_node->tc != vf_id) 809 continue; 810 vf_bw->cfg[num_elem].tc_num = tm_node->tc; 811 vf_bw->cfg[num_elem].type = VIRTCHNL_BW_SHAPER; 812 if (tm_node->shaper_profile) { 813 /* Transfer from Byte per seconds to Kbps */ 814 profile = &tm_node->shaper_profile->profile; 815 vf_bw->cfg[num_elem].shaper.peak = 816 profile->peak.rate / 1000 * BITS_PER_BYTE; 817 vf_bw->cfg[num_elem].shaper.committed = 818 profile->committed.rate / 1000 * BITS_PER_BYTE; 819 vf_bw->cfg[num_elem].bw_type |= 820 VIRTCHNL_DCF_BW_PIR | 821 VIRTCHNL_DCF_BW_CIR; 822 } 823 824 /* update tc node bw configuration */ 825 tc_bw->cfg[tm_node->tc].shaper.peak += 826 vf_bw->cfg[num_elem].shaper.peak; 827 tc_bw->cfg[tm_node->tc].shaper.committed += 828 vf_bw->cfg[num_elem].shaper.committed; 829 830 cir_total += vf_bw->cfg[num_elem].shaper.committed; 831 num_elem++; 832 } 833 834 vf_bw->num_elem = num_elem; 835 ret_val = ice_dcf_set_vf_bw(hw, vf_bw, size); 836 if (ret_val) 837 goto fail_clear; 838 839 hw->qos_bw_cfg[vf_id] = rte_zmalloc("vf_bw_cfg", size, 0); 840 if (!hw->qos_bw_cfg[vf_id]) { 841 ret_val = ICE_ERR_NO_MEMORY; 842 goto fail_clear; 843 } 844 /* store the bandwidth information for replay */ 845 ice_memcpy(hw->qos_bw_cfg[vf_id], vf_bw, size, 846 ICE_NONDMA_TO_NONDMA); 847 ice_memset(vf_bw, 0, size, ICE_NONDMA_MEM); 848 } 849 850 /* check if total CIR is larger than port bandwidth */ 851 if (cir_total > port_bw) { 852 PMD_DRV_LOG(ERR, "Total CIR of all VFs is larger than port bandwidth"); 853 ret_val = ICE_ERR_PARAM; 854 goto fail_clear; 855 } 856 857 /* check and commit tc node bw configuration */ 858 ret_val = ice_dcf_validate_tc_bw(tc_bw, port_bw); 859 if (ret_val) 860 goto fail_clear; 861 ret_val = ice_dcf_set_vf_bw(hw, tc_bw, size); 862 if (ret_val) 863 goto fail_clear; 864 865 /* store TC node bw configuration */ 866 
hw->qos_bw_cfg[ICE_DCF_VFID] = rte_zmalloc("tc_bw_cfg", size, 0); 867 if (!hw->qos_bw_cfg[ICE_DCF_VFID]) { 868 ret_val = ICE_ERR_NO_MEMORY; 869 goto fail_clear; 870 } 871 ice_memcpy(hw->qos_bw_cfg[ICE_DCF_VFID], tc_bw, size, 872 ICE_NONDMA_TO_NONDMA); 873 874 hw->tm_conf.committed = true; 875 return ret_val; 876 877 fail_clear: 878 /* clear all the traffic manager configuration */ 879 if (clear_on_fail) { 880 ice_dcf_tm_conf_uninit(dev); 881 ice_dcf_tm_conf_init(dev); 882 } 883 err: 884 return ret_val; 885 } 886