/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_malloc.h>

#include "txgbe_ethdev.h"

static int txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
				     struct rte_tm_capabilities *cap,
				     struct rte_tm_error *error);
static int txgbe_shaper_profile_add(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_shaper_params *profile,
				    struct rte_tm_error *error);
static int txgbe_shaper_profile_del(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_error *error);
static int txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
			  uint32_t parent_node_id, uint32_t priority,
			  uint32_t weight, uint32_t level_id,
			  struct rte_tm_node_params *params,
			  struct rte_tm_error *error);
static int txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
			     struct rte_tm_error *error);
static int txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
			       int *is_leaf, struct rte_tm_error *error);
static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
					uint32_t level_id,
					struct rte_tm_level_capabilities *cap,
					struct rte_tm_error *error);
static int txgbe_node_capabilities_get(struct rte_eth_dev *dev,
				       uint32_t node_id,
				       struct rte_tm_node_capabilities *cap,
				       struct rte_tm_error *error);
static int txgbe_hierarchy_commit(struct rte_eth_dev *dev,
				  int clear_on_fail,
				  struct rte_tm_error *error);

const struct rte_tm_ops txgbe_tm_ops = {
	.capabilities_get = txgbe_tm_capabilities_get,
	.shaper_profile_add = txgbe_shaper_profile_add,
	.shaper_profile_delete = txgbe_shaper_profile_del,
	.node_add = txgbe_node_add,
	.node_delete = txgbe_node_delete,
	.node_type_get = txgbe_node_type_get,
	.level_capabilities_get = txgbe_level_capabilities_get,
	.node_capabilities_get = txgbe_node_capabilities_get,
	.hierarchy_commit = txgbe_hierarchy_commit,
};

int
txgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		 void *arg)
{
	if (!arg)
		return -EINVAL;

	*(const void **)arg = &txgbe_tm_ops;

	return 0;
}

void
txgbe_tm_conf_init(struct rte_eth_dev *dev)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);

	/* initialize shaper profile list */
	TAILQ_INIT(&tm_conf->shaper_profile_list);

	/* initialize node configuration */
	tm_conf->root = NULL;
	TAILQ_INIT(&tm_conf->queue_list);
	TAILQ_INIT(&tm_conf->tc_list);
	tm_conf->nb_tc_node = 0;
	tm_conf->nb_queue_node = 0;
	tm_conf->committed = false;
}

void
txgbe_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	struct txgbe_tm_shaper_profile *shaper_profile;
	struct txgbe_tm_node *tm_node;

	/* clear node configuration */
	while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
		TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
		rte_free(tm_node);
	}
	tm_conf->nb_queue_node = 0;
	while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
		rte_free(tm_node);
	}
	tm_conf->nb_tc_node = 0;
	if (tm_conf->root) {
		rte_free(tm_conf->root);
		tm_conf->root = NULL;
	}

	/* Remove all shaper profiles */
	while ((shaper_profile =
	       TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
		TAILQ_REMOVE(&tm_conf->shaper_profile_list,
			     shaper_profile, node);
		rte_free(shaper_profile);
	}
}
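
/*
 * Usage sketch (illustrative only, not part of the driver): applications do
 * not call txgbe_tm_ops_get() directly. The generic rte_tm layer resolves
 * the ops table above through the ethdev tm_ops_get callback, e.g.:
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error error;
 *	int ret = rte_tm_capabilities_get(port_id, &cap, &error);
 *
 *	if (ret)
 *		printf("TM error: %s\n",
 *		       error.message ? error.message : "unknown");
 */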

static inline uint8_t
txgbe_tc_nb_get(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *eth_conf;
	uint8_t nb_tcs = 0;

	eth_conf = &dev->data->dev_conf;
	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
		    RTE_ETH_32_POOLS)
			nb_tcs = RTE_ETH_4_TCS;
		else
			nb_tcs = RTE_ETH_8_TCS;
	} else {
		nb_tcs = 1;
	}

	return nb_tcs;
}

static int
txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
			  struct rte_tm_capabilities *cap,
			  struct rte_tm_error *error)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint8_t tc_nb = txgbe_tc_nb_get(dev);

	if (!cap || !error)
		return -EINVAL;

	if (tc_nb > hw->mac.max_tx_queues)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	/* set all the parameters to 0 first */
	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	/**
	 * Report the maximum capabilities, not the current configuration.
	 */
	/* port + TCs + queues */
	cap->n_nodes_max = 1 + TXGBE_DCB_TC_MAX +
			   hw->mac.max_tx_queues;
	cap->n_levels_max = 3;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = cap->n_nodes_max;
	cap->shaper_private_n_max = cap->n_nodes_max;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	/* 10Gbps -> 1.25GBps */
	cap->shaper_private_rate_max = 1250000000ull;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;
	cap->sched_n_children_max = hw->mac.max_tx_queues;
	/**
	 * HW supports SP, but there is no plan to support it for now.
	 * So, all the nodes should have the same priority.
	 */
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	/**
	 * SW only supports fair round robin for now.
	 * So, all the nodes should have the same weight.
	 */
	cap->sched_wfq_weight_max = 1;
	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
	cap->stats_mask = 0;

	return 0;
}
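
/*
 * Unit note: rte_tm expresses shaper rates in bytes per second, so the
 * 10 Gbit/s line rate above is reported as 10^10 / 8 = 1250000000 B/s.
 */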

static inline struct txgbe_tm_shaper_profile *
txgbe_shaper_profile_search(struct rte_eth_dev *dev,
			    uint32_t shaper_profile_id)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	struct txgbe_shaper_profile_list *shaper_profile_list =
		&tm_conf->shaper_profile_list;
	struct txgbe_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}

static int
txgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
				 struct rte_tm_error *error)
{
	/* min rate not supported */
	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}
	/* min bucket size not supported */
	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}
	/* max bucket size not supported */
	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}
	/* length adjustment not supported */
	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	return 0;
}

static int
txgbe_shaper_profile_add(struct rte_eth_dev *dev,
			 uint32_t shaper_profile_id,
			 struct rte_tm_shaper_params *profile,
			 struct rte_tm_error *error)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	struct txgbe_tm_shaper_profile *shaper_profile;
	int ret;

	if (!profile || !error)
		return -EINVAL;

	ret = txgbe_shaper_profile_param_check(profile, error);
	if (ret)
		return ret;

	shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id);

	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("txgbe_tm_shaper_profile",
				     sizeof(struct txgbe_tm_shaper_profile),
				     0);
	if (!shaper_profile)
		return -ENOMEM;
	shaper_profile->shaper_profile_id = shaper_profile_id;
	rte_memcpy(&shaper_profile->profile, profile,
		   sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
			  shaper_profile, node);

	return 0;
}
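
/*
 * Usage sketch (illustrative only): given the checks above, the only
 * parameter a profile may carry on this PMD is the peak rate, in bytes
 * per second:
 *
 *	struct rte_tm_shaper_params sp = {0};
 *	struct rte_tm_error error;
 *
 *	sp.peak.rate = 125000000;	// 1 Gbit/s
 *	ret = rte_tm_shaper_profile_add(port_id, 1, &sp, &error);
 */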

static int
txgbe_shaper_profile_del(struct rte_eth_dev *dev,
			 uint32_t shaper_profile_id,
			 struct rte_tm_error *error)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	struct txgbe_tm_shaper_profile *shaper_profile;

	if (!error)
		return -EINVAL;

	shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id);

	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID does not exist";
		return -EINVAL;
	}

	/* don't delete a profile if it's used by one or several nodes */
	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);

	return 0;
}

static inline struct txgbe_tm_node *
txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
		     enum txgbe_tm_node_type *node_type)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	struct txgbe_tm_node *tm_node;

	if (tm_conf->root && tm_conf->root->id == node_id) {
		*node_type = TXGBE_TM_NODE_TYPE_PORT;
		return tm_conf->root;
	}

	TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = TXGBE_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = TXGBE_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}

static void
txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
			uint16_t *base, uint16_t *nb)
{
	uint8_t nb_tcs = txgbe_tc_nb_get(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num = pci_dev->max_vfs;

	*base = 0;
	*nb = 0;

	/* VT on */
	if (vf_num) {
		/* no DCB */
		if (nb_tcs == 1) {
			if (vf_num >= RTE_ETH_32_POOLS) {
				*nb = 2;
				*base = vf_num * 2;
			} else if (vf_num >= RTE_ETH_16_POOLS) {
				*nb = 4;
				*base = vf_num * 4;
			} else {
				*nb = 8;
				*base = vf_num * 8;
			}
		} else {
			/* DCB */
			*nb = 1;
			*base = vf_num * nb_tcs + tc_node_no;
		}
	} else {
		/* VT off */
		if (nb_tcs == RTE_ETH_8_TCS) {
			switch (tc_node_no) {
			case 0:
				*base = 0;
				*nb = 32;
				break;
			case 1:
				*base = 32;
				*nb = 32;
				break;
			case 2:
				*base = 64;
				*nb = 16;
				break;
			case 3:
				*base = 80;
				*nb = 16;
				break;
			case 4:
				*base = 96;
				*nb = 8;
				break;
			case 5:
				*base = 104;
				*nb = 8;
				break;
			case 6:
				*base = 112;
				*nb = 8;
				break;
			case 7:
				*base = 120;
				*nb = 8;
				break;
			default:
				return;
			}
		} else {
			switch (tc_node_no) {
			/**
			 * If there is no VF and no DCB, only 64 queues
			 * can be used. That case is also covered by
			 * "case 0".
			 */
			case 0:
				*base = 0;
				*nb = 64;
				break;
			case 1:
				*base = 64;
				*nb = 32;
				break;
			case 2:
				*base = 96;
				*nb = 16;
				break;
			case 3:
				*base = 112;
				*nb = 16;
				break;
			default:
				return;
			}
		}
	}
}
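
/*
 * Worked examples for the mapping above: with VT off and 8 TCs, TC node 2
 * owns queues 64..79 (base 64, 16 queues); with 32 VFs and 4 TCs, each PF
 * TC node owns a single queue, e.g. TC node 2 maps to queue
 * 32 * 4 + 2 = 130.
 */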

static int
txgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
		       uint32_t priority, uint32_t weight,
		       struct rte_tm_node_params *params,
		       struct rte_tm_error *error)
{
	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	/* shared shaper not supported */
	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	/* for non-leaf node */
	if (node_id >= dev->data->nb_tx_queues) {
		/* check the unsupported parameters */
		if (params->nonleaf.wfq_weight_mode) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ not supported";
			return -EINVAL;
		}
		if (params->nonleaf.n_sp_priorities != 1) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
			error->message = "SP priority not supported";
			return -EINVAL;
		} else if (params->nonleaf.wfq_weight_mode &&
			   !(*params->nonleaf.wfq_weight_mode)) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ weight mode should be byte mode";
			return -EINVAL;
		}

		return 0;
	}

	/* for leaf node */
	/* check the unsupported parameters */
	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "Congestion management not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.wred_profile_id !=
	    RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}
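
/*
 * Note: leaf and non-leaf candidates are distinguished purely by id in the
 * check above. Node ids below dev->data->nb_tx_queues are treated as queue
 * (leaf) nodes, so port and TC node ids must be chosen at or above
 * nb_tx_queues.
 */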

/**
 * The TC and queue configuration is currently controlled by DCB.
 * We need to check whether the node configuration follows the DCB
 * configuration. In the future, we may use TM to cover DCB.
 */
static int
txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	       uint32_t parent_node_id, uint32_t priority,
	       uint32_t weight, uint32_t level_id,
	       struct rte_tm_node_params *params,
	       struct rte_tm_error *error)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
	enum txgbe_tm_node_type parent_node_type = TXGBE_TM_NODE_TYPE_MAX;
	struct txgbe_tm_shaper_profile *shaper_profile = NULL;
	struct txgbe_tm_node *tm_node;
	struct txgbe_tm_node *parent_node;
	uint8_t nb_tcs;
	uint16_t q_base = 0;
	uint16_t q_nb = 0;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* if already committed */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = txgbe_node_param_check(dev, node_id, priority, weight,
				     params, error);
	if (ret)
		return ret;

	/* check if the node ID is already used */
	if (txgbe_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* check the shaper profile id */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		shaper_profile = txgbe_shaper_profile_search(dev,
					params->shaper_profile_id);
		if (!shaper_profile) {
			error->type =
			    RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
			error->message = "shaper profile does not exist";
			return -EINVAL;
		}
	}

	/* a node without a parent is the root node */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level */
		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
		    level_id > TXGBE_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously there can be no more than one root */
		if (tm_conf->root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("txgbe_tm_node",
				      sizeof(struct txgbe_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->priority = priority;
		tm_node->weight = weight;
		tm_node->reference_count = 0;
		tm_node->no = 0;
		tm_node->parent = NULL;
		tm_node->shaper_profile = shaper_profile;
		rte_memcpy(&tm_node->params, params,
			   sizeof(struct rte_tm_node_params));
		tm_conf->root = tm_node;

		/* increase the reference counter of the shaper profile */
		if (shaper_profile)
			shaper_profile->reference_count++;

		return 0;
	}

	/* TC or queue node */
	/* check the parent node */
	parent_node = txgbe_tm_node_search(dev, parent_node_id,
					   &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent does not exist";
		return -EINVAL;
	}
	if (parent_node_type != TXGBE_TM_NODE_TYPE_PORT &&
	    parent_node_type != TXGBE_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}
	/* check level */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number */
	if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
		/* check the TC number */
		nb_tcs = txgbe_tc_nb_get(dev);
		if (tm_conf->nb_tc_node >= nb_tcs) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check the queue number */
		if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}

		txgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
		if (parent_node->reference_count >= q_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "more queues than the TC supports";
			return -EINVAL;
		}

		/**
		 * check the node id.
		 * For a queue, the node id means the queue id.
		 */
		if (node_id >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "queue id too large";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("txgbe_tm_node",
			      sizeof(struct txgbe_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = shaper_profile;
	rte_memcpy(&tm_node->params, params,
		   sizeof(struct rte_tm_node_params));
	if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
		tm_node->no = parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->tc_list,
				  tm_node, node);
		tm_conf->nb_tc_node++;
	} else {
		tm_node->no = q_base + parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->queue_list,
				  tm_node, node);
		tm_conf->nb_queue_node++;
	}
	tm_node->parent->reference_count++;

	/* increase the reference counter of the shaper profile */
	if (shaper_profile)
		shaper_profile->reference_count++;

	return 0;
}
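
/*
 * Usage sketch (illustrative only; assumes the port has fewer than 900 Tx
 * queues, so ids 900 and 1000 are read as non-leaf node ids, while a queue
 * node id must equal the queue id):
 *
 *	struct rte_tm_node_params np = {0};
 *	struct rte_tm_error error;
 *
 *	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
 *	np.nonleaf.n_sp_priorities = 1;
 *	// root node, then one TC node under it
 *	rte_tm_node_add(port_id, 1000, RTE_TM_NODE_ID_NULL, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &error);
 *	rte_tm_node_add(port_id, 900, 1000, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &error);
 *	// queue 0 under the TC; leaf params must disable WRED explicitly
 *	memset(&np, 0, sizeof(np));
 *	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
 *	np.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
 *	rte_tm_node_add(port_id, 0, 900, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &error);
 */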

static int
txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		  struct rte_tm_error *error)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
	struct txgbe_tm_node *tm_node;

	if (!error)
		return -EINVAL;

	/* if already committed */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	/* the node should have no child */
	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message =
			"cannot delete a node which has children";
		return -EINVAL;
	}

	/* root node */
	if (node_type == TXGBE_TM_NODE_TYPE_PORT) {
		if (tm_node->shaper_profile)
			tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		tm_conf->root = NULL;
		return 0;
	}

	/* TC or queue node */
	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == TXGBE_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
		tm_conf->nb_tc_node--;
	} else {
		TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
		tm_conf->nb_queue_node--;
	}
	rte_free(tm_node);

	return 0;
}
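
/*
 * Note: node deletion is refused once the hierarchy is committed, and a
 * node with a non-zero reference_count cannot be removed, so an uncommitted
 * hierarchy must be torn down leaves first: queue nodes, then TC nodes,
 * then the root.
 */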

static int
txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		    int *is_leaf, struct rte_tm_error *error)
{
	enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
	struct txgbe_tm_node *tm_node;

	if (!is_leaf || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (node_type == TXGBE_TM_NODE_TYPE_QUEUE)
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}

static int
txgbe_level_capabilities_get(struct rte_eth_dev *dev,
			     uint32_t level_id,
			     struct rte_tm_level_capabilities *cap,
			     struct rte_tm_error *error)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (!cap || !error)
		return -EINVAL;

	if (level_id >= TXGBE_TM_NODE_TYPE_MAX) {
		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
		error->message = "too deep level";
		return -EINVAL;
	}

	/* root node */
	if (level_id == TXGBE_TM_NODE_TYPE_PORT) {
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->n_nodes_leaf_max = 0;
	} else if (level_id == TXGBE_TM_NODE_TYPE_TC) {
		/* TC */
		cap->n_nodes_max = TXGBE_DCB_TC_MAX;
		cap->n_nodes_nonleaf_max = TXGBE_DCB_TC_MAX;
		cap->n_nodes_leaf_max = 0;
	} else {
		/* queue */
		cap->n_nodes_max = hw->mac.max_tx_queues;
		cap->n_nodes_nonleaf_max = 0;
		cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
	}

	cap->non_leaf_nodes_identical = true;
	cap->leaf_nodes_identical = true;

	if (level_id != TXGBE_TM_NODE_TYPE_QUEUE) {
		cap->nonleaf.shaper_private_supported = true;
		cap->nonleaf.shaper_private_dual_rate_supported = false;
		cap->nonleaf.shaper_private_rate_min = 0;
		/* 10Gbps -> 1.25GBps */
		cap->nonleaf.shaper_private_rate_max = 1250000000ull;
		cap->nonleaf.shaper_shared_n_max = 0;
		if (level_id == TXGBE_TM_NODE_TYPE_PORT)
			cap->nonleaf.sched_n_children_max =
				TXGBE_DCB_TC_MAX;
		else
			cap->nonleaf.sched_n_children_max =
				hw->mac.max_tx_queues;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 1;
		cap->nonleaf.stats_mask = 0;

		return 0;
	}

	/* queue node */
	cap->leaf.shaper_private_supported = true;
	cap->leaf.shaper_private_dual_rate_supported = false;
	cap->leaf.shaper_private_rate_min = 0;
	/* 10Gbps -> 1.25GBps */
	cap->leaf.shaper_private_rate_max = 1250000000ull;
	cap->leaf.shaper_shared_n_max = 0;
	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = true;
	cap->leaf.cman_wred_context_shared_n_max = 0;
	cap->leaf.stats_mask = 0;

	return 0;
}
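
/*
 * Note: level ids map onto enum txgbe_tm_node_type in the checks above, so
 * callers of rte_tm_level_capabilities_get() pass 0 for the port level,
 * 1 for the TC level and 2 for the queue level.
 */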

static int
txgbe_node_capabilities_get(struct rte_eth_dev *dev,
			    uint32_t node_id,
			    struct rte_tm_node_capabilities *cap,
			    struct rte_tm_error *error)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
	struct txgbe_tm_node *tm_node;

	if (!cap || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	cap->shaper_private_supported = true;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	/* 10Gbps -> 1.25GBps */
	cap->shaper_private_rate_max = 1250000000ull;
	cap->shaper_shared_n_max = 0;

	if (node_type == TXGBE_TM_NODE_TYPE_QUEUE) {
		cap->leaf.cman_head_drop_supported = false;
		cap->leaf.cman_wred_context_private_supported = true;
		cap->leaf.cman_wred_context_shared_n_max = 0;
	} else {
		if (node_type == TXGBE_TM_NODE_TYPE_PORT)
			cap->nonleaf.sched_n_children_max =
				TXGBE_DCB_TC_MAX;
		else
			cap->nonleaf.sched_n_children_max =
				hw->mac.max_tx_queues;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 1;
	}

	cap->stats_mask = 0;

	return 0;
}

static int
txgbe_hierarchy_commit(struct rte_eth_dev *dev,
		       int clear_on_fail,
		       struct rte_tm_error *error)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	struct txgbe_tm_node *tm_node;
	uint64_t bw;
	int ret;

	if (!error)
		return -EINVAL;

	/* check the setting */
	if (!tm_conf->root)
		goto done;

	/* port max bandwidth not supported yet */
	if (tm_conf->root->shaper_profile &&
	    tm_conf->root->shaper_profile->profile.peak.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "no port max bandwidth";
		goto fail_clear;
	}

	/* HW does not support TC max bandwidth */
	TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
		if (tm_node->shaper_profile &&
		    tm_node->shaper_profile->profile.peak.rate) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "no TC max bandwidth";
			goto fail_clear;
		}
	}

	/* queue max bandwidth */
	TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
		if (tm_node->shaper_profile)
			bw = tm_node->shaper_profile->profile.peak.rate;
		else
			bw = 0;
		if (bw) {
			/* convert Bps to Mbps */
			bw = bw * 8 / 1000 / 1000;
			ret = txgbe_set_queue_rate_limit(dev, tm_node->no, bw);
			if (ret) {
				error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
				error->message =
					"failed to set queue max bandwidth";
				goto fail_clear;
			}
		}
	}

done:
	tm_conf->committed = true;
	return 0;

fail_clear:
	/* clear all the traffic manager configuration */
	if (clear_on_fail) {
		txgbe_tm_conf_uninit(dev);
		txgbe_tm_conf_init(dev);
	}
	return -EINVAL;
}
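
/*
 * Usage sketch (illustrative only): commit once the whole hierarchy has
 * been added; with clear_on_fail set, a failed commit wipes the staged
 * configuration:
 *
 *	ret = rte_tm_hierarchy_commit(port_id, 1, &error);
 *
 * Worked conversion from the loop above: a queue shaper profile with
 * peak.rate = 125000000 B/s is programmed as
 * 125000000 * 8 / 1000 / 1000 = 1000 Mbit/s.
 */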