/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_malloc.h>

#include "ixgbe_ethdev.h"

static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
				     struct rte_tm_capabilities *cap,
				     struct rte_tm_error *error);
static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_shaper_params *profile,
				    struct rte_tm_error *error);
static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_error *error);
static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
			  uint32_t parent_node_id, uint32_t priority,
			  uint32_t weight, uint32_t level_id,
			  struct rte_tm_node_params *params,
			  struct rte_tm_error *error);
static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
			     struct rte_tm_error *error);
static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
			       int *is_leaf, struct rte_tm_error *error);
static int ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
					uint32_t level_id,
					struct rte_tm_level_capabilities *cap,
					struct rte_tm_error *error);
static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
				       uint32_t node_id,
				       struct rte_tm_node_capabilities *cap,
				       struct rte_tm_error *error);
static int ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
				  int clear_on_fail,
				  struct rte_tm_error *error);

const struct rte_tm_ops ixgbe_tm_ops = {
	.capabilities_get = ixgbe_tm_capabilities_get,
	.shaper_profile_add = ixgbe_shaper_profile_add,
	.shaper_profile_delete = ixgbe_shaper_profile_del,
	.node_add = ixgbe_node_add,
	.node_delete = ixgbe_node_delete,
	.node_type_get = ixgbe_node_type_get,
	.level_capabilities_get = ixgbe_level_capabilities_get,
	.node_capabilities_get = ixgbe_node_capabilities_get,
	.hierarchy_commit = ixgbe_hierarchy_commit,
};

int
ixgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		 void *arg)
{
	if (!arg)
		return -EINVAL;

	*(const void **)arg = &ixgbe_tm_ops;

	return 0;
}

void
ixgbe_tm_conf_init(struct rte_eth_dev *dev)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);

	/* initialize shaper profile list */
	TAILQ_INIT(&tm_conf->shaper_profile_list);

	/* initialize node configuration */
	tm_conf->root = NULL;
	TAILQ_INIT(&tm_conf->queue_list);
	TAILQ_INIT(&tm_conf->tc_list);
	tm_conf->nb_tc_node = 0;
	tm_conf->nb_queue_node = 0;
	tm_conf->committed = false;
}

void
ixgbe_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_shaper_profile *shaper_profile;
	struct ixgbe_tm_node *tm_node;

	/* clear node configuration */
	while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
		TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
		rte_free(tm_node);
	}
	tm_conf->nb_queue_node = 0;
	while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
		rte_free(tm_node);
	}
	tm_conf->nb_tc_node = 0;
	if (tm_conf->root) {
		rte_free(tm_conf->root);
		tm_conf->root = NULL;
	}

	/* Remove all shaper profiles */
	while ((shaper_profile =
	       TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
		TAILQ_REMOVE(&tm_conf->shaper_profile_list,
			     shaper_profile, node);
		rte_free(shaper_profile);
	}
}

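/*
 * Derive the number of enabled TCs from the Tx multi-queue mode: the
 * value configured in dcb_tx_conf for DCB mode, 4 or 8 TCs depending
 * on the pool count for VMDq+DCB mode (32 pools -> 4 TCs, otherwise
 * 8 TCs), and a single TC in all other modes.
 */
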
static inline uint8_t
ixgbe_tc_nb_get(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *eth_conf;
	uint8_t nb_tcs = 0;

	eth_conf = &dev->data->dev_conf;
	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
		    RTE_ETH_32_POOLS)
			nb_tcs = RTE_ETH_4_TCS;
		else
			nb_tcs = RTE_ETH_8_TCS;
	} else {
		nb_tcs = 1;
	}

	return nb_tcs;
}

static int
ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
			  struct rte_tm_capabilities *cap,
			  struct rte_tm_error *error)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint8_t tc_nb = ixgbe_tc_nb_get(dev);

	if (!cap || !error)
		return -EINVAL;

	if (tc_nb > hw->mac.max_tx_queues)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	/**
	 * Here is the max capability, not the current configuration.
	 */
	/* port + TCs + queues */
	cap->n_nodes_max = 1 + IXGBE_DCB_MAX_TRAFFIC_CLASS +
			   hw->mac.max_tx_queues;
	cap->n_levels_max = 3;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = cap->n_nodes_max;
	cap->shaper_private_n_max = cap->n_nodes_max;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	/* 10Gbit/s -> 1.25GByte/s */
	cap->shaper_private_rate_max = 1250000000ull;
	cap->shaper_private_packet_mode_supported = 0;
	cap->shaper_private_byte_mode_supported = 1;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;
	cap->shaper_shared_packet_mode_supported = 0;
	cap->shaper_shared_byte_mode_supported = 0;
	cap->sched_n_children_max = hw->mac.max_tx_queues;
	/**
	 * HW supports SP, but there is no plan to support it now,
	 * so all nodes must have the same priority.
	 */
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	cap->sched_wfq_packet_mode_supported = 0;
	cap->sched_wfq_byte_mode_supported = 0;
	/**
	 * SW only supports fair round robin now,
	 * so all nodes must have the same weight.
	 */
	cap->sched_wfq_weight_max = 1;
	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
	cap->stats_mask = 0;

	return 0;
}

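/*
 * Illustrative sketch, not part of the driver: an application reaches
 * the ops above through the generic rte_tm layer, e.g. (port_id being
 * the caller's ethdev port):
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_capabilities_get(port_id, &cap, &err) == 0)
 *		printf("max nodes: %u\n", cap.n_nodes_max);
 *
 * rte_tm_capabilities_get() resolves ixgbe_tm_ops through
 * ixgbe_tm_ops_get() and dispatches to ixgbe_tm_capabilities_get().
 */
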
static inline struct ixgbe_tm_shaper_profile *
ixgbe_shaper_profile_search(struct rte_eth_dev *dev,
			    uint32_t shaper_profile_id)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_shaper_profile_list *shaper_profile_list =
		&tm_conf->shaper_profile_list;
	struct ixgbe_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}

static int
ixgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
				 struct rte_tm_error *error)
{
	/* min rate not supported */
	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}
	/* min bucket size not supported */
	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}
	/* max bucket size not supported */
	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}
	/* length adjustment not supported */
	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	return 0;
}

static int
ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
			 uint32_t shaper_profile_id,
			 struct rte_tm_shaper_params *profile,
			 struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_shaper_profile *shaper_profile;
	int ret;

	if (!profile || !error)
		return -EINVAL;

	ret = ixgbe_shaper_profile_param_check(profile, error);
	if (ret)
		return ret;

	shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);

	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("ixgbe_tm_shaper_profile",
				     sizeof(struct ixgbe_tm_shaper_profile),
				     0);
	if (!shaper_profile)
		return -ENOMEM;
	shaper_profile->shaper_profile_id = shaper_profile_id;
	rte_memcpy(&shaper_profile->profile, profile,
		   sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
			  shaper_profile, node);

	return 0;
}

static int
ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
			 uint32_t shaper_profile_id,
			 struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_shaper_profile *shaper_profile;

	if (!error)
		return -EINVAL;

	shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);

	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID does not exist";
		return -EINVAL;
	}

	/* don't delete a profile if it's used by one or several nodes */
	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);

	return 0;
}

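/*
 * Illustrative sketch, not part of the driver: registering a
 * 100 Mbit/s (12500000 byte/s) peak-rate profile under an arbitrary
 * unused id, here 1. Per the checks above, only peak.rate may be
 * non-zero:
 *
 *	struct rte_tm_shaper_params sp;
 *	struct rte_tm_error err;
 *
 *	memset(&sp, 0, sizeof(sp));
 *	sp.peak.rate = 12500000;
 *	rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
 */
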
static inline struct ixgbe_tm_node *
ixgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
		     enum ixgbe_tm_node_type *node_type)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_node *tm_node;

	if (tm_conf->root && tm_conf->root->id == node_id) {
		*node_type = IXGBE_TM_NODE_TYPE_PORT;
		return tm_conf->root;
	}

	TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = IXGBE_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = IXGBE_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}

static void
ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
			uint16_t *base, uint16_t *nb)
{
	uint8_t nb_tcs = ixgbe_tc_nb_get(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num = pci_dev->max_vfs;

	*base = 0;
	*nb = 0;

	/* VT on */
	if (vf_num) {
		/* no DCB */
		if (nb_tcs == 1) {
			if (vf_num >= RTE_ETH_32_POOLS) {
				*nb = 2;
				*base = vf_num * 2;
			} else if (vf_num >= RTE_ETH_16_POOLS) {
				*nb = 4;
				*base = vf_num * 4;
			} else {
				*nb = 8;
				*base = vf_num * 8;
			}
		} else {
			/* DCB */
			*nb = 1;
			*base = vf_num * nb_tcs + tc_node_no;
		}
	} else {
		/* VT off */
		if (nb_tcs == RTE_ETH_8_TCS) {
			switch (tc_node_no) {
			case 0:
				*base = 0;
				*nb = 32;
				break;
			case 1:
				*base = 32;
				*nb = 32;
				break;
			case 2:
				*base = 64;
				*nb = 16;
				break;
			case 3:
				*base = 80;
				*nb = 16;
				break;
			case 4:
				*base = 96;
				*nb = 8;
				break;
			case 5:
				*base = 104;
				*nb = 8;
				break;
			case 6:
				*base = 112;
				*nb = 8;
				break;
			case 7:
				*base = 120;
				*nb = 8;
				break;
			default:
				return;
			}
		} else {
			switch (tc_node_no) {
			/**
			 * If there is no VF and no DCB, only 64 queues
			 * can be used. That case is also covered by
			 * this "case 0".
			 */
			case 0:
				*base = 0;
				*nb = 64;
				break;
			case 1:
				*base = 64;
				*nb = 32;
				break;
			case 2:
				*base = 96;
				*nb = 16;
				break;
			case 3:
				*base = 112;
				*nb = 16;
				break;
			default:
				return;
			}
		}
	}
}

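/*
 * Worked example for the mapping above: with VT off and 8 TCs, TC 2
 * owns the 16 queues starting at base 64 (queues 64-79); with VT on
 * and DCB, each TC of each pool gets exactly one queue.
 */
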
static int
ixgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
		       uint32_t priority, uint32_t weight,
		       struct rte_tm_node_params *params,
		       struct rte_tm_error *error)
{
	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	/* shared shapers not supported */
	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	/* for non-leaf node */
	if (node_id >= dev->data->nb_tx_queues) {
		/* check the unsupported parameters */
		if (params->nonleaf.wfq_weight_mode) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ not supported";
			return -EINVAL;
		}
		if (params->nonleaf.n_sp_priorities != 1) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
			error->message = "SP priority not supported";
			return -EINVAL;
		} else if (params->nonleaf.wfq_weight_mode &&
			   !(*params->nonleaf.wfq_weight_mode)) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ should be byte mode";
			return -EINVAL;
		}

		return 0;
	}

	/* for leaf node */
	/* check the unsupported parameters */
	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "Congestion management not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.wred_profile_id !=
	    RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}

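/*
 * Illustrative sketch, not part of the driver: building the 3-level
 * hierarchy through the generic API, which dispatches to
 * ixgbe_node_add() below. Non-leaf node ids (here 1000000 and 900000,
 * arbitrary values >= the Tx queue count) are chosen by the caller;
 * a leaf node id is the Tx queue id. Error handling omitted:
 *
 *	struct rte_tm_node_params np;
 *	struct rte_tm_error err;
 *
 *	memset(&np, 0, sizeof(np));
 *	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
 *	np.nonleaf.n_sp_priorities = 1;
 *	rte_tm_node_add(port_id, 1000000, RTE_TM_NODE_ID_NULL, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *	rte_tm_node_add(port_id, 900000, 1000000, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *
 *	memset(&np, 0, sizeof(np));
 *	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
 *	np.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
 *	rte_tm_node_add(port_id, 0, 900000, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 */
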
/**
 * Now the TC and queue configuration is controlled by DCB.
 * We need to check whether the node configuration follows
 * the DCB configuration.
 * In the future, we may use TM to cover DCB.
 */
static int
ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	       uint32_t parent_node_id, uint32_t priority,
	       uint32_t weight, uint32_t level_id,
	       struct rte_tm_node_params *params,
	       struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
	enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX;
	struct ixgbe_tm_shaper_profile *shaper_profile = NULL;
	struct ixgbe_tm_node *tm_node;
	struct ixgbe_tm_node *parent_node;
	uint8_t nb_tcs;
	uint16_t q_base = 0;
	uint16_t q_nb = 0;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* if already committed */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = ixgbe_node_param_check(dev, node_id, priority, weight,
				     params, error);
	if (ret)
		return ret;

	/* check if the node ID is already used */
	if (ixgbe_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* check the shaper profile id */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		shaper_profile = ixgbe_shaper_profile_search(
					dev, params->shaper_profile_id);
		if (!shaper_profile) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
			error->message = "shaper profile does not exist";
			return -EINVAL;
		}
	}

	/* the node is the root if it has no parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level */
		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
		    level_id > IXGBE_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously there can be no more than one root */
		if (tm_conf->root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("ixgbe_tm_node",
				      sizeof(struct ixgbe_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->priority = priority;
		tm_node->weight = weight;
		tm_node->reference_count = 0;
		tm_node->no = 0;
		tm_node->parent = NULL;
		tm_node->shaper_profile = shaper_profile;
		rte_memcpy(&tm_node->params, params,
			   sizeof(struct rte_tm_node_params));
		tm_conf->root = tm_node;

		/* increase the reference counter of the shaper profile */
		if (shaper_profile)
			shaper_profile->reference_count++;

		return 0;
	}

	/* TC or queue node */
	/* check the parent node */
	parent_node = ixgbe_tm_node_search(dev, parent_node_id,
					   &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent does not exist";
		return -EINVAL;
	}
	if (parent_node_type != IXGBE_TM_NODE_TYPE_PORT &&
	    parent_node_type != IXGBE_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}
	/* check level */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != (uint32_t)parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number */
	if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
		/* check the TC number */
		nb_tcs = ixgbe_tc_nb_get(dev);
		if (tm_conf->nb_tc_node >= nb_tcs) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check the queue number */
		if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}

		ixgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
		if (parent_node->reference_count >= q_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "more queues than the TC supports";
			return -EINVAL;
		}

		/**
		 * check the node id.
		 * For a queue, the node id means the queue id.
		 */
		if (node_id >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "queue id too large";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("ixgbe_tm_node",
			      sizeof(struct ixgbe_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = shaper_profile;
	rte_memcpy(&tm_node->params, params,
		   sizeof(struct rte_tm_node_params));
	if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
		tm_node->no = parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->tc_list,
				  tm_node, node);
		tm_conf->nb_tc_node++;
	} else {
		tm_node->no = q_base + parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->queue_list,
				  tm_node, node);
		tm_conf->nb_queue_node++;
	}
	tm_node->parent->reference_count++;

	/* increase the reference counter of the shaper profile */
	if (shaper_profile)
		shaper_profile->reference_count++;

	return 0;
}

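/*
 * A node's reference_count tracks its children, so a hierarchy is torn
 * down leaves first: queue nodes before their TC node, TC nodes before
 * the root.
 */
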
static int
ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		  struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
	struct ixgbe_tm_node *tm_node;

	if (!error)
		return -EINVAL;

	/* if already committed */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	/* the node must have no children */
	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message =
			"cannot delete a node which has children";
		return -EINVAL;
	}

	/* root node */
	if (node_type == IXGBE_TM_NODE_TYPE_PORT) {
		if (tm_node->shaper_profile)
			tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		tm_conf->root = NULL;
		return 0;
	}

	/* TC or queue node */
	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == IXGBE_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
		tm_conf->nb_tc_node--;
	} else {
		TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
		tm_conf->nb_queue_node--;
	}
	rte_free(tm_node);

	return 0;
}

static int
ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		    int *is_leaf, struct rte_tm_error *error)
{
	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
	struct ixgbe_tm_node *tm_node;

	if (!is_leaf || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (node_type == IXGBE_TM_NODE_TYPE_QUEUE)
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}

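/*
 * Level ids map onto enum ixgbe_tm_node_type: port (root) is level 0,
 * TC is level 1 and queue is level 2.
 */
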
static int
ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
			     uint32_t level_id,
			     struct rte_tm_level_capabilities *cap,
			     struct rte_tm_error *error)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!cap || !error)
		return -EINVAL;

	if (level_id >= IXGBE_TM_NODE_TYPE_MAX) {
		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
		error->message = "too deep level";
		return -EINVAL;
	}

	/* root node */
	if (level_id == IXGBE_TM_NODE_TYPE_PORT) {
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->n_nodes_leaf_max = 0;
	} else if (level_id == IXGBE_TM_NODE_TYPE_TC) {
		/* TC */
		cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
		cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
		cap->n_nodes_leaf_max = 0;
	} else {
		/* queue */
		cap->n_nodes_max = hw->mac.max_tx_queues;
		cap->n_nodes_nonleaf_max = 0;
		cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
	}

	cap->non_leaf_nodes_identical = true;
	cap->leaf_nodes_identical = true;

	if (level_id != IXGBE_TM_NODE_TYPE_QUEUE) {
		cap->nonleaf.shaper_private_supported = true;
		cap->nonleaf.shaper_private_dual_rate_supported = false;
		cap->nonleaf.shaper_private_rate_min = 0;
		/* 10Gbit/s -> 1.25GByte/s */
		cap->nonleaf.shaper_private_rate_max = 1250000000ull;
		cap->nonleaf.shaper_private_packet_mode_supported = 0;
		cap->nonleaf.shaper_private_byte_mode_supported = 1;
		cap->nonleaf.shaper_shared_n_max = 0;
		cap->nonleaf.shaper_shared_packet_mode_supported = 0;
		cap->nonleaf.shaper_shared_byte_mode_supported = 0;
		if (level_id == IXGBE_TM_NODE_TYPE_PORT)
			cap->nonleaf.sched_n_children_max =
				IXGBE_DCB_MAX_TRAFFIC_CLASS;
		else
			cap->nonleaf.sched_n_children_max =
				hw->mac.max_tx_queues;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 1;
		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
		cap->nonleaf.stats_mask = 0;

		return 0;
	}

	/* queue node */
	cap->leaf.shaper_private_supported = true;
	cap->leaf.shaper_private_dual_rate_supported = false;
	cap->leaf.shaper_private_rate_min = 0;
	/* 10Gbit/s -> 1.25GByte/s */
	cap->leaf.shaper_private_rate_max = 1250000000ull;
	cap->leaf.shaper_private_packet_mode_supported = 0;
	cap->leaf.shaper_private_byte_mode_supported = 1;
	cap->leaf.shaper_shared_n_max = 0;
	cap->leaf.shaper_shared_packet_mode_supported = 0;
	cap->leaf.shaper_shared_byte_mode_supported = 0;
	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = true;
	cap->leaf.cman_wred_context_shared_n_max = 0;
	cap->leaf.stats_mask = 0;

	return 0;
}

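/*
 * Per-node capabilities below mirror the per-level ones above; the
 * node's resolved type (port/TC/queue) selects the relevant set.
 */
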
static int
ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
			    uint32_t node_id,
			    struct rte_tm_node_capabilities *cap,
			    struct rte_tm_error *error)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
	struct ixgbe_tm_node *tm_node;

	if (!cap || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	cap->shaper_private_supported = true;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	/* 10Gbit/s -> 1.25GByte/s */
	cap->shaper_private_rate_max = 1250000000ull;
	cap->shaper_private_packet_mode_supported = 0;
	cap->shaper_private_byte_mode_supported = 1;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_packet_mode_supported = 0;
	cap->shaper_shared_byte_mode_supported = 0;

	if (node_type == IXGBE_TM_NODE_TYPE_QUEUE) {
		cap->leaf.cman_head_drop_supported = false;
		cap->leaf.cman_wred_context_private_supported = true;
		cap->leaf.cman_wred_context_shared_n_max = 0;
	} else {
		if (node_type == IXGBE_TM_NODE_TYPE_PORT)
			cap->nonleaf.sched_n_children_max =
				IXGBE_DCB_MAX_TRAFFIC_CLASS;
		else
			cap->nonleaf.sched_n_children_max =
				hw->mac.max_tx_queues;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 1;
		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
	}

	cap->stats_mask = 0;

	return 0;
}

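/*
 * Illustrative sketch, not part of the driver: once the profile and
 * the nodes are in place, the application applies everything in one
 * shot; with clear_on_fail set, a failed commit wipes the whole TM
 * configuration:
 *
 *	struct rte_tm_error err;
 *	int ret = rte_tm_hierarchy_commit(port_id, 1, &err);
 */
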
static int
ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
		       int clear_on_fail,
		       struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_node *tm_node;
	uint64_t bw;
	int ret;

	if (!error)
		return -EINVAL;

	/* check the setting */
	if (!tm_conf->root)
		goto done;

	/* port max bandwidth not supported yet */
	if (tm_conf->root->shaper_profile &&
	    tm_conf->root->shaper_profile->profile.peak.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "no port max bandwidth";
		goto fail_clear;
	}

	/* HW doesn't support TC max bandwidth */
	TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
		if (tm_node->shaper_profile &&
		    tm_node->shaper_profile->profile.peak.rate) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "no TC max bandwidth";
			goto fail_clear;
		}
	}

	/* queue max bandwidth */
	TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
		if (tm_node->shaper_profile)
			bw = tm_node->shaper_profile->profile.peak.rate;
		else
			bw = 0;
		if (bw) {
			/* convert byte/s to Mbit/s,
			 * e.g. 1250000 byte/s -> 10 Mbit/s
			 */
			bw = bw * 8 / 1000 / 1000;
			ret = ixgbe_set_queue_rate_limit(dev, tm_node->no, bw);
			if (ret) {
				error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
				error->message =
					"failed to set queue max bandwidth";
				goto fail_clear;
			}
		}
	}

done:
	tm_conf->committed = true;
	return 0;

fail_clear:
	/* clear all the traffic manager configuration */
	if (clear_on_fail) {
		ixgbe_tm_conf_uninit(dev);
		ixgbe_tm_conf_init(dev);
	}
	return -EINVAL;
}