/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

static int i40e_tm_capabilities_get(struct rte_eth_dev *dev,
				    struct rte_tm_capabilities *cap,
				    struct rte_tm_error *error);
static int i40e_shaper_profile_add(struct rte_eth_dev *dev,
				   uint32_t shaper_profile_id,
				   struct rte_tm_shaper_params *profile,
				   struct rte_tm_error *error);
static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
				   uint32_t shaper_profile_id,
				   struct rte_tm_error *error);
static int i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
			 uint32_t parent_node_id, uint32_t priority,
			 uint32_t weight, uint32_t level_id,
			 struct rte_tm_node_params *params,
			 struct rte_tm_error *error);
static int i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
			    struct rte_tm_error *error);
static int i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
			      int *is_leaf, struct rte_tm_error *error);
static int i40e_level_capabilities_get(struct rte_eth_dev *dev,
				       uint32_t level_id,
				       struct rte_tm_level_capabilities *cap,
				       struct rte_tm_error *error);
static int i40e_node_capabilities_get(struct rte_eth_dev *dev,
				      uint32_t node_id,
				      struct rte_tm_node_capabilities *cap,
				      struct rte_tm_error *error);
static int i40e_hierarchy_commit(struct rte_eth_dev *dev,
				 int clear_on_fail,
				 struct rte_tm_error *error);

const struct rte_tm_ops i40e_tm_ops = {
	.capabilities_get = i40e_tm_capabilities_get,
	.shaper_profile_add = i40e_shaper_profile_add,
	.shaper_profile_delete = i40e_shaper_profile_del,
	.node_add = i40e_node_add,
	.node_delete = i40e_node_delete,
	.node_type_get = i40e_node_type_get,
	.level_capabilities_get = i40e_level_capabilities_get,
	.node_capabilities_get = i40e_node_capabilities_get,
	.hierarchy_commit = i40e_hierarchy_commit,
};

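/**
 * Return the traffic manager callback table. The ethdev layer exposes
 * this through eth_dev_ops, so applications reach the functions above
 * via the generic rte_tm API. A minimal sketch of that application-side
 * flow (port_id and all IDs below are illustrative, not part of this
 * driver):
 *
 *	struct rte_tm_error error;
 *	struct rte_tm_shaper_params sp = { .peak = { .rate = 625000000 } };
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = 1,
 *		.nonleaf = { .n_sp_priorities = 1 },
 *	};
 *
 *	rte_tm_shaper_profile_add(port_id, 1, &sp, &error);
 *	rte_tm_node_add(port_id, 1000000, RTE_TM_NODE_ID_NULL, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &error);
 *	rte_tm_hierarchy_commit(port_id, 1, &error);
 */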
int
i40e_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		void *arg)
{
	if (!arg)
		return -EINVAL;

	*(const void **)arg = &i40e_tm_ops;

	return 0;
}

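/**
 * Initialize the per-PF traffic manager state: empty shaper profile,
 * TC and queue lists, no root node, nothing committed yet.
 */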
void
i40e_tm_conf_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* initialize shaper profile list */
	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);

	/* initialize node configuration */
	pf->tm_conf.root = NULL;
	TAILQ_INIT(&pf->tm_conf.tc_list);
	TAILQ_INIT(&pf->tm_conf.queue_list);
	pf->tm_conf.nb_tc_node = 0;
	pf->tm_conf.nb_queue_node = 0;
	pf->tm_conf.committed = false;
}

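/**
 * Release all traffic manager state: every queue, TC and root node,
 * then every shaper profile.
 */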
void
i40e_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_shaper_profile *shaper_profile;
	struct i40e_tm_node *tm_node;

	/* clear node configuration */
	while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
		rte_free(tm_node);
	}
	pf->tm_conf.nb_queue_node = 0;
	while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
		rte_free(tm_node);
	}
	pf->tm_conf.nb_tc_node = 0;
	if (pf->tm_conf.root) {
		rte_free(pf->tm_conf.root);
		pf->tm_conf.root = NULL;
	}

	/* Remove all shaper profiles */
	while ((shaper_profile =
	       TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
		TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
			     shaper_profile, node);
		rte_free(shaper_profile);
	}
}

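/**
 * Count the traffic classes currently enabled on the main VSI.
 */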
static inline uint16_t
i40e_tc_nb_get(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *main_vsi = pf->main_vsi;
	uint16_t sum = 0;
	int i;

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (main_vsi->enabled_tc & BIT_ULL(i))
			sum++;
	}

	return sum;
}

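/**
 * Report the port-level TM capabilities: one port node, up to
 * I40E_MAX_TRAFFIC_CLASS TC nodes and num_tx_qp queue nodes.
 */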
static int
i40e_tm_capabilities_get(struct rte_eth_dev *dev,
			 struct rte_tm_capabilities *cap,
			 struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t tc_nb = i40e_tc_nb_get(dev);

	if (!cap || !error)
		return -EINVAL;

	if (tc_nb > hw->func_caps.num_tx_qp)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	/**
	 * support port + TCs + queues
	 * These are the maximum capabilities, not the current configuration.
	 */
	cap->n_nodes_max = 1 + I40E_MAX_TRAFFIC_CLASS + hw->func_caps.num_tx_qp;
	cap->n_levels_max = 3; /* port, TC, queue */
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = cap->n_nodes_max;
	cap->shaper_private_n_max = cap->n_nodes_max;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	/* 40Gbps -> 5GBps */
	cap->shaper_private_rate_max = 5000000000ull;
	cap->shaper_private_packet_mode_supported = 0;
	cap->shaper_private_byte_mode_supported = 1;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;
	cap->shaper_shared_packet_mode_supported = 0;
	cap->shaper_shared_byte_mode_supported = 0;
	cap->sched_n_children_max = hw->func_caps.num_tx_qp;
	/**
	 * HW supports SP, but there is no plan to support it now.
	 * So, all the nodes should have the same priority.
	 */
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	/**
	 * SW only supports fair round robin now.
	 * So, all the nodes should have the same weight.
	 */
	cap->sched_wfq_weight_max = 1;
	cap->sched_wfq_packet_mode_supported = 0;
	cap->sched_wfq_byte_mode_supported = 0;
	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
	cap->stats_mask = 0;

	return 0;
}

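/**
 * Look up a shaper profile by ID; return NULL if it is not in the list.
 */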
static inline struct i40e_tm_shaper_profile *
i40e_shaper_profile_search(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_shaper_profile_list *shaper_profile_list =
		&pf->tm_conf.shaper_profile_list;
	struct i40e_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}

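/**
 * Validate a shaper profile. Only a peak rate is supported: committed
 * rate/size, peak bucket size and packet length adjustment are rejected.
 */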
static int
i40e_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
				struct rte_tm_error *error)
{
	/* min rate not supported */
	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}
	/* min bucket size not supported */
	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}
	/* max bucket size not supported */
	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}
	/* length adjustment not supported */
	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	return 0;
}

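/**
 * Create a shaper profile and store it in the per-PF list. The rate is
 * only programmed into the hardware at hierarchy commit time.
 */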
static int
i40e_shaper_profile_add(struct rte_eth_dev *dev,
			uint32_t shaper_profile_id,
			struct rte_tm_shaper_params *profile,
			struct rte_tm_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_shaper_profile *shaper_profile;
	int ret;

	if (!profile || !error)
		return -EINVAL;

	ret = i40e_shaper_profile_param_check(profile, error);
	if (ret)
		return ret;

	shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);

	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("i40e_tm_shaper_profile",
				     sizeof(struct i40e_tm_shaper_profile),
				     0);
	if (!shaper_profile)
		return -ENOMEM;
	shaper_profile->shaper_profile_id = shaper_profile_id;
	rte_memcpy(&shaper_profile->profile, profile,
		   sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
			  shaper_profile, node);

	return 0;
}

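/**
 * Delete a shaper profile. Fails if the profile is still referenced
 * by a node.
 */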
static int
i40e_shaper_profile_del(struct rte_eth_dev *dev,
			uint32_t shaper_profile_id,
			struct rte_tm_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_shaper_profile *shaper_profile;

	if (!error)
		return -EINVAL;

	shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);

	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID does not exist";
		return -EINVAL;
	}

	/* don't delete a profile if it's used by one or several nodes */
	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);

	return 0;
}

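/**
 * Find a node by ID in the root/TC/queue sets and report its level
 * through *node_type.
 */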
static inline struct i40e_tm_node *
i40e_tm_node_search(struct rte_eth_dev *dev,
		    uint32_t node_id, enum i40e_tm_node_type *node_type)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
	struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct i40e_tm_node *tm_node;

	if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
		*node_type = I40E_TM_NODE_TYPE_PORT;
		return pf->tm_conf.root;
	}

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = I40E_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = I40E_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}

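/**
 * Validate the per-node parameters. Only the default priority (0) and
 * weight (1) are accepted, and neither shared shapers, WFQ, congestion
 * management nor WRED is supported. Node IDs below num_tx_qp denote
 * leaf (queue) nodes; the rest are non-leaf nodes.
 */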
static int
i40e_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
		      uint32_t priority, uint32_t weight,
		      struct rte_tm_node_params *params,
		      struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	/* shared shapers are not supported */
	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	/* for non-leaf node */
	if (node_id >= hw->func_caps.num_tx_qp) {
		if (params->nonleaf.wfq_weight_mode) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ not supported";
			return -EINVAL;
		}
		if (params->nonleaf.n_sp_priorities != 1) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
			error->message = "SP priority not supported";
			return -EINVAL;
		} else if (params->nonleaf.wfq_weight_mode &&
			   !(*params->nonleaf.wfq_weight_mode)) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ should be byte mode";
			return -EINVAL;
		}

		return 0;
	}

	/* for leaf node */
	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "Congestion management not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.wred_profile_id !=
	    RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}

/**
 * For now the TC and queue configuration is controlled by DCB,
 * so we check that the node configuration follows the DCB configuration.
 * In the future, TM may be used to cover DCB.
 */
static int
i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	      uint32_t parent_node_id, uint32_t priority,
	      uint32_t weight, uint32_t level_id,
	      struct rte_tm_node_params *params,
	      struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
	enum i40e_tm_node_type parent_node_type = I40E_TM_NODE_TYPE_MAX;
	struct i40e_tm_shaper_profile *shaper_profile = NULL;
	struct i40e_tm_node *tm_node;
	struct i40e_tm_node *parent_node;
	uint16_t tc_nb = 0;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* if already committed */
	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = i40e_node_param_check(dev, node_id, priority, weight,
				    params, error);
	if (ret)
		return ret;

	/* check if the node ID is already used */
	if (i40e_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* check the shaper profile id */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		shaper_profile = i40e_shaper_profile_search(
					dev, params->shaper_profile_id);
		if (!shaper_profile) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
			error->message = "shaper profile does not exist";
			return -EINVAL;
		}
	}

	/* a node without a parent is the root node */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level */
		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
		    level_id > I40E_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (pf->tm_conf.root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("i40e_tm_node",
				      sizeof(struct i40e_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->priority = priority;
		tm_node->weight = weight;
		tm_node->reference_count = 0;
		tm_node->parent = NULL;
		tm_node->shaper_profile = shaper_profile;
		rte_memcpy(&tm_node->params, params,
			   sizeof(struct rte_tm_node_params));
		pf->tm_conf.root = tm_node;

		/* increase the reference counter of the shaper profile */
		if (shaper_profile)
			shaper_profile->reference_count++;

		return 0;
	}

	/* TC or queue node */
	/* check the parent node */
	parent_node = i40e_tm_node_search(dev, parent_node_id,
					  &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent does not exist";
		return -EINVAL;
	}
	if (parent_node_type != I40E_TM_NODE_TYPE_PORT &&
	    parent_node_type != I40E_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}
	/* check level */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number */
	if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
		/* check the TC number */
		tc_nb = i40e_tc_nb_get(dev);
		if (pf->tm_conf.nb_tc_node >= tc_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check the queue number */
		if (pf->tm_conf.nb_queue_node >= hw->func_caps.num_tx_qp) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}

		/**
		 * check the node id.
		 * For a queue, the node id means the queue id.
		 */
		if (node_id >= hw->func_caps.num_tx_qp) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "queue id too large";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("i40e_tm_node",
			      sizeof(struct i40e_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = shaper_profile;
	rte_memcpy(&tm_node->params, params,
		   sizeof(struct rte_tm_node_params));
	if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
		TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
				  tm_node, node);
		pf->tm_conf.nb_tc_node++;
	} else {
		TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
				  tm_node, node);
		pf->tm_conf.nb_queue_node++;
	}
	tm_node->parent->reference_count++;

	/* increase the reference counter of the shaper profile */
	if (shaper_profile)
		shaper_profile->reference_count++;

	return 0;
}

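/**
 * Delete a node before commit. The node must exist and have no
 * children; parent and shaper profile reference counters are dropped.
 */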
static int
i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		 struct rte_tm_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
	struct i40e_tm_node *tm_node;

	if (!error)
		return -EINVAL;

	/* if already committed */
	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = i40e_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	/* the node should have no child */
	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message =
			"cannot delete a node which has children";
		return -EINVAL;
	}

	/* root node */
	if (node_type == I40E_TM_NODE_TYPE_PORT) {
		if (tm_node->shaper_profile)
			tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		pf->tm_conf.root = NULL;
		return 0;
	}

	/* TC or queue node */
	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == I40E_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
		pf->tm_conf.nb_tc_node--;
	} else {
		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
		pf->tm_conf.nb_queue_node--;
	}
	rte_free(tm_node);

	return 0;
}

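/**
 * Report whether a node is a leaf. Only queue nodes are leaves.
 */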
static int
i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		   int *is_leaf, struct rte_tm_error *error)
{
	enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
	struct i40e_tm_node *tm_node;

	if (!is_leaf || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = i40e_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (node_type == I40E_TM_NODE_TYPE_QUEUE)
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}

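/**
 * Report the capabilities of one hierarchy level (port, TC or queue).
 */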
static int
i40e_level_capabilities_get(struct rte_eth_dev *dev,
			    uint32_t level_id,
			    struct rte_tm_level_capabilities *cap,
			    struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!cap || !error)
		return -EINVAL;

	if (level_id >= I40E_TM_NODE_TYPE_MAX) {
		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
		error->message = "too deep level";
		return -EINVAL;
	}

	/* root node */
	if (level_id == I40E_TM_NODE_TYPE_PORT) {
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->n_nodes_leaf_max = 0;
	} else if (level_id == I40E_TM_NODE_TYPE_TC) {
		/* TC */
		cap->n_nodes_max = I40E_MAX_TRAFFIC_CLASS;
		cap->n_nodes_nonleaf_max = I40E_MAX_TRAFFIC_CLASS;
		cap->n_nodes_leaf_max = 0;
	} else {
		/* queue */
		cap->n_nodes_max = hw->func_caps.num_tx_qp;
		cap->n_nodes_nonleaf_max = 0;
		cap->n_nodes_leaf_max = hw->func_caps.num_tx_qp;
	}

	cap->non_leaf_nodes_identical = true;
	cap->leaf_nodes_identical = true;

	if (level_id != I40E_TM_NODE_TYPE_QUEUE) {
		cap->nonleaf.shaper_private_supported = true;
		cap->nonleaf.shaper_private_dual_rate_supported = false;
		cap->nonleaf.shaper_private_rate_min = 0;
		/* 40Gbps -> 5GBps */
		cap->nonleaf.shaper_private_rate_max = 5000000000ull;
		cap->nonleaf.shaper_private_packet_mode_supported = 0;
		cap->nonleaf.shaper_private_byte_mode_supported = 1;
		cap->nonleaf.shaper_shared_n_max = 0;
		cap->nonleaf.shaper_shared_packet_mode_supported = 0;
		cap->nonleaf.shaper_shared_byte_mode_supported = 0;
		if (level_id == I40E_TM_NODE_TYPE_PORT)
			cap->nonleaf.sched_n_children_max =
				I40E_MAX_TRAFFIC_CLASS;
		else
			cap->nonleaf.sched_n_children_max =
				hw->func_caps.num_tx_qp;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 1;
		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
		cap->nonleaf.stats_mask = 0;

		return 0;
	}

	/* queue node */
	cap->leaf.shaper_private_supported = true;
	cap->leaf.shaper_private_dual_rate_supported = false;
	cap->leaf.shaper_private_rate_min = 0;
	/* 40Gbps -> 5GBps */
	cap->leaf.shaper_private_rate_max = 5000000000ull;
	cap->leaf.shaper_private_packet_mode_supported = 0;
	cap->leaf.shaper_private_byte_mode_supported = 1;
	cap->leaf.shaper_shared_n_max = 0;
	cap->leaf.shaper_shared_packet_mode_supported = 0;
	cap->leaf.shaper_shared_byte_mode_supported = 0;
	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = true;
	cap->leaf.cman_wred_context_shared_n_max = 0;
	cap->leaf.stats_mask = 0;

	return 0;
}

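/**
 * Report the capabilities of a single node, based on its level.
 */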
static int
i40e_node_capabilities_get(struct rte_eth_dev *dev,
			   uint32_t node_id,
			   struct rte_tm_node_capabilities *cap,
			   struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum i40e_tm_node_type node_type;
	struct i40e_tm_node *tm_node;

	if (!cap || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = i40e_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	cap->shaper_private_supported = true;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	/* 40Gbps -> 5GBps */
	cap->shaper_private_rate_max = 5000000000ull;
	cap->shaper_private_packet_mode_supported = 0;
	cap->shaper_private_byte_mode_supported = 1;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_packet_mode_supported = 0;
	cap->shaper_shared_byte_mode_supported = 0;

	if (node_type == I40E_TM_NODE_TYPE_QUEUE) {
		cap->leaf.cman_head_drop_supported = false;
		cap->leaf.cman_wred_context_private_supported = true;
		cap->leaf.cman_wred_context_shared_n_max = 0;
	} else {
		if (node_type == I40E_TM_NODE_TYPE_PORT)
			cap->nonleaf.sched_n_children_max =
				I40E_MAX_TRAFFIC_CLASS;
		else
			cap->nonleaf.sched_n_children_max =
				hw->func_caps.num_tx_qp;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 1;
		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
	}

	cap->stats_mask = 0;

	return 0;
}

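/**
 * Apply the software hierarchy to the hardware. Either the port or the
 * TCs may carry a peak rate, not both: a port rate is programmed as a
 * VSI bandwidth limit, TC rates as ETS SLA credits. Queue-level rates
 * are rejected. On failure the whole configuration is optionally
 * cleared (clear_on_fail).
 */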
static int
i40e_hierarchy_commit(struct rte_eth_dev *dev,
		      int clear_on_fail,
		      struct rte_tm_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
	struct i40e_tm_node *tm_node;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
	uint64_t bw;
	uint8_t tc_map;
	int ret;
	int i;

	if (!error)
		return -EINVAL;

	/* check the setting */
	if (!pf->tm_conf.root)
		goto done;

	vsi = pf->main_vsi;
	hw = I40E_VSI_TO_HW(vsi);

	/**
	 * Bandwidth control for the port and the TCs in parallel is not
	 * supported. If the port has a max bandwidth, the TCs should
	 * have none.
	 */
	/* port */
	if (pf->tm_conf.root->shaper_profile)
		bw = pf->tm_conf.root->shaper_profile->profile.peak.rate;
	else
		bw = 0;
	if (bw) {
		/* check if any TC has a max bandwidth */
		TAILQ_FOREACH(tm_node, tc_list, node) {
			if (tm_node->shaper_profile &&
			    tm_node->shaper_profile->profile.peak.rate) {
				error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
				error->message = "no port and TC max bandwidth"
						 " in parallel";
				goto fail_clear;
			}
		}

		/* convert bytes per second into units of 50Mbps */
		bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;

		/* set the max bandwidth */
		ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid,
						  (uint16_t)bw, 0, NULL);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "failed to set port max bandwidth";
			goto fail_clear;
		}

		goto done;
	}

	/* TC */
	memset(&tc_bw, 0, sizeof(tc_bw));
	tc_bw.tc_valid_bits = vsi->enabled_tc;
	tc_map = vsi->enabled_tc;
	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (!tm_node->reference_count) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "TC without queue assigned";
			goto fail_clear;
		}

		i = 0;
		while (i < I40E_MAX_TRAFFIC_CLASS && !(tc_map & BIT_ULL(i)))
			i++;
		if (i >= I40E_MAX_TRAFFIC_CLASS) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "cannot find the TC";
			goto fail_clear;
		}
		tc_map &= ~BIT_ULL(i);

		if (tm_node->shaper_profile)
			bw = tm_node->shaper_profile->profile.peak.rate;
		else
			bw = 0;
		if (!bw)
			continue;

		/* convert bytes per second into units of 50Mbps */
		bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;

		tc_bw.tc_bw_credits[i] = rte_cpu_to_le_16((uint16_t)bw);
	}

	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->shaper_profile)
			bw = tm_node->shaper_profile->profile.peak.rate;
		else
			bw = 0;
		if (bw) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "queue QoS not supported";
			goto fail_clear;
		}
	}

	ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
	if (ret) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "failed to set TC max bandwidth";
		goto fail_clear;
	}

done:
	pf->tm_conf.committed = true;
	return 0;

fail_clear:
	/* clear all the traffic manager configuration */
	if (clear_on_fail) {
		i40e_tm_conf_uninit(dev);
		i40e_tm_conf_init(dev);
	}
	return -EINVAL;
}