/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_malloc.h>
#include <rte_string_fns.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"

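/*
 * Token bucket traffic class (TC) period values, in milliseconds, used when
 * the TM hierarchy is translated into rte_sched subport and pipe profiles.
 */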
#define SUBPORT_TC_PERIOD 10
#define PIPE_TC_PERIOD 40

int
softnic_tmgr_init(struct pmd_internals *p)
{
	TAILQ_INIT(&p->tmgr_port_list);

	return 0;
}

void
softnic_tmgr_free(struct pmd_internals *p)
{
	for ( ; ; ) {
		struct softnic_tmgr_port *tmgr_port;

		tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
		if (tmgr_port == NULL)
			break;

		TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
		rte_sched_port_free(tmgr_port->s);
		free(tmgr_port);
	}
}

struct softnic_tmgr_port *
softnic_tmgr_port_find(struct pmd_internals *p,
	const char *name)
{
	struct softnic_tmgr_port *tmgr_port;

	if (name == NULL)
		return NULL;

	TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
		if (strcmp(tmgr_port->name, name) == 0)
			return tmgr_port;

	return NULL;
}

struct softnic_tmgr_port *
softnic_tmgr_port_create(struct pmd_internals *p,
	const char *name)
{
	struct softnic_tmgr_port *tmgr_port;
	struct tm_params *t = &p->soft.tm.params;
	struct rte_sched_port *sched;
	uint32_t n_subports, subport_id;

	/* Check input params */
	if (name == NULL ||
		softnic_tmgr_port_find(p, name))
		return NULL;

	/*
	 * Resource
	 */

	/* Is hierarchy frozen? */
	if (p->soft.tm.hierarchy_frozen == 0)
		return NULL;

	/* Port */
	sched = rte_sched_port_config(&t->port_params);
	if (sched == NULL)
		return NULL;

	/* Subport */
	n_subports = t->port_params.n_subports_per_port;
	for (subport_id = 0; subport_id < n_subports; subport_id++) {
		uint32_t n_pipes_per_subport =
			t->subport_params[subport_id].n_pipes_per_subport_enabled;
		uint32_t pipe_id;
		int status;

		status = rte_sched_subport_config(sched,
			subport_id,
			&t->subport_params[subport_id],
			t->subport_to_profile[subport_id]);
		if (status) {
			rte_sched_port_free(sched);
			return NULL;
		}

		/* Pipe */
		for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
			int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
			int profile_id = t->pipe_to_profile[pos];

			if (profile_id < 0)
				continue;

			status = rte_sched_pipe_config(sched,
				subport_id,
				pipe_id,
				profile_id);
			if (status) {
				rte_sched_port_free(sched);
				return NULL;
			}
		}
	}

	/* Node allocation */
	tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
	if (tmgr_port == NULL) {
		rte_sched_port_free(sched);
		return NULL;
	}

	/* Node fill in */
	strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
	tmgr_port->s = sched;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);

	return tmgr_port;
}

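/*
 * Look up the rte_sched port instance backing this device; the softnic
 * traffic manager port is registered under the fixed name "TMGR".
 */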
static struct rte_sched_port *
SCHED(struct pmd_internals *p)
{
	struct softnic_tmgr_port *tmgr_port;

	tmgr_port = softnic_tmgr_port_find(p, "TMGR");
	if (tmgr_port == NULL)
		return NULL;

	return tmgr_port->s;
}

void
tm_hierarchy_init(struct pmd_internals *p)
{
	memset(&p->soft.tm, 0, sizeof(p->soft.tm));

	/* Initialize shaper profile list */
	TAILQ_INIT(&p->soft.tm.h.shaper_profiles);

	/* Initialize shared shaper list */
	TAILQ_INIT(&p->soft.tm.h.shared_shapers);

	/* Initialize wred profile list */
	TAILQ_INIT(&p->soft.tm.h.wred_profiles);

	/* Initialize TM node list */
	TAILQ_INIT(&p->soft.tm.h.nodes);
}

void
tm_hierarchy_free(struct pmd_internals *p)
{
	/* Remove all nodes */
	for ( ; ; ) {
		struct tm_node *tm_node;

		tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
		if (tm_node == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
		free(tm_node);
	}

	/* Remove all WRED profiles */
	for ( ; ; ) {
		struct tm_wred_profile *wred_profile;

		wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
		if (wred_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
		free(wred_profile);
	}

	/* Remove all shared shapers */
	for ( ; ; ) {
		struct tm_shared_shaper *shared_shaper;

		shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
		if (shared_shaper == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
		free(shared_shaper);
	}

	/* Remove all shaper profiles */
	for ( ; ; ) {
		struct tm_shaper_profile *shaper_profile;

		shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
		if (shaper_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
			shaper_profile, node);
		free(shaper_profile);
	}

	tm_hierarchy_init(p);
}

static struct tm_shaper_profile *
tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;

	TAILQ_FOREACH(sp, spl, node)
		if (shaper_profile_id == sp->shaper_profile_id)
			return sp;

	return NULL;
}

static struct tm_shared_shaper *
tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
	struct tm_shared_shaper *ss;

	TAILQ_FOREACH(ss, ssl, node)
		if (shared_shaper_id == ss->shared_shaper_id)
			return ss;

	return NULL;
}

static struct tm_wred_profile *
tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;

	TAILQ_FOREACH(wp, wpl, node)
		if (wred_profile_id == wp->wred_profile_id)
			return wp;

	return NULL;
}

static struct tm_node *
tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->node_id == node_id)
			return n;

	return NULL;
}

static struct tm_node *
tm_root_node_present(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
			return n;

	return NULL;
}

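/*
 * rte_sched indices are positional: a node's subport, pipe or queue ID is
 * its rank among same-level siblings in list order, while a TC ID is simply
 * the node's priority. The helpers below implement this mapping.
 */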
static uint32_t
tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *ns;
	uint32_t subport_id;

	subport_id = 0;
	TAILQ_FOREACH(ns, nl, node) {
		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		if (ns->node_id == subport_node->node_id)
			return subport_id;

		subport_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *np;
	uint32_t pipe_id;

	pipe_id = 0;
	TAILQ_FOREACH(np, nl, node) {
		if (np->level != TM_NODE_LEVEL_PIPE ||
			np->parent_node_id != pipe_node->parent_node_id)
			continue;

		if (np->node_id == pipe_node->node_id)
			return pipe_id;

		pipe_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
{
	return tc_node->priority;
}

static uint32_t
tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *nq;
	uint32_t queue_id;

	queue_id = 0;
	TAILQ_FOREACH(nq, nl, node) {
		if (nq->level != TM_NODE_LEVEL_QUEUE ||
			nq->parent_node_id != queue_node->parent_node_id)
			continue;

		if (nq->node_id == queue_node->node_id)
			return queue_id;

		queue_id++;
	}

	return UINT32_MAX;
}

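/*
 * Per-level node count upper bounds, derived from the total queue count:
 * each pipe carries RTE_SCHED_QUEUES_PER_PIPE queues and
 * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE traffic classes, and in the worst case
 * each subport holds a single pipe.
 */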
static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t n_queues_max = p->params.tm.n_queues;
	uint32_t n_tc_max =
		(n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		/ RTE_SCHED_QUEUES_PER_PIPE;
	uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
	uint32_t n_subports_max = n_pipes_max;
	uint32_t n_root_max = 1;

	switch (level) {
	case TM_NODE_LEVEL_PORT:
		return n_root_max;
	case TM_NODE_LEVEL_SUBPORT:
		return n_subports_max;
	case TM_NODE_LEVEL_PIPE:
		return n_pipes_max;
	case TM_NODE_LEVEL_TC:
		return n_tc_max;
	case TM_NODE_LEVEL_QUEUE:
	default:
		return n_queues_max;
	}
}

/* Traffic manager node type get */
static int
pmd_tm_node_type_get(struct rte_eth_dev *dev,
	uint32_t node_id,
	int *is_leaf,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (is_leaf == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	if (node_id == RTE_TM_NODE_ID_NULL ||
		(tm_node_search(dev, node_id) == NULL))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	*is_leaf = node_id < p->params.tm.n_queues;

	return 0;
}

#ifdef RTE_SCHED_RED
#define WRED_SUPPORTED 1
#else
#define WRED_SUPPORTED 0
#endif

#define STATS_MASK_DEFAULT \
	(RTE_TM_STATS_N_PKTS | \
	RTE_TM_STATS_N_BYTES | \
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE \
	(STATS_MASK_DEFAULT | \
	RTE_TM_STATS_N_PKTS_QUEUED)

static const struct rte_tm_capabilities tm_cap = {
	.n_nodes_max = UINT32_MAX,
	.n_levels_max = TM_NODE_LEVEL_MAX,

	.non_leaf_nodes_identical = 0,
	.leaf_nodes_identical = 1,

	.shaper_n_max = UINT32_MAX,
	.shaper_private_n_max = UINT32_MAX,
	.shaper_private_dual_rate_n_max = 0,
	.shaper_private_rate_min = 1,
	.shaper_private_rate_max = UINT32_MAX,
	.shaper_private_packet_mode_supported = 0,
	.shaper_private_byte_mode_supported = 1,

	.shaper_shared_n_max = UINT32_MAX,
	.shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
	.shaper_shared_n_shapers_per_node_max = 1,
	.shaper_shared_dual_rate_n_max = 0,
	.shaper_shared_rate_min = 1,
	.shaper_shared_rate_max = UINT32_MAX,
	.shaper_shared_packet_mode_supported = 0,
	.shaper_shared_byte_mode_supported = 1,

	.shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
	.shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,

	.sched_n_children_max = UINT32_MAX,
	.sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
	.sched_wfq_n_children_per_group_max = UINT32_MAX,
	.sched_wfq_n_groups_max = 1,
	.sched_wfq_weight_max = UINT32_MAX,
	.sched_wfq_packet_mode_supported = 0,
	.sched_wfq_byte_mode_supported = 1,

	.cman_wred_packet_mode_supported = WRED_SUPPORTED,
	.cman_wred_byte_mode_supported = 0,
	.cman_head_drop_supported = 0,
	.cman_wred_context_n_max = 0,
	.cman_wred_context_private_n_max = 0,
	.cman_wred_context_shared_n_max = 0,
	.cman_wred_context_shared_n_nodes_per_context_max = 0,
	.cman_wred_context_shared_n_contexts_per_node_max = 0,

	.mark_vlan_dei_supported = {0, 0, 0},
	.mark_ip_ecn_tcp_supported = {0, 0, 0},
	.mark_ip_ecn_sctp_supported = {0, 0, 0},
	.mark_ip_dscp_supported = {0, 0, 0},

	.dynamic_update_mask = 0,

	.stats_mask = STATS_MASK_QUEUE,
};

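/*
 * The static table above is copied out as-is; the node-count, shaper-count
 * and WRED-context fields are then recomputed from the device's actual
 * queue count via tm_level_get_max_nodes().
 */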
/* Traffic manager capabilities get */
static int
pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	struct rte_tm_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_cap, sizeof(*cap));

	cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->shaper_private_n_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);

	cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);

	cap->shaper_n_max = cap->shaper_private_n_max +
		cap->shaper_shared_n_max;

	cap->shaper_shared_n_nodes_per_shaper_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);

	cap->sched_n_children_max = RTE_MAX(
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
		(uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);

	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;

	if (WRED_SUPPORTED)
		cap->cman_wred_context_private_n_max =
			tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
		cap->cman_wred_context_shared_n_max;

	return 0;
}

static const struct rte_tm_level_capabilities tm_level_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.n_nodes_max = 1,
		.n_nodes_nonleaf_max = 1,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 1,
			.shaper_shared_n_max = 0,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 1,
			.shaper_shared_n_max = 0,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
#ifdef RTE_SCHED_SUBPORT_TC_OV
			.sched_wfq_weight_max = UINT32_MAX,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
#else
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
#endif

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_PIPE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 1,
			.shaper_shared_n_max = 0,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 0,

			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 0,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_TC] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 1,
			.shaper_shared_n_max = 1,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 1,

			.sched_n_children_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = 0,
		.n_nodes_leaf_max = UINT32_MAX,
		.non_leaf_nodes_identical = 0,
		.leaf_nodes_identical = 1,

		{.leaf = {
			.shaper_private_supported = 0,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 0,
			.shaper_private_rate_max = 0,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 0,
			.shaper_shared_n_max = 0,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 0,

			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,

			.stats_mask = STATS_MASK_QUEUE,
		} },
	},
};

/* Traffic manager level capabilities get */
static int
pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	uint32_t level_id,
	struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	if (level_id >= TM_NODE_LEVEL_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));

	switch (level_id) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_SUBPORT);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_PIPE);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_TC:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_TC);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_QUEUE:
	default:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_QUEUE);
		cap->n_nodes_leaf_max = cap->n_nodes_max;
		break;
	}

	return 0;
}

static const struct rte_tm_node_capabilities tm_node_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 1,
		.shaper_shared_n_max = 0,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 0,

		{.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 1,
		.shaper_shared_n_max = 0,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 0,

		{.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_PIPE] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 1,
		.shaper_shared_n_max = 0,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 0,

		{.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 0,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_TC] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 1,
		.shaper_shared_n_max = 1,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 1,

		{.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.shaper_private_supported = 0,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 0,
		.shaper_private_rate_max = 0,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 0,
		.shaper_shared_n_max = 0,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 0,

		{.leaf = {
			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,
		} },

		.stats_mask = STATS_MASK_QUEUE,
	},
};

/* Traffic manager node capabilities get */
static int
pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	uint32_t node_id,
	struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
{
	struct tm_node *tm_node;

	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	tm_node = tm_node_search(dev, node_id);
	if (tm_node == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));

	switch (tm_node->level) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
	case TM_NODE_LEVEL_TC:
	case TM_NODE_LEVEL_QUEUE:
	default:
		break;
	}

	return 0;
}

static int
shaper_profile_check(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct tm_shaper_profile *sp;

	/* Shaper profile ID must not be NONE. */
	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must not exist. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak rate: non-zero, 32-bit */
	if (profile->peak.rate == 0 ||
		profile->peak.rate >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak size: non-zero, 32-bit */
	if (profile->peak.size == 0 ||
		profile->peak.size >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
			NULL,
			rte_strerror(EINVAL));

	/* Dual-rate profiles are not supported. */
	if (profile->committed.rate != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Packet length adjust: 24 bytes */
	if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
			NULL,
			rte_strerror(EINVAL));

	/* Packet mode is not supported. */
	if (profile->packet_mode != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE,
			NULL,
			rte_strerror(EINVAL));
	return 0;
}
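
/*
 * Illustrative example (documentation only, not driver code): a shaper
 * profile that passes every check above. Rates are in bytes per second,
 * sizes in bytes:
 *
 *	struct rte_tm_shaper_params profile = {
 *		.committed = {.rate = 0, .size = 0},       (single rate only)
 *		.peak = {.rate = 1000000, .size = 4096},   (non-zero, < 2^32)
 *		.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
 *		.packet_mode = 0,                          (byte mode only)
 *	};
 */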

/* Traffic manager shaper profile add */
static int
pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;
	int status;

	/* Check input params */
	status = shaper_profile_check(dev, shaper_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	sp = calloc(1, sizeof(struct tm_shaper_profile));
	if (sp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	sp->shaper_profile_id = shaper_profile_id;
	memcpy(&sp->params, profile, sizeof(sp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(spl, sp, node);
	p->soft.tm.h.n_shaper_profiles++;

	return 0;
}

/* Traffic manager shaper profile delete */
static int
pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp;

	/* Check existing */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (sp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
	p->soft.tm.h.n_shaper_profiles--;
	free(sp);

	return 0;
}

static struct tm_node *
tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
	struct tm_shared_shaper *ss)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	/* Subport: each TC uses shared shaper */
	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_TC ||
			n->params.n_shared_shapers == 0 ||
			n->params.shared_shaper_id[0] != ss->shared_shaper_id)
			continue;

		return n;
	}

	return NULL;
}

static int
subport_profile_exists(struct rte_eth_dev *dev,
	struct rte_sched_subport_profile_params *sp,
	uint32_t *subport_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t i;

	for (i = 0; i < t->n_subport_profiles; i++)
		if (memcmp(&t->subport_profile[i], sp, sizeof(*sp)) == 0) {
			if (subport_profile_id)
				*subport_profile_id = i;
			return 1;
		}

	return 0;
}

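/*
 * Runtime update path for a shared shaper: the new profile's peak rate is
 * written into the owning subport's TC rate, the subport is reconfigured in
 * place through rte_sched_subport_config(), and only then are the user
 * counts and cached profile parameters committed.
 */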
static int
update_subport_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shared_shaper *ss,
	struct tm_shaper_profile *sp_new)
{
	struct rte_sched_subport_profile_params subport_profile;
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);
	struct tm_node *np = nt->parent_node;
	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct tm_params *t = &p->soft.tm.params;
	uint32_t subport_profile_id;
	struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
		ss->shaper_profile_id);

	if (subport_id >= TM_MAX_SUBPORT_PROFILE)
		return -1;

	subport_profile_id = t->subport_to_profile[subport_id];

	/* Derive new subport configuration. */
	memcpy(&subport_profile,
		&p->soft.tm.params.subport_profile[subport_profile_id],
		sizeof(subport_profile));
	subport_profile.tc_rate[tc_id] = sp_new->params.peak.rate;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(SCHED(p),
		subport_id, NULL, subport_profile_id))
		return -1;

	/* Commit changes. */
	sp_old->n_users--;

	ss->shaper_profile_id = sp_new->shaper_profile_id;
	sp_new->n_users++;

	memcpy(&p->soft.tm.params.subport_profile[subport_profile_id],
		&subport_profile,
		sizeof(subport_profile));

	return 0;
}

/* Traffic manager shared shaper add/update */
static int
pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;
	struct tm_shaper_profile *sp;
	struct tm_node *nt;

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/**
	 * Add new shared shaper
	 */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL) {
		struct tm_shared_shaper_list *ssl =
			&p->soft.tm.h.shared_shapers;

		/* Hierarchy must not be frozen */
		if (p->soft.tm.hierarchy_frozen)
			return -rte_tm_error_set(error,
				EBUSY,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EBUSY));

		/* Memory allocation */
		ss = calloc(1, sizeof(struct tm_shared_shaper));
		if (ss == NULL)
			return -rte_tm_error_set(error,
				ENOMEM,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(ENOMEM));

		/* Fill in */
		ss->shared_shaper_id = shared_shaper_id;
		ss->shaper_profile_id = shaper_profile_id;

		/* Add to list */
		TAILQ_INSERT_TAIL(ssl, ss, node);
		p->soft.tm.h.n_shared_shapers++;

		return 0;
	}

	/**
	 * Update existing shared shaper
	 */
	/* Hierarchy must be frozen (run-time update) */
	if (p->soft.tm.hierarchy_frozen == 0)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Propagate change. */
	nt = tm_shared_shaper_get_tc(dev, ss);
	if (update_subport_tc_rate(dev, nt, ss, sp))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

/* Traffic manager shared shaper delete */
static int
pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;

	/* Check existing */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (ss->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
	p->soft.tm.h.n_shared_shapers--;
	free(ss);

	return 0;
}

static int
wred_profile_check(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct tm_wred_profile *wp;
	enum rte_color color;

	/* WRED profile ID must not be NONE. */
	if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile must not exist. */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile should be in packet mode */
	if (profile->packet_mode == 0)
		return -rte_tm_error_set(error,
			ENOTSUP,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(ENOTSUP));

	/* min_th <= max_th, max_th > 0 */
	for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
		uint32_t min_th = profile->red_params[color].min_th;
		uint32_t max_th = profile->red_params[color].max_th;

		if (min_th > max_th ||
			max_th == 0 ||
			min_th > UINT16_MAX ||
			max_th > UINT16_MAX)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_WRED_PROFILE,
				NULL,
				rte_strerror(EINVAL));
	}

	return 0;
}
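
/*
 * Illustrative example (documentation only, not driver code): WRED
 * parameters that satisfy the checks above, i.e. packet mode, thresholds
 * that fit in 16 bits, and min_th <= max_th for every color:
 *
 *	struct rte_tm_wred_params wred = {
 *		.red_params = {
 *			[RTE_COLOR_GREEN]  = {.min_th = 48, .max_th = 64,
 *				.maxp_inv = 10, .wq_log2 = 9},
 *			[RTE_COLOR_YELLOW] = {.min_th = 40, .max_th = 64,
 *				.maxp_inv = 10, .wq_log2 = 9},
 *			[RTE_COLOR_RED]    = {.min_th = 32, .max_th = 64,
 *				.maxp_inv = 10, .wq_log2 = 9},
 *		},
 *		.packet_mode = 1,
 *	};
 */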

/* Traffic manager WRED profile add */
static int
pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;
	int status;

	/* Check input params */
	status = wred_profile_check(dev, wred_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	wp = calloc(1, sizeof(struct tm_wred_profile));
	if (wp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	wp->wred_profile_id = wred_profile_id;
	memcpy(&wp->params, profile, sizeof(wp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(wpl, wp, node);
	p->soft.tm.h.n_wred_profiles++;

	return 0;
}

/* Traffic manager WRED profile delete */
static int
pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile *wp;

	/* Check existing */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (wp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
	p->soft.tm.h.n_wred_profiles--;
	free(wp);

	return 0;
}

static int
node_add_check_port(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
		params->shaper_profile_id);

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

static int
node_add_check_subport(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

static int
node_add_check_pipe(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight __rte_unused,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must match the number of traffic classes */
	if (params->nonleaf.n_sp_priorities !=
		RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* WFQ mode must be byte mode */
	if (params->nonleaf.wfq_weight_mode != NULL &&
		params->nonleaf.wfq_weight_mode[0] != 0 &&
		params->nonleaf.wfq_weight_mode[1] != 0 &&
		params->nonleaf.wfq_weight_mode[2] != 0 &&
		params->nonleaf.wfq_weight_mode[3] != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

static int
node_add_check_tc(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority __rte_unused,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Single valid shared shaper */
	if (params->n_shared_shapers > 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	if (params->n_shared_shapers == 1 &&
		(params->shared_shaper_id == NULL ||
		(!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Number of priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

static int
node_add_check_queue(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight __rte_unused,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: leaf */
	if (node_id >= p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* No shaper */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Congestion management must not be head drop */
	if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
			NULL,
			rte_strerror(EINVAL));

	/* Congestion management set to WRED */
	if (params->leaf.cman == RTE_TM_CMAN_WRED) {
		uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
		struct tm_wred_profile *wp = tm_wred_profile_search(dev,
			wred_profile_id);

		/* WRED profile (for private WRED context) must be valid */
		if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
			wp == NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
				NULL,
				rte_strerror(EINVAL));

		/* No shared WRED contexts */
		if (params->leaf.wred.n_shared_wred_contexts != 0)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_QUEUE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

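/*
 * Generic node-add validation: checks the node and parent IDs, priority,
 * weight and level, then dispatches to the per-level check above. Note that
 * a node's level is not taken from level_id but derived from its parent
 * (root = port level, child = parent level + 1).
 */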
1813 static int
node_add_check(struct rte_eth_dev * dev,uint32_t node_id,uint32_t parent_node_id,uint32_t priority,uint32_t weight,uint32_t level_id,struct rte_tm_node_params * params,struct rte_tm_error * error)1814 node_add_check(struct rte_eth_dev *dev,
1815 uint32_t node_id,
1816 uint32_t parent_node_id,
1817 uint32_t priority,
1818 uint32_t weight,
1819 uint32_t level_id,
1820 struct rte_tm_node_params *params,
1821 struct rte_tm_error *error)
1822 {
1823 struct tm_node *pn;
1824 uint32_t level;
1825 int status;
1826
1827 /* node_id, parent_node_id:
1828 * -node_id must not be RTE_TM_NODE_ID_NULL
1829 * -node_id must not be in use
1830 * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1831 * -root node must not exist
1832 * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1833 * -parent_node_id must be valid
1834 */
1835 if (node_id == RTE_TM_NODE_ID_NULL)
1836 return -rte_tm_error_set(error,
1837 EINVAL,
1838 RTE_TM_ERROR_TYPE_NODE_ID,
1839 NULL,
1840 rte_strerror(EINVAL));
1841
1842 if (tm_node_search(dev, node_id))
1843 return -rte_tm_error_set(error,
1844 EEXIST,
1845 RTE_TM_ERROR_TYPE_NODE_ID,
1846 NULL,
1847 rte_strerror(EEXIST));
1848
1849 if (parent_node_id == RTE_TM_NODE_ID_NULL) {
1850 pn = NULL;
1851 if (tm_root_node_present(dev))
1852 return -rte_tm_error_set(error,
1853 EEXIST,
1854 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1855 NULL,
1856 rte_strerror(EEXIST));
1857 } else {
1858 pn = tm_node_search(dev, parent_node_id);
1859 if (pn == NULL)
1860 return -rte_tm_error_set(error,
1861 EINVAL,
1862 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1863 NULL,
1864 rte_strerror(EINVAL));
1865 }
1866
1867 /* priority: must be 0 .. 3 */
1868 if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1869 return -rte_tm_error_set(error,
1870 EINVAL,
1871 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1872 NULL,
1873 rte_strerror(EINVAL));
1874
1875 /* weight: must be 1 .. 255 */
1876 if (weight == 0 || weight >= UINT8_MAX)
1877 return -rte_tm_error_set(error,
1878 EINVAL,
1879 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1880 NULL,
1881 rte_strerror(EINVAL));
1882
1883 /* level_id: if valid, then
1884 * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1885 * -level_id must be zero
1886 * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1887 * -level_id must be parent level ID plus one
1888 */
1889 level = (pn == NULL) ? 0 : pn->level + 1;
1890 if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
1891 return -rte_tm_error_set(error,
1892 EINVAL,
1893 RTE_TM_ERROR_TYPE_LEVEL_ID,
1894 NULL,
1895 rte_strerror(EINVAL));
1896
1897 /* params: must not be NULL */
1898 if (params == NULL)
1899 return -rte_tm_error_set(error,
1900 EINVAL,
1901 RTE_TM_ERROR_TYPE_NODE_PARAMS,
1902 NULL,
1903 rte_strerror(EINVAL));
1904
1905 /* params: per level checks */
1906 switch (level) {
1907 case TM_NODE_LEVEL_PORT:
1908 status = node_add_check_port(dev, node_id,
1909 parent_node_id, priority, weight, level_id,
1910 params, error);
1911 if (status)
1912 return status;
1913 break;
1914
1915 case TM_NODE_LEVEL_SUBPORT:
1916 status = node_add_check_subport(dev, node_id,
1917 parent_node_id, priority, weight, level_id,
1918 params, error);
1919 if (status)
1920 return status;
1921 break;
1922
1923 case TM_NODE_LEVEL_PIPE:
1924 status = node_add_check_pipe(dev, node_id,
1925 parent_node_id, priority, weight, level_id,
1926 params, error);
1927 if (status)
1928 return status;
1929 break;
1930
1931 case TM_NODE_LEVEL_TC:
1932 status = node_add_check_tc(dev, node_id,
1933 parent_node_id, priority, weight, level_id,
1934 params, error);
1935 if (status)
1936 return status;
1937 break;
1938
1939 case TM_NODE_LEVEL_QUEUE:
1940 status = node_add_check_queue(dev, node_id,
1941 parent_node_id, priority, weight, level_id,
1942 params, error);
1943 if (status)
1944 return status;
1945 break;
1946
1947 default:
1948 return -rte_tm_error_set(error,
1949 EINVAL,
1950 RTE_TM_ERROR_TYPE_LEVEL_ID,
1951 NULL,
1952 rte_strerror(EINVAL));
1953 }
1954
1955 return 0;
1956 }
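
/*
 * Editorial note on the checks above: the softnic TM hierarchy has
 * exactly five levels, and a node's level is derived from its parent
 * rather than trusted from the caller:
 *
 *	port (root, TM_NODE_LEVEL_PORT)
 *	  -> subport (TM_NODE_LEVEL_SUBPORT)
 *	    -> pipe (TM_NODE_LEVEL_PIPE)
 *	      -> traffic class (TM_NODE_LEVEL_TC)
 *	        -> queue (leaf, TM_NODE_LEVEL_QUEUE)
 *
 * Any level_id other than RTE_TM_NODE_LEVEL_ID_ANY must match the level
 * derived this way, which is also what the switch above dispatches on.
 */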
1957
1958 /* Traffic manager node add */
1959 static int
1960 pmd_tm_node_add(struct rte_eth_dev *dev,
1961 uint32_t node_id,
1962 uint32_t parent_node_id,
1963 uint32_t priority,
1964 uint32_t weight,
1965 uint32_t level_id,
1966 struct rte_tm_node_params *params,
1967 struct rte_tm_error *error)
1968 {
1969 struct pmd_internals *p = dev->data->dev_private;
1970 struct tm_node_list *nl = &p->soft.tm.h.nodes;
1971 struct tm_node *n;
1972 uint32_t i;
1973 int status;
1974
1975 /* Checks */
1976 if (p->soft.tm.hierarchy_frozen)
1977 return -rte_tm_error_set(error,
1978 EBUSY,
1979 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1980 NULL,
1981 rte_strerror(EBUSY));
1982
1983 status = node_add_check(dev, node_id, parent_node_id, priority, weight,
1984 level_id, params, error);
1985 if (status)
1986 return status;
1987
1988 /* Memory allocation */
1989 n = calloc(1, sizeof(struct tm_node));
1990 if (n == NULL)
1991 return -rte_tm_error_set(error,
1992 ENOMEM,
1993 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1994 NULL,
1995 rte_strerror(ENOMEM));
1996
1997 /* Fill in */
1998 n->node_id = node_id;
1999 n->parent_node_id = parent_node_id;
2000 n->priority = priority;
2001 n->weight = weight;
2002
2003 if (parent_node_id != RTE_TM_NODE_ID_NULL) {
2004 n->parent_node = tm_node_search(dev, parent_node_id);
2005 n->level = n->parent_node->level + 1;
2006 }
2007
2008 if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
2009 n->shaper_profile = tm_shaper_profile_search(dev,
2010 params->shaper_profile_id);
2011
2012 if (n->level == TM_NODE_LEVEL_QUEUE &&
2013 params->leaf.cman == RTE_TM_CMAN_WRED)
2014 n->wred_profile = tm_wred_profile_search(dev,
2015 params->leaf.wred.wred_profile_id);
2016
2017 memcpy(&n->params, params, sizeof(n->params));
2018
2019 /* Add to list */
2020 TAILQ_INSERT_TAIL(nl, n, node);
2021 p->soft.tm.h.n_nodes++;
2022
2023 /* Update dependencies */
2024 if (n->parent_node)
2025 n->parent_node->n_children++;
2026
2027 if (n->shaper_profile)
2028 n->shaper_profile->n_users++;
2029
2030 for (i = 0; i < params->n_shared_shapers; i++) {
2031 struct tm_shared_shaper *ss;
2032
2033 ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
2034 ss->n_users++;
2035 }
2036
2037 if (n->wred_profile)
2038 n->wred_profile->n_users++;
2039
2040 p->soft.tm.h.n_tm_nodes[n->level]++;
2041
2042 return 0;
2043 }
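
/*
 * Illustrative application-side usage (a minimal sketch, not part of the
 * driver; error handling is elided and the node ids, shaper id, priority
 * and weight values are made up):
 *
 *	struct rte_tm_error err;
 *	struct rte_tm_node_params np;
 *
 *	memset(&np, 0, sizeof(np));
 *	np.shaper_profile_id = shaper_id;	// profile added beforehand
 *	np.nonleaf.n_sp_priorities = 1;
 *
 *	// Root (port level): parent is RTE_TM_NODE_ID_NULL.
 *	rte_tm_node_add(port_id, 1000, RTE_TM_NODE_ID_NULL,
 *		0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *
 *	// One subport under the root.
 *	rte_tm_node_add(port_id, 2000, 1000,
 *		0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *
 * Since the driver resolves each node's level from its parent, passing
 * RTE_TM_NODE_LEVEL_ID_ANY is sufficient.
 */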
2044
2045 /* Traffic manager node delete */
2046 static int
2047 pmd_tm_node_delete(struct rte_eth_dev *dev,
2048 uint32_t node_id,
2049 struct rte_tm_error *error)
2050 {
2051 struct pmd_internals *p = dev->data->dev_private;
2052 struct tm_node *n;
2053 uint32_t i;
2054
2055 /* Check hierarchy changes are currently allowed */
2056 if (p->soft.tm.hierarchy_frozen)
2057 return -rte_tm_error_set(error,
2058 EBUSY,
2059 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2060 NULL,
2061 rte_strerror(EBUSY));
2062
2063 /* Check existing */
2064 n = tm_node_search(dev, node_id);
2065 if (n == NULL)
2066 return -rte_tm_error_set(error,
2067 EINVAL,
2068 RTE_TM_ERROR_TYPE_NODE_ID,
2069 NULL,
2070 rte_strerror(EINVAL));
2071
2072 /* Check unused */
2073 if (n->n_children)
2074 return -rte_tm_error_set(error,
2075 EBUSY,
2076 RTE_TM_ERROR_TYPE_NODE_ID,
2077 NULL,
2078 rte_strerror(EBUSY));
2079
2080 /* Update dependencies */
2081 p->soft.tm.h.n_tm_nodes[n->level]--;
2082
2083 if (n->wred_profile)
2084 n->wred_profile->n_users--;
2085
2086 for (i = 0; i < n->params.n_shared_shapers; i++) {
2087 struct tm_shared_shaper *ss;
2088
2089 ss = tm_shared_shaper_search(dev,
2090 n->params.shared_shaper_id[i]);
2091 ss->n_users--;
2092 }
2093
2094 if (n->shaper_profile)
2095 n->shaper_profile->n_users--;
2096
2097 if (n->parent_node)
2098 n->parent_node->n_children--;
2099
2100 /* Remove from list */
2101 TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
2102 p->soft.tm.h.n_nodes--;
2103 free(n);
2104
2105 return 0;
2106 }
2107
2109 static void
2110 pipe_profile_build(struct rte_eth_dev *dev,
2111 struct tm_node *np,
2112 struct rte_sched_pipe_params *pp)
2113 {
2114 struct pmd_internals *p = dev->data->dev_private;
2115 struct tm_hierarchy *h = &p->soft.tm.h;
2116 struct tm_node_list *nl = &h->nodes;
2117 struct tm_node *nt, *nq;
2118
2119 memset(pp, 0, sizeof(*pp));
2120
2121 /* Pipe */
2122 pp->tb_rate = np->shaper_profile->params.peak.rate;
2123 pp->tb_size = np->shaper_profile->params.peak.size;
2124
2125 /* Traffic Class (TC) */
2126 pp->tc_period = PIPE_TC_PERIOD;
2127
2128 pp->tc_ov_weight = np->weight;
2129
2130 TAILQ_FOREACH(nt, nl, node) {
2131 uint32_t queue_id = 0;
2132
2133 if (nt->level != TM_NODE_LEVEL_TC ||
2134 nt->parent_node_id != np->node_id)
2135 continue;
2136
2137 pp->tc_rate[nt->priority] =
2138 nt->shaper_profile->params.peak.rate;
2139
2140 /* Queue */
2141 TAILQ_FOREACH(nq, nl, node) {
2142
2143 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2144 nq->parent_node_id != nt->node_id)
2145 continue;
2146
2147 if (nt->priority == RTE_SCHED_TRAFFIC_CLASS_BE)
2148 pp->wrr_weights[queue_id] = nq->weight;
2149
2150 queue_id++;
2151 }
2152 }
2153 }
2154
2155 static int
2156 pipe_profile_free_exists(struct rte_eth_dev *dev,
2157 uint32_t *pipe_profile_id)
2158 {
2159 struct pmd_internals *p = dev->data->dev_private;
2160 struct tm_params *t = &p->soft.tm.params;
2161
2162 if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) {
2163 *pipe_profile_id = t->n_pipe_profiles;
2164 return 1;
2165 }
2166
2167 return 0;
2168 }
2169
2170 static int
2171 pipe_profile_exists(struct rte_eth_dev *dev,
2172 struct rte_sched_pipe_params *pp,
2173 uint32_t *pipe_profile_id)
2174 {
2175 struct pmd_internals *p = dev->data->dev_private;
2176 struct tm_params *t = &p->soft.tm.params;
2177 uint32_t i;
2178
2179 for (i = 0; i < t->n_pipe_profiles; i++)
2180 if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
2181 if (pipe_profile_id)
2182 *pipe_profile_id = i;
2183 return 1;
2184 }
2185
2186 return 0;
2187 }
2188
2189 static void
2190 pipe_profile_install(struct rte_eth_dev *dev,
2191 struct rte_sched_pipe_params *pp,
2192 uint32_t pipe_profile_id)
2193 {
2194 struct pmd_internals *p = dev->data->dev_private;
2195 struct tm_params *t = &p->soft.tm.params;
2196
2197 memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
2198 t->n_pipe_profiles++;
2199 }
2200
2201 static void
2202 pipe_profile_mark(struct rte_eth_dev *dev,
2203 uint32_t subport_id,
2204 uint32_t pipe_id,
2205 uint32_t pipe_profile_id)
2206 {
2207 struct pmd_internals *p = dev->data->dev_private;
2208 struct tm_hierarchy *h = &p->soft.tm.h;
2209 struct tm_params *t = &p->soft.tm.params;
2210 uint32_t n_pipes_per_subport, pos;
2211
2212 n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2213 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2214 pos = subport_id * n_pipes_per_subport + pipe_id;
2215
2216 t->pipe_to_profile[pos] = pipe_profile_id;
2217 }
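
/*
 * Worked example of the mapping above (illustrative numbers only): with
 * 4 pipes per subport, subport 1 / pipe 2 is recorded at
 *
 *	pos = 1 * 4 + 2 = 6
 *
 * i.e. pipe-to-profile entries are laid out linearly, subport by subport.
 */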
2218
2219 static struct rte_sched_pipe_params *
2220 pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
2221 {
2222 struct pmd_internals *p = dev->data->dev_private;
2223 struct tm_hierarchy *h = &p->soft.tm.h;
2224 struct tm_params *t = &p->soft.tm.params;
2225 uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2226 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2227
2228 uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
2229 uint32_t pipe_id = tm_node_pipe_id(dev, np);
2230
2231 uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
2232 uint32_t pipe_profile_id = t->pipe_to_profile[pos];
2233
2234 return &t->pipe_profiles[pipe_profile_id];
2235 }
2236
2237 static int
2238 pipe_profiles_generate(struct rte_eth_dev *dev)
2239 {
2240 struct pmd_internals *p = dev->data->dev_private;
2241 struct tm_hierarchy *h = &p->soft.tm.h;
2242 struct tm_node_list *nl = &h->nodes;
2243 struct tm_node *ns, *np;
2244 uint32_t subport_id;
2245
2246 /* Objective: Fill in the following fields in struct tm_params:
2247 * - pipe_profiles
2248 * - n_pipe_profiles
2249 * - pipe_to_profile
2250 */
2251
2252 subport_id = 0;
2253 TAILQ_FOREACH(ns, nl, node) {
2254 uint32_t pipe_id;
2255
2256 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2257 continue;
2258
2259 pipe_id = 0;
2260 TAILQ_FOREACH(np, nl, node) {
2261 struct rte_sched_pipe_params pp;
2262 uint32_t pos;
2263
2264 memset(&pp, 0, sizeof(pp));
2265
2266 if (np->level != TM_NODE_LEVEL_PIPE ||
2267 np->parent_node_id != ns->node_id)
2268 continue;
2269
2270 pipe_profile_build(dev, np, &pp);
2271
2272 if (!pipe_profile_exists(dev, &pp, &pos)) {
2273 if (!pipe_profile_free_exists(dev, &pos))
2274 return -1;
2275
2276 pipe_profile_install(dev, &pp, pos);
2277 }
2278
2279 pipe_profile_mark(dev, subport_id, pipe_id, pos);
2280
2281 pipe_id++;
2282 }
2283
2284 subport_id++;
2285 }
2286
2287 return 0;
2288 }
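
/*
 * Note on the generator above: pipe profiles are deduplicated by value.
 * A candidate rte_sched_pipe_params is built for every pipe node, but it
 * is installed only if no byte-identical profile exists yet (the memcmp()
 * in pipe_profile_exists()); otherwise the existing profile index is
 * reused. Generation fails only when more than TM_MAX_PIPE_PROFILE
 * distinct profiles would be required.
 */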
2289
2290 static struct tm_wred_profile *
2291 tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
2292 {
2293 struct pmd_internals *p = dev->data->dev_private;
2294 struct tm_hierarchy *h = &p->soft.tm.h;
2295 struct tm_node_list *nl = &h->nodes;
2296 struct tm_node *nq;
2297
2298 TAILQ_FOREACH(nq, nl, node) {
2299 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2300 nq->parent_node->priority != tc_id)
2301 continue;
2302
2303 return nq->wred_profile;
2304 }
2305
2306 return NULL;
2307 }
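
/*
 * Note: this returns the WRED profile of the first queue found under a TC
 * of the given priority. That is sufficient because hierarchy_commit_check()
 * below enforces that all leaf nodes of a given TC use the same WRED
 * profile.
 */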
2308
2309 #ifdef RTE_SCHED_RED
2310
2311 static void
2312 wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)
2313 {
2314 struct pmd_internals *p = dev->data->dev_private;
2315 struct rte_sched_subport_params *pp =
2316 &p->soft.tm.params.subport_params[subport_id];
2317
2318 uint32_t tc_id;
2319 enum rte_color color;
2320
2321 for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
2322 for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
2323 struct rte_red_params *dst =
2324 &pp->red_params[tc_id][color];
2325 struct tm_wred_profile *src_wp =
2326 tm_tc_wred_profile_get(dev, tc_id);
2327 struct rte_tm_red_params *src =
2328 &src_wp->params.red_params[color];
2329
2330 memcpy(dst, src, sizeof(*dst));
2331 }
2332 }
2333
2334 #else
2335
2336 #define wred_profiles_set(dev, subport_id)
2337
2338 #endif
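
/*
 * Editorial note: when RTE_SCHED_RED is not defined, wred_profiles_set()
 * compiles away to an empty macro, the subport red_params stay zeroed,
 * and the scheduler falls back to plain tail drop.
 */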
2339
2340 static struct tm_shared_shaper *
2341 tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
2342 {
2343 return (tc_node->params.n_shared_shapers) ?
2344 tm_shared_shaper_search(dev,
2345 tc_node->params.shared_shaper_id[0]) :
2346 NULL;
2347 }
2348
2349 static struct tm_shared_shaper *
2350 tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
2351 struct tm_node *subport_node,
2352 uint32_t tc_id)
2353 {
2354 struct pmd_internals *p = dev->data->dev_private;
2355 struct tm_node_list *nl = &p->soft.tm.h.nodes;
2356 struct tm_node *n;
2357
2358 TAILQ_FOREACH(n, nl, node) {
2359 if (n->level != TM_NODE_LEVEL_TC ||
2360 n->parent_node->parent_node_id !=
2361 subport_node->node_id ||
2362 n->priority != tc_id)
2363 continue;
2364
2365 return tm_tc_shared_shaper_get(dev, n);
2366 }
2367
2368 return NULL;
2369 }
2370
2371 static struct rte_sched_subport_profile_params *
2372 subport_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
2373 {
2374 struct pmd_internals *p = dev->data->dev_private;
2375 struct tm_params *t = &p->soft.tm.params;
2376 uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
2377
2378 if (subport_id >= TM_MAX_SUBPORT_PROFILE)
2379 return NULL;
2380
2381 return &t->subport_profile[subport_id];
2382 }
2383
2384 static void
2385 subport_profile_mark(struct rte_eth_dev *dev,
2386 uint32_t subport_id,
2387 uint32_t subport_profile_id)
2388 {
2389 struct pmd_internals *p = dev->data->dev_private;
2390 struct tm_params *t = &p->soft.tm.params;
2391
2392 t->subport_to_profile[subport_id] = subport_profile_id;
2393 }
2394
2395 static void
2396 subport_profile_install(struct rte_eth_dev *dev,
2397 struct rte_sched_subport_profile_params *sp,
2398 uint32_t subport_profile_id)
2399 {
2400 struct pmd_internals *p = dev->data->dev_private;
2401 struct tm_params *t = &p->soft.tm.params;
2402
2403 memcpy(&t->subport_profile[subport_profile_id],
2404 sp, sizeof(*sp));
2405 t->n_subport_profiles++;
2406 }
2407
2408 static int
2409 subport_profile_free_exists(struct rte_eth_dev *dev,
2410 uint32_t *subport_profile_id)
2411 {
2412 struct pmd_internals *p = dev->data->dev_private;
2413 struct tm_params *t = &p->soft.tm.params;
2414
2415 if (t->n_subport_profiles < TM_MAX_SUBPORT_PROFILE) {
2416 *subport_profile_id = t->n_subport_profiles;
2417 return 1;
2418 }
2419
2420 return 0;
2421 }
2422
2423 static void
2424 subport_profile_build(struct rte_eth_dev *dev, struct tm_node *np,
2425 struct rte_sched_subport_profile_params *sp)
2426 {
2427 uint32_t i;
2428 memset(sp, 0, sizeof(*sp));
2429
2430 sp->tb_rate = np->shaper_profile->params.peak.rate;
2431 sp->tb_size = np->shaper_profile->params.peak.size;
2432
2433 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
2434 struct tm_shared_shaper *ss;
2435 struct tm_shaper_profile *ssp;
2436
2437 ss = tm_subport_tc_shared_shaper_get(dev, np, i);
2438 ssp = (ss) ? tm_shaper_profile_search(dev,
2439 ss->shaper_profile_id) :
2440 np->shaper_profile;
2441 sp->tc_rate[i] = ssp->params.peak.rate;
2442 }
2443
2444 /* Traffic Class (TC) */
2445 sp->tc_period = SUBPORT_TC_PERIOD;
2446 }
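
/*
 * Rate selection note for subport_profile_build(): for each TC, the
 * subport-level TC rate is taken from the shared shaper that the pipes of
 * this subport attach to that TC, if one exists; otherwise it falls back
 * to the subport node's own peak rate, i.e. the TC is not capped below
 * the subport rate.
 */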
2447
2448 static int
2449 subport_profiles_generate(struct rte_eth_dev *dev)
2450 {
2451 struct pmd_internals *p = dev->data->dev_private;
2452 struct tm_hierarchy *h = &p->soft.tm.h;
2453 struct tm_node_list *nl = &h->nodes;
2454 struct tm_node *ns;
2455 uint32_t subport_id;
2456
2457 /* Objective: Fill in the following fields in struct tm_params:
2458 * - subport_profiles
2459 * - n_subport_profiles
2460 * - subport_to_profile
2461 */
2462
2463 subport_id = 0;
2464 TAILQ_FOREACH(ns, nl, node) {
2465 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2466 continue;
2467
2468 struct rte_sched_subport_profile_params sp;
2469 uint32_t pos;
2470
2471 memset(&sp, 0, sizeof(sp));
2472
2473 subport_profile_build(dev, ns, &sp);
2474
2475 if (!subport_profile_exists(dev, &sp, &pos)) {
2476 if (!subport_profile_free_exists(dev, &pos))
2477 return -1;
2478
2479 subport_profile_install(dev, &sp, pos);
2480 }
2481
2482 subport_profile_mark(dev, subport_id, pos);
2483
2484 subport_id++;
2485 }
2486
2487 return 0;
2488 }
2489
2491 static int
2492 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2493 {
2494 struct pmd_internals *p = dev->data->dev_private;
2495 struct tm_hierarchy *h = &p->soft.tm.h;
2496 struct tm_node_list *nl = &h->nodes;
2497 struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2498 struct tm_wred_profile_list *wpl = &h->wred_profiles;
2499 struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2500 struct tm_shared_shaper *ss;
2501
2502 uint32_t n_pipes_per_subport;
2503
2504 /* Root node exists. */
2505 if (nr == NULL)
2506 return -rte_tm_error_set(error,
2507 EINVAL,
2508 RTE_TM_ERROR_TYPE_LEVEL_ID,
2509 NULL,
2510 rte_strerror(EINVAL));
2511
2512 /* There is at least one subport, max is not exceeded. */
2513 if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2514 return -rte_tm_error_set(error,
2515 EINVAL,
2516 RTE_TM_ERROR_TYPE_LEVEL_ID,
2517 NULL,
2518 rte_strerror(EINVAL));
2519
2520 /* There is at least one pipe. */
2521 if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2522 return -rte_tm_error_set(error,
2523 EINVAL,
2524 RTE_TM_ERROR_TYPE_LEVEL_ID,
2525 NULL,
2526 rte_strerror(EINVAL));
2527
2528 /* Number of pipes is the same for all subports. Maximum number of pipes
2529 * per subport is not exceeded.
2530 */
2531 n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2532 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2533
2534 if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2535 return -rte_tm_error_set(error,
2536 EINVAL,
2537 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2538 NULL,
2539 rte_strerror(EINVAL));
2540
2541 TAILQ_FOREACH(ns, nl, node) {
2542 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2543 continue;
2544
2545 if (ns->n_children != n_pipes_per_subport)
2546 return -rte_tm_error_set(error,
2547 EINVAL,
2548 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2549 NULL,
2550 rte_strerror(EINVAL));
2551 }
2552
2553 /* Each pipe has exactly 13 TCs, with exactly one TC for each priority */
2554 TAILQ_FOREACH(np, nl, node) {
2555 uint32_t mask = 0, mask_expected =
2556 RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2557 uint32_t);
2558
2559 if (np->level != TM_NODE_LEVEL_PIPE)
2560 continue;
2561
2562 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2563 return -rte_tm_error_set(error,
2564 EINVAL,
2565 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2566 NULL,
2567 rte_strerror(EINVAL));
2568
2569 TAILQ_FOREACH(nt, nl, node) {
2570 if (nt->level != TM_NODE_LEVEL_TC ||
2571 nt->parent_node_id != np->node_id)
2572 continue;
2573
2574 mask |= 1 << nt->priority;
2575 }
2576
2577 if (mask != mask_expected)
2578 return -rte_tm_error_set(error,
2579 EINVAL,
2580 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2581 NULL,
2582 rte_strerror(EINVAL));
2583 }
2584
2585 /* Each strict-priority TC has exactly one packet queue, while the
2586 * lowest priority TC (best-effort) has RTE_SCHED_BE_QUEUES_PER_PIPE (4) queues.
2587 */
2588 TAILQ_FOREACH(nt, nl, node) {
2589 if (nt->level != TM_NODE_LEVEL_TC)
2590 continue;
2591
2592 if (nt->n_children != 1 && nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE)
2593 return -rte_tm_error_set(error,
2594 EINVAL,
2595 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2596 NULL,
2597 rte_strerror(EINVAL));
2598 }
2599
2600 /*
2601 * Shared shapers:
2602 * -For each TC #i, all pipes in the same subport use the same
2603 * shared shaper (or no shared shaper) for their TC #i.
2604 * -Each shared shaper needs to have at least one user. All its
2605 * users have to be TC nodes with the same priority that belong
2606 * to the same subport.
2607 */
2608 TAILQ_FOREACH(ns, nl, node) {
2609 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2610 uint32_t id;
2611
2612 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2613 continue;
2614
2615 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2616 s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2617
2618 TAILQ_FOREACH(nt, nl, node) {
2619 struct tm_shared_shaper *subport_ss, *tc_ss;
2620
2621 if (nt->level != TM_NODE_LEVEL_TC ||
2622 nt->parent_node->parent_node_id !=
2623 ns->node_id)
2624 continue;
2625
2626 subport_ss = s[nt->priority];
2627 tc_ss = tm_tc_shared_shaper_get(dev, nt);
2628
2629 if (subport_ss == NULL && tc_ss == NULL)
2630 continue;
2631
2632 if ((subport_ss == NULL && tc_ss != NULL) ||
2633 (subport_ss != NULL && tc_ss == NULL) ||
2634 subport_ss->shared_shaper_id !=
2635 tc_ss->shared_shaper_id)
2636 return -rte_tm_error_set(error,
2637 EINVAL,
2638 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2639 NULL,
2640 rte_strerror(EINVAL));
2641 }
2642 }
2643
2644 TAILQ_FOREACH(ss, ssl, node) {
2645 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2646 uint32_t n_users = 0;
2647
2648 if (nt_any != NULL)
2649 TAILQ_FOREACH(nt, nl, node) {
2650 if (nt->level != TM_NODE_LEVEL_TC ||
2651 nt->priority != nt_any->priority ||
2652 nt->parent_node->parent_node_id !=
2653 nt_any->parent_node->parent_node_id)
2654 continue;
2655
2656 n_users++;
2657 }
2658
2659 if (ss->n_users == 0 || ss->n_users != n_users)
2660 return -rte_tm_error_set(error,
2661 EINVAL,
2662 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2663 NULL,
2664 rte_strerror(EINVAL));
2665 }
2666
2667 /* Not too many subport profiles. */
2668 if (subport_profiles_generate(dev))
2669 return -rte_tm_error_set(error,
2670 EINVAL,
2671 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2672 NULL,
2673 rte_strerror(EINVAL));
2674
2676 /* Not too many pipe profiles. */
2677 if (pipe_profiles_generate(dev))
2678 return -rte_tm_error_set(error,
2679 EINVAL,
2680 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2681 NULL,
2682 rte_strerror(EINVAL));
2683
2684 /**
2685 * WRED (when used, i.e. at least one WRED profile defined):
2686 * -Each WRED profile must have at least one user.
2687 * -All leaf nodes must have their private WRED context enabled.
2688 * -For each TC #i, all leaf nodes must use the same WRED profile
2689 * for their private WRED context.
2690 */
2691 if (h->n_wred_profiles) {
2692 struct tm_wred_profile *wp;
2693 struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2694 uint32_t id;
2695
2696 TAILQ_FOREACH(wp, wpl, node)
2697 if (wp->n_users == 0)
2698 return -rte_tm_error_set(error,
2699 EINVAL,
2700 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2701 NULL,
2702 rte_strerror(EINVAL));
2703
2704 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
2705 w[id] = tm_tc_wred_profile_get(dev, id);
2706
2707 if (w[id] == NULL)
2708 return -rte_tm_error_set(error,
2709 EINVAL,
2710 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2711 NULL,
2712 rte_strerror(EINVAL));
2713 }
2714
2715 TAILQ_FOREACH(nq, nl, node) {
2716 uint32_t id;
2717
2718 if (nq->level != TM_NODE_LEVEL_QUEUE)
2719 continue;
2720
2721 id = nq->parent_node->priority;
2722
2723 if (nq->wred_profile == NULL ||
2724 nq->wred_profile->wred_profile_id !=
2725 w[id]->wred_profile_id)
2726 return -rte_tm_error_set(error,
2727 EINVAL,
2728 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2729 NULL,
2730 rte_strerror(EINVAL));
2731 }
2732 }
2733
2734 return 0;
2735 }
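
/*
 * Editorial recap of what a hierarchy must satisfy to pass the checks
 * above: one root node; 1 .. TM_MAX_SUBPORTS subports; the same non-zero
 * pipe count in every subport; RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE TCs per
 * pipe covering every priority exactly once; one queue per strict TC and
 * RTE_SCHED_BE_QUEUES_PER_PIPE queues for the best-effort TC; consistent
 * shared shaper and WRED profile usage; and subport/pipe profile counts
 * within TM_MAX_SUBPORT_PROFILE and TM_MAX_PIPE_PROFILE.
 */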
2736
2737 static void
2738 hierarchy_blueprints_create(struct rte_eth_dev *dev)
2739 {
2740 struct pmd_internals *p = dev->data->dev_private;
2741 struct tm_params *t = &p->soft.tm.params;
2742 struct tm_hierarchy *h = &p->soft.tm.h;
2743
2744 struct tm_node_list *nl = &h->nodes;
2745 struct tm_node *root = tm_root_node_present(dev), *n;
2746
2747 uint32_t subport_id;
2748
2749 t->port_params = (struct rte_sched_port_params) {
2750 .name = dev->data->name,
2751 .socket = dev->data->numa_node,
2752 .rate = root->shaper_profile->params.peak.rate,
2753 .mtu = dev->data->mtu,
2754 .frame_overhead =
2755 root->shaper_profile->params.pkt_length_adjust,
2756 .n_subports_per_port = root->n_children,
2757 .n_subport_profiles = t->n_subport_profiles,
2758 .subport_profiles = t->subport_profile,
2759 .n_max_subport_profiles = TM_MAX_SUBPORT_PROFILE,
2760 .n_pipes_per_subport = TM_MAX_PIPES_PER_SUBPORT,
2761 };
2762
2763 subport_id = 0;
2764 TAILQ_FOREACH(n, nl, node) {
2765
2766 if (n->level != TM_NODE_LEVEL_SUBPORT)
2767 continue;
2768
2769 t->subport_params[subport_id] =
2770 (struct rte_sched_subport_params) {
2771 .n_pipes_per_subport_enabled =
2772 h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2773 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
2774 .qsize = {p->params.tm.qsize[0],
2775 p->params.tm.qsize[1],
2776 p->params.tm.qsize[2],
2777 p->params.tm.qsize[3],
2778 p->params.tm.qsize[4],
2779 p->params.tm.qsize[5],
2780 p->params.tm.qsize[6],
2781 p->params.tm.qsize[7],
2782 p->params.tm.qsize[8],
2783 p->params.tm.qsize[9],
2784 p->params.tm.qsize[10],
2785 p->params.tm.qsize[11],
2786 p->params.tm.qsize[12],
2787 },
2788 .pipe_profiles = t->pipe_profiles,
2789 .n_pipe_profiles = t->n_pipe_profiles,
2790 .n_max_pipe_profiles = TM_MAX_PIPE_PROFILE,
2791 };
2792 wred_profiles_set(dev, subport_id);
2793 subport_id++;
2794 }
2795 }
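
/*
 * Note: nothing is instantiated here. This function only translates the
 * frozen TM hierarchy into rte_sched port and subport parameter
 * blueprints inside struct tm_params; softnic_tmgr_port_create() later
 * consumes these blueprints to configure the actual scheduler.
 */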
2796
2797 /* Traffic manager hierarchy commit */
2798 static int
2799 pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
2800 int clear_on_fail,
2801 struct rte_tm_error *error)
2802 {
2803 struct pmd_internals *p = dev->data->dev_private;
2804 int status;
2805
2806 /* Checks */
2807 if (p->soft.tm.hierarchy_frozen)
2808 return -rte_tm_error_set(error,
2809 EBUSY,
2810 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2811 NULL,
2812 rte_strerror(EBUSY));
2813
2814 status = hierarchy_commit_check(dev, error);
2815 if (status) {
2816 if (clear_on_fail)
2817 tm_hierarchy_free(p);
2818
2819 return status;
2820 }
2821
2822 /* Create blueprints */
2823 hierarchy_blueprints_create(dev);
2824
2825 /* Freeze hierarchy */
2826 p->soft.tm.hierarchy_frozen = 1;
2827
2828 return 0;
2829 }
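
/*
 * Illustrative application-side commit (a minimal sketch; the port_id
 * value is made up):
 *
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_hierarchy_commit(port_id, 1, &err))
 *		printf("TM commit failed: %s\n",
 *			err.message ? err.message : "unknown");
 *
 * With clear_on_fail set to 1, a failed check frees the staged hierarchy
 * so the application can rebuild it from scratch. After a successful
 * commit the hierarchy is frozen and node add/delete calls fail with
 * EBUSY.
 */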
2830
2831 #ifdef RTE_SCHED_SUBPORT_TC_OV
2832
2833 static int
2834 update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
2835 {
2836 struct pmd_internals *p = dev->data->dev_private;
2837 uint32_t pipe_id = tm_node_pipe_id(dev, np);
2838
2839 struct tm_node *ns = np->parent_node;
2840 uint32_t subport_id = tm_node_subport_id(dev, ns);
2841
2842 struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2843 struct rte_sched_pipe_params profile1;
2844 uint32_t pipe_profile_id;
2845
2846 /* Derive new pipe profile. */
2847 memcpy(&profile1, profile0, sizeof(profile1));
2848 profile1.tc_ov_weight = (uint8_t)weight;
2849
2850 /* Since the implementation does not allow adding more pipe profiles after
2851 * port configuration, the pipe configuration can be successfully
2852 * updated only if the new profile is also part of the existing set of
2853 * pipe profiles.
2854 */
2855 if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2856 return -1;
2857
2858 /* Update the pipe profile used by the current pipe. */
2859 if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2860 (int32_t)pipe_profile_id))
2861 return -1;
2862
2863 /* Commit changes. */
2864 pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2865 np->weight = weight;
2866
2867 return 0;
2868 }
2869
2870 #endif
2871
2872 static int
2873 update_queue_weight(struct rte_eth_dev *dev,
2874 struct tm_node *nq, uint32_t weight)
2875 {
2876 struct pmd_internals *p = dev->data->dev_private;
2877 uint32_t queue_id = tm_node_queue_id(dev, nq);
2878
2879 struct tm_node *nt = nq->parent_node;
2880
2881 struct tm_node *np = nt->parent_node;
2882 uint32_t pipe_id = tm_node_pipe_id(dev, np);
2883
2884 struct tm_node *ns = np->parent_node;
2885 uint32_t subport_id = tm_node_subport_id(dev, ns);
2886
2887 uint32_t pipe_be_queue_id =
2888 queue_id - RTE_SCHED_TRAFFIC_CLASS_BE;
2889
2890 struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2891 struct rte_sched_pipe_params profile1;
2892 uint32_t pipe_profile_id;
2893
2894 /* Derive new pipe profile. */
2895 memcpy(&profile1, profile0, sizeof(profile1));
2896 profile1.wrr_weights[pipe_be_queue_id] = (uint8_t)weight;
2897
2898 /* Since the implementation does not allow adding more pipe profiles after
2899 * port configuration, the pipe configuration can be successfully
2900 * updated only if the new profile is also part of the existing set
2901 * of pipe profiles.
2902 */
2903 if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2904 return -1;
2905
2906 /* Update the pipe profile used by the current pipe. */
2907 if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
2908 (int32_t)pipe_profile_id))
2909 return -1;
2910
2911 /* Commit changes. */
2912 pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2913 nq->weight = weight;
2914
2915 return 0;
2916 }
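
/*
 * Index arithmetic note (as the subtraction above implies, queue_id here
 * is the queue's index within its pipe): only best-effort queues carry
 * WRR weights. With RTE_SCHED_TRAFFIC_CLASS_BE == 12, a queue at
 * within-pipe index 14 maps to wrr_weights[14 - 12], the third of the
 * four best-effort queues in the pipe profile.
 */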
2917
2918 /* Traffic manager node parent update */
2919 static int
2920 pmd_tm_node_parent_update(struct rte_eth_dev *dev,
2921 uint32_t node_id,
2922 uint32_t parent_node_id,
2923 uint32_t priority,
2924 uint32_t weight,
2925 struct rte_tm_error *error)
2926 {
2927 struct tm_node *n;
2928
2929 /* Port must be started and TM used. */
2930 if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
2931 return -rte_tm_error_set(error,
2932 EBUSY,
2933 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2934 NULL,
2935 rte_strerror(EBUSY));
2936
2937 /* Node must be valid */
2938 n = tm_node_search(dev, node_id);
2939 if (n == NULL)
2940 return -rte_tm_error_set(error,
2941 EINVAL,
2942 RTE_TM_ERROR_TYPE_NODE_ID,
2943 NULL,
2944 rte_strerror(EINVAL));
2945
2946 /* Parent node must be the same */
2947 if (n->parent_node_id != parent_node_id)
2948 return -rte_tm_error_set(error,
2949 EINVAL,
2950 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
2951 NULL,
2952 rte_strerror(EINVAL));
2953
2954 /* Priority must be the same */
2955 if (n->priority != priority)
2956 return -rte_tm_error_set(error,
2957 EINVAL,
2958 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
2959 NULL,
2960 rte_strerror(EINVAL));
2961
2962 /* weight: must be 1 .. 254, i.e. non-zero and strictly below UINT8_MAX */
2963 if (weight == 0 || weight >= UINT8_MAX)
2964 return -rte_tm_error_set(error,
2965 EINVAL,
2966 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2967 NULL,
2968 rte_strerror(EINVAL));
2969
2970 switch (n->level) {
2971 case TM_NODE_LEVEL_PORT:
2972 return -rte_tm_error_set(error,
2973 EINVAL,
2974 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2975 NULL,
2976 rte_strerror(EINVAL));
2978 case TM_NODE_LEVEL_SUBPORT:
2979 return -rte_tm_error_set(error,
2980 EINVAL,
2981 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2982 NULL,
2983 rte_strerror(EINVAL));
2985 case TM_NODE_LEVEL_PIPE:
2986 #ifdef RTE_SCHED_SUBPORT_TC_OV
2987 if (update_pipe_weight(dev, n, weight))
2988 return -rte_tm_error_set(error,
2989 EINVAL,
2990 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2991 NULL,
2992 rte_strerror(EINVAL));
2993 return 0;
2994 #else
2995 return -rte_tm_error_set(error,
2996 EINVAL,
2997 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2998 NULL,
2999 rte_strerror(EINVAL));
3000 #endif
3002 case TM_NODE_LEVEL_TC:
3003 return -rte_tm_error_set(error,
3004 EINVAL,
3005 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
3006 NULL,
3007 rte_strerror(EINVAL));
3009 case TM_NODE_LEVEL_QUEUE:
3010 /* fall-through */
3011 default:
3012 if (update_queue_weight(dev, n, weight))
3013 return -rte_tm_error_set(error,
3014 EINVAL,
3015 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3016 NULL,
3017 rte_strerror(EINVAL));
3018 return 0;
3019 }
3020 }
3021
3022 static int
3023 update_subport_rate(struct rte_eth_dev *dev,
3024 struct tm_node *ns,
3025 struct tm_shaper_profile *sp)
3026 {
3027 struct pmd_internals *p = dev->data->dev_private;
3028 uint32_t subport_id = tm_node_subport_id(dev, ns);
3029
3030 struct rte_sched_subport_profile_params *profile0 =
3031 subport_profile_get(dev, ns);
3032 struct rte_sched_subport_profile_params profile1;
3033 uint32_t subport_profile_id;
3034
3035 if (profile0 == NULL)
3036 return -1;
3037
3038 /* Derive new subport profile. */
3039 memcpy(&profile1, profile0, sizeof(profile1));
3040 profile1.tb_rate = sp->params.peak.rate;
3041 profile1.tb_size = sp->params.peak.size;
3042
3043 /* Since the implementation does not allow adding more subport profiles
3044 * after port configuration, the subport configuration can be successfully
3045 * updated only if the new profile is also part of the existing set of
3046 * subport profiles.
3047 */
3048 if (subport_profile_exists(dev, &profile1, &subport_profile_id) == 0)
3049 return -1;
3050
3051 /* Update the subport configuration. */
3052 if (rte_sched_subport_config(SCHED(p), subport_id,
3053 NULL, subport_profile_id))
3054 return -1;
3055
3056 /* Commit changes. */
3057 ns->shaper_profile->n_users--;
3058
3059 ns->shaper_profile = sp;
3060 ns->params.shaper_profile_id = sp->shaper_profile_id;
3061 sp->n_users++;
3062
3063 subport_profile_mark(dev, subport_id, subport_profile_id);
3064
3065 memcpy(&p->soft.tm.params.subport_profile[subport_profile_id],
3066 &profile1,
3067 sizeof(profile1));
3068
3069 return 0;
3070 }
3071
3072 static int
3073 update_pipe_rate(struct rte_eth_dev *dev,
3074 struct tm_node *np,
3075 struct tm_shaper_profile *sp)
3076 {
3077 struct pmd_internals *p = dev->data->dev_private;
3078 uint32_t pipe_id = tm_node_pipe_id(dev, np);
3079
3080 struct tm_node *ns = np->parent_node;
3081 uint32_t subport_id = tm_node_subport_id(dev, ns);
3082
3083 struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
3084 struct rte_sched_pipe_params profile1;
3085 uint32_t pipe_profile_id;
3086
3087 /* Derive new pipe profile. */
3088 memcpy(&profile1, profile0, sizeof(profile1));
3089 profile1.tb_rate = sp->params.peak.rate;
3090 profile1.tb_size = sp->params.peak.size;
3091
3092 /* Since the implementation does not allow adding more pipe profiles after
3093 * port configuration, the pipe configuration can be successfully
3094 * updated only if the new profile is also part of the existing set of
3095 * pipe profiles.
3096 */
3097 if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
3098 return -1;
3099
3100 /* Update the pipe profile used by the current pipe. */
3101 if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
3102 (int32_t)pipe_profile_id))
3103 return -1;
3104
3105 /* Commit changes. */
3106 pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
3107 np->shaper_profile->n_users--;
3108 np->shaper_profile = sp;
3109 np->params.shaper_profile_id = sp->shaper_profile_id;
3110 sp->n_users++;
3111
3112 return 0;
3113 }
3114
3115 static int
3116 update_tc_rate(struct rte_eth_dev *dev,
3117 struct tm_node *nt,
3118 struct tm_shaper_profile *sp)
3119 {
3120 struct pmd_internals *p = dev->data->dev_private;
3121 uint32_t tc_id = tm_node_tc_id(dev, nt);
3122
3123 struct tm_node *np = nt->parent_node;
3124 uint32_t pipe_id = tm_node_pipe_id(dev, np);
3125
3126 struct tm_node *ns = np->parent_node;
3127 uint32_t subport_id = tm_node_subport_id(dev, ns);
3128
3129 struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
3130 struct rte_sched_pipe_params profile1;
3131 uint32_t pipe_profile_id;
3132
3133 /* Derive new pipe profile. */
3134 memcpy(&profile1, profile0, sizeof(profile1));
3135 profile1.tc_rate[tc_id] = sp->params.peak.rate;
3136
3137 /* Since the implementation does not allow adding more pipe profiles after
3138 * port configuration, the pipe configuration can be successfully
3139 * updated only if the new profile is also part of the existing set of
3140 * pipe profiles.
3141 */
3142 if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
3143 return -1;
3144
3145 /* Update the pipe profile used by the current pipe. */
3146 if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
3147 (int32_t)pipe_profile_id))
3148 return -1;
3149
3150 /* Commit changes. */
3151 pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
3152 nt->shaper_profile->n_users--;
3153 nt->shaper_profile = sp;
3154 nt->params.shaper_profile_id = sp->shaper_profile_id;
3155 sp->n_users++;
3156
3157 return 0;
3158 }
3159
3160 /* Traffic manager node shaper update */
3161 static int
3162 pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
3163 uint32_t node_id,
3164 uint32_t shaper_profile_id,
3165 struct rte_tm_error *error)
3166 {
3167 struct tm_node *n;
3168 struct tm_shaper_profile *sp;
3169
3170 /* Port must be started and TM used. */
3171 if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
3172 return -rte_tm_error_set(error,
3173 EBUSY,
3174 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3175 NULL,
3176 rte_strerror(EBUSY));
3177
3178 /* Node must be valid */
3179 n = tm_node_search(dev, node_id);
3180 if (n == NULL)
3181 return -rte_tm_error_set(error,
3182 EINVAL,
3183 RTE_TM_ERROR_TYPE_NODE_ID,
3184 NULL,
3185 rte_strerror(EINVAL));
3186
3187 /* Shaper profile must be valid. */
3188 sp = tm_shaper_profile_search(dev, shaper_profile_id);
3189 if (sp == NULL)
3190 return -rte_tm_error_set(error,
3191 EINVAL,
3192 RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
3193 NULL,
3194 rte_strerror(EINVAL));
3195
3196 switch (n->level) {
3197 case TM_NODE_LEVEL_PORT:
3198 return -rte_tm_error_set(error,
3199 EINVAL,
3200 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3201 NULL,
3202 rte_strerror(EINVAL));
3204 case TM_NODE_LEVEL_SUBPORT:
3205 if (update_subport_rate(dev, n, sp))
3206 return -rte_tm_error_set(error,
3207 EINVAL,
3208 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3209 NULL,
3210 rte_strerror(EINVAL));
3211 return 0;
3213 case TM_NODE_LEVEL_PIPE:
3214 if (update_pipe_rate(dev, n, sp))
3215 return -rte_tm_error_set(error,
3216 EINVAL,
3217 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3218 NULL,
3219 rte_strerror(EINVAL));
3220 return 0;
3222 case TM_NODE_LEVEL_TC:
3223 if (update_tc_rate(dev, n, sp))
3224 return -rte_tm_error_set(error,
3225 EINVAL,
3226 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3227 NULL,
3228 rte_strerror(EINVAL));
3229 return 0;
3231 case TM_NODE_LEVEL_QUEUE:
3232 /* fall-through */
3233 default:
3234 return -rte_tm_error_set(error,
3235 EINVAL,
3236 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3237 NULL,
3238 rte_strerror(EINVAL));
3239 }
3240 }
3241
3242 static inline uint32_t
3243 tm_port_queue_id(struct rte_eth_dev *dev,
3244 uint32_t port_subport_id,
3245 uint32_t subport_pipe_id,
3246 uint32_t pipe_tc_id,
3247 uint32_t tc_queue_id)
3248 {
3249 struct pmd_internals *p = dev->data->dev_private;
3250 struct tm_hierarchy *h = &p->soft.tm.h;
3251 uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
3252 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3253
3254 uint32_t port_pipe_id =
3255 port_subport_id * n_pipes_per_subport + subport_pipe_id;
3256
3257 uint32_t port_queue_id =
3258 port_pipe_id * RTE_SCHED_QUEUES_PER_PIPE + pipe_tc_id + tc_queue_id;
3259
3260 return port_queue_id;
3261 }
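
/*
 * Worked example for tm_port_queue_id() (illustrative numbers only):
 * with 4 pipes per subport and RTE_SCHED_QUEUES_PER_PIPE == 16, queue
 * tc_queue_id = 1 of the best-effort TC (pipe_tc_id == 12) of pipe 2 of
 * subport 1 yields
 *
 *	port_pipe_id  = 1 * 4 + 2 = 6
 *	port_queue_id = 6 * 16 + 12 + 1 = 109
 *
 * i.e. queues are numbered linearly across the whole port.
 */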
3262
3263 static int
3264 read_port_stats(struct rte_eth_dev *dev,
3265 struct tm_node *nr,
3266 struct rte_tm_node_stats *stats,
3267 uint64_t *stats_mask,
3268 int clear)
3269 {
3270 struct pmd_internals *p = dev->data->dev_private;
3271 struct tm_hierarchy *h = &p->soft.tm.h;
3272 uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3273 uint32_t subport_id;
3274
3275 for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
3276 struct rte_sched_subport_stats s;
3277 uint32_t tc_ov, id;
3278
3279 /* Stats read */
3280 int status = rte_sched_subport_read_stats(SCHED(p),
3281 subport_id,
3282 &s,
3283 &tc_ov);
3284 if (status)
3285 return status;
3286
3287 /* Stats accumulate */
3288 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
3289 nr->stats.n_pkts +=
3290 s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
3291 nr->stats.n_bytes +=
3292 s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
3293 nr->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
3294 s.n_pkts_tc_dropped[id];
3295 nr->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3296 s.n_bytes_tc_dropped[id];
3297 }
3298 }
3299
3300 /* Stats copy */
3301 if (stats)
3302 memcpy(stats, &nr->stats, sizeof(*stats));
3303
3304 if (stats_mask)
3305 *stats_mask = STATS_MASK_DEFAULT;
3306
3307 /* Stats clear */
3308 if (clear)
3309 memset(&nr->stats, 0, sizeof(nr->stats));
3310
3311 return 0;
3312 }
3313
3314 static int
3315 read_subport_stats(struct rte_eth_dev *dev,
3316 struct tm_node *ns,
3317 struct rte_tm_node_stats *stats,
3318 uint64_t *stats_mask,
3319 int clear)
3320 {
3321 struct pmd_internals *p = dev->data->dev_private;
3322 uint32_t subport_id = tm_node_subport_id(dev, ns);
3323 struct rte_sched_subport_stats s;
3324 uint32_t tc_ov, tc_id;
3325
3326 /* Stats read */
3327 int status = rte_sched_subport_read_stats(SCHED(p),
3328 subport_id,
3329 &s,
3330 &tc_ov);
3331 if (status)
3332 return status;
3333
3334 /* Stats accumulate */
3335 for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
3336 ns->stats.n_pkts +=
3337 s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
3338 ns->stats.n_bytes +=
3339 s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
3340 ns->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
3341 s.n_pkts_tc_dropped[tc_id];
3342 ns->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3343 s.n_bytes_tc_dropped[tc_id];
3344 }
3345
3346 /* Stats copy */
3347 if (stats)
3348 memcpy(stats, &ns->stats, sizeof(*stats));
3349
3350 if (stats_mask)
3351 *stats_mask = STATS_MASK_DEFAULT;
3352
3353 /* Stats clear */
3354 if (clear)
3355 memset(&ns->stats, 0, sizeof(ns->stats));
3356
3357 return 0;
3358 }
3359
3360 static int
3361 read_pipe_stats(struct rte_eth_dev *dev,
3362 struct tm_node *np,
3363 struct rte_tm_node_stats *stats,
3364 uint64_t *stats_mask,
3365 int clear)
3366 {
3367 struct pmd_internals *p = dev->data->dev_private;
3368
3369 uint32_t pipe_id = tm_node_pipe_id(dev, np);
3370
3371 struct tm_node *ns = np->parent_node;
3372 uint32_t subport_id = tm_node_subport_id(dev, ns);
3373 uint32_t tc_id, queue_id;
3374 uint32_t i;
3375
3376 /* Stats read */
3377 for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
3378 struct rte_sched_queue_stats s;
3379 uint16_t qlen;
3380
3381 if (i < RTE_SCHED_TRAFFIC_CLASS_BE) {
3382 tc_id = i;
3383 queue_id = i;
3384 } else {
3385 tc_id = RTE_SCHED_TRAFFIC_CLASS_BE;
3386 queue_id = i - tc_id;
3387 }
3388
3389 uint32_t qid = tm_port_queue_id(dev,
3390 subport_id,
3391 pipe_id,
3392 tc_id,
3393 queue_id);
3394
3395 int status = rte_sched_queue_read_stats(SCHED(p),
3396 qid,
3397 &s,
3398 &qlen);
3399 if (status)
3400 return status;
3401
3402 /* Stats accumulate */
3403 np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3404 np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3405 np->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3406 np->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3407 s.n_bytes_dropped;
3408 np->stats.leaf.n_pkts_queued = qlen;
3409 }
3410
3411 /* Stats copy */
3412 if (stats)
3413 memcpy(stats, &np->stats, sizeof(*stats));
3414
3415 if (stats_mask)
3416 *stats_mask = STATS_MASK_DEFAULT;
3417
3418 /* Stats clear */
3419 if (clear)
3420 memset(&np->stats, 0, sizeof(np->stats));
3421
3422 return 0;
3423 }
3424
3425 static int
3426 read_tc_stats(struct rte_eth_dev *dev,
3427 struct tm_node *nt,
3428 struct rte_tm_node_stats *stats,
3429 uint64_t *stats_mask,
3430 int clear)
3431 {
3432 struct pmd_internals *p = dev->data->dev_private;
3433
3434 uint32_t tc_id = tm_node_tc_id(dev, nt);
3435
3436 struct tm_node *np = nt->parent_node;
3437 uint32_t pipe_id = tm_node_pipe_id(dev, np);
3438
3439 struct tm_node *ns = np->parent_node;
3440 uint32_t subport_id = tm_node_subport_id(dev, ns);
3441 struct rte_sched_queue_stats s;
3442 uint32_t qid, i;
3443 uint16_t qlen;
3444 int status;
3445
3446 /* Stats read */
3447 if (tc_id < RTE_SCHED_TRAFFIC_CLASS_BE) {
3448 qid = tm_port_queue_id(dev,
3449 subport_id,
3450 pipe_id,
3451 tc_id,
3452 0);
3453
3454 status = rte_sched_queue_read_stats(SCHED(p),
3455 qid,
3456 &s,
3457 &qlen);
3458 if (status)
3459 return status;
3460
3461 /* Stats accumulate */
3462 nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3463 nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3464 nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3465 nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3466 s.n_bytes_dropped;
3467 nt->stats.leaf.n_pkts_queued = qlen;
3468 } else {
3469 for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
3470 qid = tm_port_queue_id(dev,
3471 subport_id,
3472 pipe_id,
3473 tc_id,
3474 i);
3475
3476 status = rte_sched_queue_read_stats(SCHED(p),
3477 qid,
3478 &s,
3479 &qlen);
3480 if (status)
3481 return status;
3482
3483 /* Stats accumulate */
3484 nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3485 nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3486 nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
3487 s.n_pkts_dropped;
3488 nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3489 s.n_bytes_dropped;
3490 nt->stats.leaf.n_pkts_queued = qlen;
3491 }
3492 }
3493
3494 /* Stats copy */
3495 if (stats)
3496 memcpy(stats, &nt->stats, sizeof(*stats));
3497
3498 if (stats_mask)
3499 *stats_mask = STATS_MASK_DEFAULT;
3500
3501 /* Stats clear */
3502 if (clear)
3503 memset(&nt->stats, 0, sizeof(nt->stats));
3504
3505 return 0;
3506 }
3507
3508 static int
3509 read_queue_stats(struct rte_eth_dev *dev,
3510 struct tm_node *nq,
3511 struct rte_tm_node_stats *stats,
3512 uint64_t *stats_mask,
3513 int clear)
3514 {
3515 struct pmd_internals *p = dev->data->dev_private;
3516 struct rte_sched_queue_stats s;
3517 uint16_t qlen;
3518
3519 uint32_t queue_id = tm_node_queue_id(dev, nq);
3520
3521 struct tm_node *nt = nq->parent_node;
3522 uint32_t tc_id = tm_node_tc_id(dev, nt);
3523
3524 struct tm_node *np = nt->parent_node;
3525 uint32_t pipe_id = tm_node_pipe_id(dev, np);
3526
3527 struct tm_node *ns = np->parent_node;
3528 uint32_t subport_id = tm_node_subport_id(dev, ns);
3529
3530 /* Stats read */
3531 uint32_t qid = tm_port_queue_id(dev,
3532 subport_id,
3533 pipe_id,
3534 tc_id,
3535 queue_id);
3536
3537 int status = rte_sched_queue_read_stats(SCHED(p),
3538 qid,
3539 &s,
3540 &qlen);
3541 if (status)
3542 return status;
3543
3544 /* Stats accumulate */
3545 nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3546 nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3547 nq->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
3548 nq->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
3549 s.n_bytes_dropped;
3550 nq->stats.leaf.n_pkts_queued = qlen;
3551
3552 /* Stats copy */
3553 if (stats)
3554 memcpy(stats, &nq->stats, sizeof(*stats));
3555
3556 if (stats_mask)
3557 *stats_mask = STATS_MASK_QUEUE;
3558
3559 /* Stats clear */
3560 if (clear)
3561 memset(&nq->stats, 0, sizeof(nq->stats));
3562
3563 return 0;
3564 }
3565
3566 /* Traffic manager read stats counters for specific node */
3567 static int
3568 pmd_tm_node_stats_read(struct rte_eth_dev *dev,
3569 uint32_t node_id,
3570 struct rte_tm_node_stats *stats,
3571 uint64_t *stats_mask,
3572 int clear,
3573 struct rte_tm_error *error)
3574 {
3575 struct tm_node *n;
3576
3577 /* Port must be started and TM used. */
3578 if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
3579 return -rte_tm_error_set(error,
3580 EBUSY,
3581 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3582 NULL,
3583 rte_strerror(EBUSY));
3584
3585 /* Node must be valid */
3586 n = tm_node_search(dev, node_id);
3587 if (n == NULL)
3588 return -rte_tm_error_set(error,
3589 EINVAL,
3590 RTE_TM_ERROR_TYPE_NODE_ID,
3591 NULL,
3592 rte_strerror(EINVAL));
3593
3594 switch (n->level) {
3595 case TM_NODE_LEVEL_PORT:
3596 if (read_port_stats(dev, n, stats, stats_mask, clear))
3597 return -rte_tm_error_set(error,
3598 EINVAL,
3599 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3600 NULL,
3601 rte_strerror(EINVAL));
3602 return 0;
3603
3604 case TM_NODE_LEVEL_SUBPORT:
3605 if (read_subport_stats(dev, n, stats, stats_mask, clear))
3606 return -rte_tm_error_set(error,
3607 EINVAL,
3608 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3609 NULL,
3610 rte_strerror(EINVAL));
3611 return 0;
3612
3613 case TM_NODE_LEVEL_PIPE:
3614 if (read_pipe_stats(dev, n, stats, stats_mask, clear))
3615 return -rte_tm_error_set(error,
3616 EINVAL,
3617 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3618 NULL,
3619 rte_strerror(EINVAL));
3620 return 0;
3621
3622 case TM_NODE_LEVEL_TC:
3623 if (read_tc_stats(dev, n, stats, stats_mask, clear))
3624 return -rte_tm_error_set(error,
3625 EINVAL,
3626 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3627 NULL,
3628 rte_strerror(EINVAL));
3629 return 0;
3630
3631 case TM_NODE_LEVEL_QUEUE:
3632 default:
3633 if (read_queue_stats(dev, n, stats, stats_mask, clear))
3634 return -rte_tm_error_set(error,
3635 EINVAL,
3636 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3637 NULL,
3638 rte_strerror(EINVAL));
3639 return 0;
3640 }
3641 }
3642
3643 const struct rte_tm_ops pmd_tm_ops = {
3644 .node_type_get = pmd_tm_node_type_get,
3645 .capabilities_get = pmd_tm_capabilities_get,
3646 .level_capabilities_get = pmd_tm_level_capabilities_get,
3647 .node_capabilities_get = pmd_tm_node_capabilities_get,
3648
3649 .wred_profile_add = pmd_tm_wred_profile_add,
3650 .wred_profile_delete = pmd_tm_wred_profile_delete,
3651 .shared_wred_context_add_update = NULL,
3652 .shared_wred_context_delete = NULL,
3653
3654 .shaper_profile_add = pmd_tm_shaper_profile_add,
3655 .shaper_profile_delete = pmd_tm_shaper_profile_delete,
3656 .shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
3657 .shared_shaper_delete = pmd_tm_shared_shaper_delete,
3658
3659 .node_add = pmd_tm_node_add,
3660 .node_delete = pmd_tm_node_delete,
3661 .node_suspend = NULL,
3662 .node_resume = NULL,
3663 .hierarchy_commit = pmd_tm_hierarchy_commit,
3664
3665 .node_parent_update = pmd_tm_node_parent_update,
3666 .node_shaper_update = pmd_tm_node_shaper_update,
3667 .node_shared_shaper_update = NULL,
3668 .node_stats_update = NULL,
3669 .node_wfq_weight_mode_update = NULL,
3670 .node_cman_update = NULL,
3671 .node_wred_context_update = NULL,
3672 .node_shared_wred_context_update = NULL,
3673
3674 .node_stats_read = pmd_tm_node_stats_read,
3675 };
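
/*
 * Illustrative application-side stats read through the ops table above
 * (a minimal sketch; the port_id and node id values are made up and
 * error handling is elided):
 *
 *	struct rte_tm_node_stats st;
 *	uint64_t mask;
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_node_stats_read(port_id, 5000, &st, &mask, 0, &err) == 0)
 *		printf("pkts=%" PRIu64 " bytes=%" PRIu64 "\n",
 *			st.n_pkts, st.n_bytes);
 *
 * The rte_tm API resolves pmd_tm_ops through the ethdev tm_ops_get
 * callback and then dispatches to .node_stats_read above.
 */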
3676