/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020-2021 NXP
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_tm_driver.h>

#include "dpaa2_ethdev.h"
#include "dpaa2_pmd_logs.h"
#include <dpaa2_hw_dpio.h>

#define DPAA2_BURST_MAX (64 * 1024)

#define DPAA2_SHAPER_MIN_RATE 0
#define DPAA2_SHAPER_MAX_RATE 107374182400ull
#define DPAA2_WEIGHT_MAX 24701
#define DPAA2_PKT_ADJUST_LEN_MIN 0
#define DPAA2_PKT_ADJUST_LEN_MAX 0x7ff
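
/*
 * The TM hierarchy exposed by this driver has three levels: the LNI (port)
 * root node at LNI_LEVEL, one node per hardware channel at CHANNEL_LEVEL and
 * one leaf node per TX queue (traffic class) at QUEUE_LEVEL.
 *
 * Rough usage sketch (not a complete sequence): an application creates the
 * nodes top-down with rte_tm_node_add(), optionally attaching profiles
 * created with rte_tm_shaper_profile_add(), and then applies the hierarchy
 * with rte_tm_hierarchy_commit().
 *
 * WFQ weights are programmed into hardware in multiples of 100, which is why
 * the capabilities below report DPAA2_WEIGHT_MAX / 100.
 */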

int
dpaa2_tm_init(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	LIST_INIT(&priv->shaper_profiles);
	LIST_INIT(&priv->nodes);

	return 0;
}

void dpaa2_tm_deinit(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile =
		LIST_FIRST(&priv->shaper_profiles);
	struct dpaa2_tm_node *node = LIST_FIRST(&priv->nodes);

	while (profile) {
		struct dpaa2_tm_shaper_profile *next = LIST_NEXT(profile, next);

		LIST_REMOVE(profile, next);
		rte_free(profile);
		profile = next;
	}

	while (node) {
		struct dpaa2_tm_node *next = LIST_NEXT(node, next);

		LIST_REMOVE(node, next);
		rte_free(node);
		node = next;
	}
}

static struct dpaa2_tm_node *
dpaa2_node_from_id(struct dpaa2_dev_priv *priv, uint32_t node_id)
{
	struct dpaa2_tm_node *node;

	LIST_FOREACH(node, &priv->nodes, next)
		if (node->id == node_id)
			return node;

	return NULL;
}

static int
dpaa2_capabilities_get(struct rte_eth_dev *dev,
		struct rte_tm_capabilities *cap,
		struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL, "Capabilities are NULL\n");

	memset(cap, 0, sizeof(*cap));

	/* root node(port) + channels + txqs number, assuming each TX
	 * Queue is mapped to each TC
	 */
	cap->n_nodes_max = 1 + priv->num_channels + dev->data->nb_tx_queues;
	cap->n_levels_max = MAX_LEVEL;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->shaper_n_max = 1 + priv->num_channels; /* LNI + channels */
	cap->shaper_private_n_max = 1 + priv->num_channels;
	cap->shaper_private_dual_rate_n_max = 1 + priv->num_channels;
	cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
	cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
	cap->shaper_pkt_length_adjust_min = DPAA2_PKT_ADJUST_LEN_MIN;
	cap->shaper_pkt_length_adjust_max = DPAA2_PKT_ADJUST_LEN_MAX;

	if (priv->num_channels > DPNI_MAX_TC)
		cap->sched_n_children_max = priv->num_channels;
	else
		cap->sched_n_children_max = DPNI_MAX_TC;

	cap->sched_sp_n_priorities_max = DPNI_MAX_TC;
	cap->sched_wfq_n_children_per_group_max = DPNI_MAX_TC;
	cap->sched_wfq_n_groups_max = 2;
	cap->sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
	cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;

	return 0;
}

static int
dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
		uint32_t level_id,
		struct rte_tm_level_capabilities *cap,
		struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL, NULL);

	memset(cap, 0, sizeof(*cap));

	if (level_id > QUEUE_LEVEL)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL, "Wrong level id\n");

	if (level_id == LNI_LEVEL) { /* Root node (LNI) */
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->non_leaf_nodes_identical = 1;

		cap->nonleaf.shaper_private_supported = 1;
		cap->nonleaf.shaper_private_dual_rate_supported = 1;
		cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
		cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;

		cap->nonleaf.sched_n_children_max = priv->num_channels; /* no. of channels */
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max = 1;
		cap->nonleaf.stats_mask = RTE_TM_STATS_N_PKTS |
					  RTE_TM_STATS_N_BYTES;
	} else if (level_id == CHANNEL_LEVEL) { /* channels */
		cap->n_nodes_max = priv->num_channels;
		cap->n_nodes_nonleaf_max = priv->num_channels;
		cap->n_nodes_leaf_max = 0;
		cap->non_leaf_nodes_identical = 1;

		cap->nonleaf.shaper_private_supported = 1;
		cap->nonleaf.shaper_private_dual_rate_supported = 1;
		cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
		cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;

		/* no. of class queues per channel */
		cap->nonleaf.sched_n_children_max = priv->num_tx_tc;
		cap->nonleaf.sched_sp_n_priorities_max = priv->num_tx_tc;
		cap->nonleaf.sched_wfq_n_children_per_group_max = priv->num_tx_tc;
		cap->nonleaf.sched_wfq_n_groups_max = 2;
		cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
	} else { /* leaf nodes */
		/* queues per channels * channel */
		cap->n_nodes_max = priv->num_tx_tc * priv->num_channels;
		cap->n_nodes_leaf_max = priv->num_tx_tc * priv->num_channels;
		cap->leaf_nodes_identical = 1;

		cap->leaf.shaper_private_supported = 0;
		cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS |
				       RTE_TM_STATS_N_BYTES;
	}

	return 0;
}

static int
dpaa2_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id,
		struct rte_tm_node_capabilities *cap,
		struct rte_tm_error *error)
{
	struct dpaa2_tm_node *node;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL, NULL);

	memset(cap, 0, sizeof(*cap));

	node = dpaa2_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL, "Node id does not exist\n");

	if (node->level_id == LNI_LEVEL) {
		cap->shaper_private_supported = 1;
		cap->shaper_private_dual_rate_supported = 1;
		cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
		cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;

		cap->nonleaf.sched_n_children_max = priv->num_channels;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max = 1;
		cap->stats_mask = RTE_TM_STATS_N_PKTS |
				  RTE_TM_STATS_N_BYTES;
	} else if (node->level_id == CHANNEL_LEVEL) {
		cap->shaper_private_supported = 1;
		cap->shaper_private_dual_rate_supported = 1;
		cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
		cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;

		cap->nonleaf.sched_n_children_max = priv->num_tx_tc;
		cap->nonleaf.sched_sp_n_priorities_max = priv->num_tx_tc;
		cap->nonleaf.sched_wfq_n_children_per_group_max = priv->num_tx_tc;
		cap->nonleaf.sched_wfq_n_groups_max = 2;
		cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
	} else {
		cap->stats_mask = RTE_TM_STATS_N_PKTS |
				  RTE_TM_STATS_N_BYTES;
	}

	return 0;
}

static int
dpaa2_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, int *is_leaf,
		struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_node *node;

	if (!is_leaf)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL, NULL);

	node = dpaa2_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL, "Node id does not exist\n");

	*is_leaf = node->type == LEAF_NODE ? 1 : 0;

	return 0;
}

static struct dpaa2_tm_shaper_profile *
dpaa2_shaper_profile_from_id(struct dpaa2_dev_priv *priv,
		uint32_t shaper_profile_id)
{
	struct dpaa2_tm_shaper_profile *profile;

	LIST_FOREACH(profile, &priv->shaper_profiles, next)
		if (profile->id == shaper_profile_id)
			return profile;

	return NULL;
}

static int
dpaa2_shaper_profile_add(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
		struct rte_tm_shaper_params *params,
		struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile;

	if (!params)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL, NULL);
	if (params->committed.rate > DPAA2_SHAPER_MAX_RATE)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
			NULL, "Committed rate is out of range\n");

	if (params->committed.size > DPAA2_BURST_MAX)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE,
			NULL, "Committed size is out of range\n");

	if (params->peak.rate > DPAA2_SHAPER_MAX_RATE)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			NULL, "Peak rate is out of range\n");

	if (params->peak.size > DPAA2_BURST_MAX)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
			NULL, "Peak size is out of range\n");

	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL, "Wrong shaper profile id\n");

	if (params->pkt_length_adjust > DPAA2_PKT_ADJUST_LEN_MAX ||
	    params->pkt_length_adjust < DPAA2_PKT_ADJUST_LEN_MIN)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			"Unsupported packet length adjustment\n");

	profile = dpaa2_shaper_profile_from_id(priv, shaper_profile_id);
	if (profile)
		return -rte_tm_error_set(error, EEXIST,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL, "Profile id already exists\n");

	profile = rte_zmalloc_socket(NULL, sizeof(*profile), 0,
				     rte_socket_id());
	if (!profile)
		return -rte_tm_error_set(error, ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL, NULL);

	profile->id = shaper_profile_id;
	rte_memcpy(&profile->params, params, sizeof(profile->params));

	LIST_INSERT_HEAD(&priv->shaper_profiles, profile, next);

	return 0;
}

static int
dpaa2_shaper_profile_delete(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
		struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile;

	profile = dpaa2_shaper_profile_from_id(priv, shaper_profile_id);
	if (!profile)
		return -rte_tm_error_set(error, ENODEV,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL, "Profile id does not exist\n");

	if (profile->refcnt)
		return -rte_tm_error_set(error, EPERM,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL, "Profile is used\n");

	LIST_REMOVE(profile, next);
	rte_free(profile);

	return 0;
}

static int
dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
		__rte_unused uint32_t priority, uint32_t weight,
		uint32_t level_id,
		struct rte_tm_node_params *params,
		struct rte_tm_error *error)
{
	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL, "Node id is invalid\n");

	if (weight > DPAA2_WEIGHT_MAX)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL, "Weight is out of range\n");

	if (level_id > QUEUE_LEVEL)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL, "Wrong level id\n");

	if (!params)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL, NULL);

	if (params->shared_shaper_id)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
			NULL, "Shared shaper is not supported\n");

	if (params->n_shared_shapers)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL, "Shared shaper is not supported\n");

	/* verify non leaf nodes settings */
	if (node_id >= dev->data->nb_tx_queues) {
		if (params->nonleaf.wfq_weight_mode)
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
				NULL, "WFQ weight mode is not supported\n");
	} else {
		if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
				NULL, "Private shaper not supported on leaf\n");
	}

	/* check leaf node */
	if (level_id == QUEUE_LEVEL) {
		if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP)
			return -rte_tm_error_set(error, ENODEV,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
				NULL, "Only taildrop is supported\n");
		if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
					   RTE_TM_STATS_N_BYTES))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
				NULL,
				"Requested port stats are not supported\n");
	} else if (level_id == LNI_LEVEL) {
		if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
					   RTE_TM_STATS_N_BYTES))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
				NULL,
				"Requested port stats are not supported\n");
	}

	return 0;
}

static int
dpaa2_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		uint32_t parent_node_id, uint32_t priority, uint32_t weight,
		uint32_t level_id, struct rte_tm_node_params *params,
		struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile = NULL;
	struct dpaa2_tm_node *node, *parent = NULL;
	int ret;

	if (0/* If device is started*/)
		return -rte_tm_error_set(error, EPERM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL, "Port is already started\n");

	ret = dpaa2_node_check_params(dev, node_id, priority, weight, level_id,
				      params, error);
	if (ret)
		return ret;

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		profile = dpaa2_shaper_profile_from_id(priv,
					params->shaper_profile_id);
		if (!profile)
			return -rte_tm_error_set(error, ENODEV,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
				NULL, "Shaper id does not exist\n");
	}
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		LIST_FOREACH(node, &priv->nodes, next) {
			if (node->level_id != LNI_LEVEL)
				continue;

			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL, "Root node exists\n");
		}
	} else {
		parent = dpaa2_node_from_id(priv, parent_node_id);
		if (!parent)
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL, "Parent node id does not exist\n");
	}

	node = dpaa2_node_from_id(priv, node_id);
	if (node)
		return -rte_tm_error_set(error, ENODEV,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL, "Node id already exists\n");

	node = rte_zmalloc_socket(NULL, sizeof(*node), 0, rte_socket_id());
	if (!node)
		return -rte_tm_error_set(error, ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL, NULL);

	node->id = node_id;

	if (node_id > dev->data->nb_tx_queues)
		node->type = NON_LEAF_NODE;
	else
		node->type = LEAF_NODE;

	node->level_id = level_id;
	if (node->level_id == CHANNEL_LEVEL) {
		if (priv->channel_inuse < priv->num_channels) {
			node->channel_id = priv->channel_inuse;
			priv->channel_inuse++;
		} else {
			DPAA2_PMD_ERR("No channel id available");
		}
	}

	if (parent) {
		node->parent = parent;
		parent->refcnt++;
	}

	/* TODO: add check if refcnt is more than supported children */

	if (profile) {
		node->profile = profile;
		profile->refcnt++;
	}

	node->weight = weight;
	node->priority = priority;
	node->stats_mask = params->stats_mask;

	LIST_INSERT_HEAD(&priv->nodes, node, next);

	return 0;
}

static int
dpaa2_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_node *node;

	/* XXX: update it */
	if (0) {
		return -rte_tm_error_set(error, EPERM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL, "Port is already started\n");
	}

	node = dpaa2_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL, "Node id does not exist\n");

	if (node->refcnt)
		return -rte_tm_error_set(error, EPERM,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL, "Node id is used\n");

	if (node->parent)
		node->parent->refcnt--;

	if (node->profile)
		node->profile->refcnt--;

	LIST_REMOVE(node, next);
	rte_free(node);

	return 0;
}

static int
dpaa2_tm_configure_queue(struct rte_eth_dev *dev, struct dpaa2_tm_node *node)
{
	int ret = 0;
	uint32_t tc_id;
	uint8_t flow_id, options = 0;
	struct dpni_queue tx_flow_cfg;
	struct dpni_queue_id qid;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;

	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
	dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id];
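	/*
	 * Each leaf consumes the next TC index of its parent channel;
	 * dpaa2_tm_sort_and_configure() calls this in ascending priority
	 * order, so lower priority values map to lower TC indices.
	 */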
	tc_id = node->parent->tc_id;
	node->parent->tc_id++;
	flow_id = 0;

	if (dpaa2_q == NULL) {
		DPAA2_PMD_ERR("Queue is not configured for node = %d", node->id);
		return -1;
	}

	DPAA2_PMD_DEBUG("tc_id = %d, channel = %d", tc_id,
			node->parent->channel_id);
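	/*
	 * The queue selector passed to the DPNI API packs the channel index
	 * in the upper byte and the traffic class in the lower byte, as
	 * encoded here.
	 */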
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			((node->parent->channel_id << 8) | tc_id),
			flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			"channel id = %d tc_id = %d, param = 0x%x "
			"flow = %d err = %d", node->parent->channel_id, tc_id,
			((node->parent->channel_id << 8) | tc_id), flow_id,
			ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;
	dpaa2_q->tc_index = tc_id;

	ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
			DPNI_QUEUE_TX,
			((node->parent->channel_id << 8) | dpaa2_q->tc_index),
			dpaa2_q->flow_id, &tx_flow_cfg, &qid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting LFQID err = %d", ret);
		return -1;
	}
	dpaa2_q->fqid = qid.fqid;

	/* setting congestion notification */
	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg = {0};

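		/*
		 * On congestion state changes the hardware writes a state
		 * change notification into the queue's CSCN area pointed to
		 * by message_iova below.
		 */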
		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = dpaa2_q->nb_desc;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold (90% of the entry value).
		 */
		cong_notif_cfg.threshold_exit = (dpaa2_q->nb_desc * 9) / 10;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova =
			(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
			DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
			DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
			DPNI_CONG_OPT_COHERENT_WRITE;
		cong_notif_cfg.cg_point = DPNI_CP_QUEUE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
				priv->token,
				DPNI_QUEUE_TX,
				((node->parent->channel_id << 8) | tc_id),
				&cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting tx congestion notification: err = %d",
				      ret);
			return -ret;
		}
	}

	return 0;
}

static void
dpaa2_tm_sort_and_configure(struct rte_eth_dev *dev,
		struct dpaa2_tm_node **nodes, int n)
{
	struct dpaa2_tm_node *temp_node;
	int i;

	if (n == 1) {
		DPAA2_PMD_DEBUG("node id = %d, priority = %d, index = %d",
				nodes[n - 1]->id, nodes[n - 1]->priority,
				n - 1);
		dpaa2_tm_configure_queue(dev, nodes[n - 1]);
		return;
	}

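	/*
	 * One bubble pass moves the node with the highest priority value to
	 * the end of the array; the recursion below handles the remaining
	 * nodes first, so queues end up configured in ascending priority
	 * order (and therefore on ascending TC indices).
	 */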
	for (i = 0; i < n - 1; i++) {
		if (nodes[i]->priority > nodes[i + 1]->priority) {
			temp_node = nodes[i];
			nodes[i] = nodes[i + 1];
			nodes[i + 1] = temp_node;
		}
	}
	dpaa2_tm_sort_and_configure(dev, nodes, n - 1);

	DPAA2_PMD_DEBUG("node id = %d, priority = %d, index = %d",
			nodes[n - 1]->id, nodes[n - 1]->priority,
			n - 1);
	dpaa2_tm_configure_queue(dev, nodes[n - 1]);
}

static int
dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
		struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_node *node;
	struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	int ret, t;

	/* Populate TCs */
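	/*
	 * For each channel node, collect its leaf children and configure
	 * their TX queues in priority order; each channel can feed at most
	 * DPNI_MAX_TC traffic classes.
	 */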
	LIST_FOREACH(channel_node, &priv->nodes, next) {
		struct dpaa2_tm_node *nodes[DPNI_MAX_TC];
		int i = 0;

		if (channel_node->level_id != CHANNEL_LEVEL)
			continue;

		LIST_FOREACH(leaf_node, &priv->nodes, next) {
			if (leaf_node->level_id == LNI_LEVEL ||
			    leaf_node->level_id == CHANNEL_LEVEL)
				continue;

			if (leaf_node->parent == channel_node) {
				if (i >= DPNI_MAX_TC) {
					ret = -rte_tm_error_set(error, EINVAL,
						RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
						"More children than supported\n");
					goto out;
				}
				nodes[i++] = leaf_node;
			}
		}
		if (i > 0) {
			DPAA2_PMD_DEBUG("Configure queues\n");
			dpaa2_tm_sort_and_configure(dev, nodes, i);
		}
	}

	/* Shaping */
	LIST_FOREACH(node, &priv->nodes, next) {
		if (node->type == NON_LEAF_NODE) {
			if (!node->profile)
				continue;
			struct dpni_tx_shaping_cfg tx_cr_shaper, tx_er_shaper;
			uint32_t param = 0;

			tx_cr_shaper.max_burst_size =
				node->profile->params.committed.size;
			tx_cr_shaper.rate_limit =
				node->profile->params.committed.rate /
				(1024 * 1024);
			tx_er_shaper.max_burst_size =
				node->profile->params.peak.size;
			tx_er_shaper.rate_limit =
				node->profile->params.peak.rate / (1024 * 1024);
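			/*
			 * As encoded here, the shaping "param" word selects
			 * the LNI shaper with bit 1 (0x2) and carries the
			 * packet length adjustment from bit 16 upwards; for a
			 * channel shaper the channel index goes into bits 8-15.
			 */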
			/* root node */
			if (node->parent == NULL) {
				DPAA2_PMD_DEBUG("LNI S.rate = %u, burst =%u\n",
						tx_cr_shaper.rate_limit,
						tx_cr_shaper.max_burst_size);
				param = 0x2;
				param |= node->profile->params.pkt_length_adjust << 16;
			} else {
				DPAA2_PMD_DEBUG("Channel = %d S.rate = %u\n",
						node->channel_id,
						tx_cr_shaper.rate_limit);
				param = (node->channel_id << 8);
			}
			ret = dpni_set_tx_shaping(dpni, 0, priv->token,
					&tx_cr_shaper, &tx_er_shaper, param);
			if (ret) {
				ret = -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_SHAPER_PROFILE, NULL,
					"Error in setting Shaping\n");
				goto out;
			}
			continue;
		}
	}

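	/*
	 * Scheduling: within each channel, leaf nodes that share a priority
	 * form a WFQ group; the hardware supports at most two such groups
	 * (A and B) per channel, and weights are programmed in multiples
	 * of 100.
	 */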
	LIST_FOREACH(channel_node, &priv->nodes, next) {
		int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
		struct dpni_tx_priorities_cfg prio_cfg;

		memset(&prio_cfg, 0, sizeof(prio_cfg));
		memset(conf, 0, sizeof(conf));

		/* Process for each channel */
		if (channel_node->level_id != CHANNEL_LEVEL)
			continue;

		LIST_FOREACH(leaf_node, &priv->nodes, next) {
			struct dpaa2_queue *leaf_dpaa2_q;
			uint8_t leaf_tc_id;

			if (leaf_node->level_id == LNI_LEVEL ||
			    leaf_node->level_id == CHANNEL_LEVEL)
				continue;

			/* level 2, all leaf nodes */
			if (leaf_node->id >= dev->data->nb_tx_queues) {
				ret = -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_NODE_ID, NULL,
					"Not enough txqs configured\n");
				goto out;
			}

			if (conf[leaf_node->id])
				continue;

			if (leaf_node->parent != channel_node)
				continue;

			leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id];
			leaf_tc_id = leaf_dpaa2_q->tc_index;
			/* Process sibling leaf nodes */
			LIST_FOREACH(temp_leaf_node, &priv->nodes, next) {
				if (temp_leaf_node->id == leaf_node->id ||
				    temp_leaf_node->level_id == LNI_LEVEL ||
				    temp_leaf_node->level_id == CHANNEL_LEVEL)
					continue;

				if (temp_leaf_node->parent != channel_node)
					continue;

				if (conf[temp_leaf_node->id])
					continue;

				if (leaf_node->priority == temp_leaf_node->priority) {
					struct dpaa2_queue *temp_leaf_dpaa2_q;
					uint8_t temp_leaf_tc_id;

					temp_leaf_dpaa2_q = (struct dpaa2_queue *)
						dev->data->tx_queues[temp_leaf_node->id];
					temp_leaf_tc_id = temp_leaf_dpaa2_q->tc_index;
					if (wfq_grp == 0) {
						prio_cfg.tc_sched[temp_leaf_tc_id].mode =
							DPNI_TX_SCHED_WEIGHTED_A;
						/* DPAA2 supports weights in multiples of 100 */
						prio_cfg.tc_sched[temp_leaf_tc_id].delta_bandwidth =
							temp_leaf_node->weight * 100;
					} else if (wfq_grp == 1) {
						prio_cfg.tc_sched[temp_leaf_tc_id].mode =
							DPNI_TX_SCHED_WEIGHTED_B;
						prio_cfg.tc_sched[temp_leaf_tc_id].delta_bandwidth =
							temp_leaf_node->weight * 100;
					} else {
						ret = -rte_tm_error_set(error, EINVAL,
							RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
							"Only 2 WFQ Groups are supported\n");
						goto out;
					}
					is_wfq_grp = 1;
					conf[temp_leaf_node->id] = 1;
				}
			}
			if (is_wfq_grp) {
				if (wfq_grp == 0) {
					prio_cfg.tc_sched[leaf_tc_id].mode =
						DPNI_TX_SCHED_WEIGHTED_A;
					prio_cfg.tc_sched[leaf_tc_id].delta_bandwidth =
						leaf_node->weight * 100;
					prio_cfg.prio_group_A = leaf_node->priority;
				} else if (wfq_grp == 1) {
					prio_cfg.tc_sched[leaf_tc_id].mode =
						DPNI_TX_SCHED_WEIGHTED_B;
					prio_cfg.tc_sched[leaf_tc_id].delta_bandwidth =
						leaf_node->weight * 100;
					prio_cfg.prio_group_B = leaf_node->priority;
				}
				wfq_grp++;
				is_wfq_grp = 0;
			}
			conf[leaf_node->id] = 1;
		}
		if (wfq_grp > 1) {
			prio_cfg.separate_groups = 1;
			if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) {
				prio_cfg.prio_group_A = 0;
				prio_cfg.prio_group_B = 1;
			} else {
				prio_cfg.prio_group_A = 1;
				prio_cfg.prio_group_B = 0;
			}
		}

		prio_cfg.prio_group_A = 1;
		prio_cfg.channel_idx = channel_node->channel_id;
		ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
		if (ret) {
			ret = -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
				"Scheduling Failed\n");
			goto out;
		}
		DPAA2_PMD_DEBUG("########################################\n");
		DPAA2_PMD_DEBUG("Channel idx = %d\n", prio_cfg.channel_idx);
		for (t = 0; t < DPNI_MAX_TC; t++) {
			DPAA2_PMD_DEBUG("tc = %d mode = %d ", t, prio_cfg.tc_sched[t].mode);
			DPAA2_PMD_DEBUG("delta = %d\n", prio_cfg.tc_sched[t].delta_bandwidth);
		}
		DPAA2_PMD_DEBUG("prioritya = %d\n", prio_cfg.prio_group_A);
		DPAA2_PMD_DEBUG("priorityb = %d\n", prio_cfg.prio_group_B);
		DPAA2_PMD_DEBUG("separate grps = %d\n\n", prio_cfg.separate_groups);
	}
	return 0;

out:
	if (clear_on_fail) {
		dpaa2_tm_deinit(dev);
		dpaa2_tm_init(dev);
	}

	return ret;
}

static int
dpaa2_node_stats_read(struct rte_eth_dev *dev, uint32_t node_id,
		struct rte_tm_node_stats *stats, uint64_t *stats_mask,
		int clear, struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_node *node;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	union dpni_statistics value;
	int ret = 0;

	node = dpaa2_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL, "Node id does not exist\n");

	if (stats_mask)
		*stats_mask = node->stats_mask;

	if (!stats)
		return 0;

	memset(stats, 0, sizeof(*stats));
	memset(&value, 0, sizeof(union dpni_statistics));

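	/*
	 * Statistics pages: page 1 of the DPNI counters carries the per-port
	 * egress totals used for the LNI node, while page 3 carries the
	 * per-TC CEETM dequeue counters used for leaf (queue) nodes.
	 */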
	if (node->level_id == LNI_LEVEL) {
		uint8_t page1 = 1;

		ret = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					  page1, 0, &value);
		if (ret)
			return -rte_tm_error_set(error, -ret,
				RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to read port statistics\n");

		if (node->stats_mask & RTE_TM_STATS_N_PKTS)
			stats->n_pkts = value.page_1.egress_all_frames;

		if (node->stats_mask & RTE_TM_STATS_N_BYTES)
			stats->n_bytes = value.page_1.egress_all_bytes;

		if (clear) {
			ret = dpni_reset_statistics(dpni, CMD_PRI_LOW,
						    priv->token);
			if (ret)
				return -rte_tm_error_set(error, -ret,
					RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
					"Failed to reset port statistics\n");
		}
	} else if (node->level_id == QUEUE_LEVEL) {
		uint8_t page3 = 3;
		struct dpaa2_queue *dpaa2_q;

		dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id];

		ret = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					  page3,
					  (node->parent->channel_id << 8 |
					   dpaa2_q->tc_index), &value);
		if (ret)
			return -rte_tm_error_set(error, -ret,
				RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to read queue statistics\n");

		if (node->stats_mask & RTE_TM_STATS_N_PKTS)
			stats->n_pkts = value.page_3.ceetm_dequeue_frames;
		if (node->stats_mask & RTE_TM_STATS_N_BYTES)
			stats->n_bytes = value.page_3.ceetm_dequeue_bytes;
	} else {
		return -rte_tm_error_set(error, -1,
			RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
			"Failed to read channel statistics\n");
	}

	return 0;
}

const struct rte_tm_ops dpaa2_tm_ops = {
	.node_type_get = dpaa2_node_type_get,
	.capabilities_get = dpaa2_capabilities_get,
	.level_capabilities_get = dpaa2_level_capabilities_get,
	.node_capabilities_get = dpaa2_node_capabilities_get,
	.shaper_profile_add = dpaa2_shaper_profile_add,
	.shaper_profile_delete = dpaa2_shaper_profile_delete,
	.node_add = dpaa2_node_add,
	.node_delete = dpaa2_node_delete,
	.hierarchy_commit = dpaa2_hierarchy_commit,
	.node_stats_read = dpaa2_node_stats_read,
};