/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

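/* Pack a shaper config into the {PIR, CIR} register format. From the
 * shifts below: mantissa sits at bit 1, exponent at bit 9, div_exp at
 * bit 13, burst mantissa at bit 29, and burst exponent at bit 37 on
 * CN9K but bit 44 otherwise (the burst mantissa field is wider on
 * non-CN9K models). Bit 0 is left clear here; callers such as
 * nix_tm_shaper_reg_prep() OR in the enable bit themselves.
 */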
static inline uint64_t
nix_tm_shaper2regval(struct nix_tm_shaper_data *shaper)
{
	uint64_t regval;

	if (roc_model_is_cn9k()) {
		regval = (shaper->burst_exponent << 37);
		regval |= (shaper->burst_mantissa << 29);
		regval |= (shaper->div_exp << 13);
		regval |= (shaper->exponent << 9);
		regval |= (shaper->mantissa << 1);
		return regval;
	}

	regval = (shaper->burst_exponent << 44);
	regval |= (shaper->burst_mantissa << 29);
	regval |= (shaper->div_exp << 13);
	regval |= (shaper->exponent << 9);
	regval |= (shaper->mantissa << 1);
	return regval;
}

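/* Map ROC TM levels to NIX hardware scheduler levels. When the device
 * root is TL1 (TL1 access available), ROC_TM_LVL_ROOT..SCH4 map to
 * TL1..SMQ; when the root is TL2, the same ROC levels map one hardware
 * level lower, to TL2..SMQ. Unknown levels map to NIX_TXSCH_LVL_CNT.
 */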
uint16_t
nix_tm_lvl2nix_tl1_root(uint32_t lvl)
{
	switch (lvl) {
	case ROC_TM_LVL_ROOT:
		return NIX_TXSCH_LVL_TL1;
	case ROC_TM_LVL_SCH1:
		return NIX_TXSCH_LVL_TL2;
	case ROC_TM_LVL_SCH2:
		return NIX_TXSCH_LVL_TL3;
	case ROC_TM_LVL_SCH3:
		return NIX_TXSCH_LVL_TL4;
	case ROC_TM_LVL_SCH4:
		return NIX_TXSCH_LVL_SMQ;
	default:
		return NIX_TXSCH_LVL_CNT;
	}
}

uint16_t
nix_tm_lvl2nix_tl2_root(uint32_t lvl)
{
	switch (lvl) {
	case ROC_TM_LVL_ROOT:
		return NIX_TXSCH_LVL_TL2;
	case ROC_TM_LVL_SCH1:
		return NIX_TXSCH_LVL_TL3;
	case ROC_TM_LVL_SCH2:
		return NIX_TXSCH_LVL_TL4;
	case ROC_TM_LVL_SCH3:
		return NIX_TXSCH_LVL_SMQ;
	default:
		return NIX_TXSCH_LVL_CNT;
	}
}

uint16_t
nix_tm_lvl2nix(struct nix *nix, uint32_t lvl)
{
	if (nix_tm_have_tl1_access(nix))
		return nix_tm_lvl2nix_tl1_root(lvl);
	else
		return nix_tm_lvl2nix_tl2_root(lvl);
}

static uint8_t
nix_tm_relchan_get(struct nix *nix)
{
	return nix->tx_chan_base & 0xff;
}

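/* Find the prio anchor, i.e. the hw id that priority 0 of this parent
 * maps to. SP children of a parent occupy contiguous hw ids ordered by
 * priority, so any non-RR child yields the anchor as
 * hw_id - priority; children at the RR priority are skipped since they
 * do not follow that ordering. Returns 0 when no such child exists.
 */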
static int
nix_tm_find_prio_anchor(struct nix *nix, uint32_t node_id,
			enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *child_node;
	struct nix_tm_node_list *list;

	list = nix_tm_node_list(nix, tree);

	TAILQ_FOREACH(child_node, list, node) {
		if (!child_node->parent)
			continue;
		if (child_node->parent->id != node_id)
			continue;
		if (child_node->priority == child_node->parent->rr_prio)
			continue;
		return child_node->hw_id - child_node->priority;
	}
	return 0;
}

struct nix_tm_shaper_profile *
nix_tm_shaper_profile_search(struct nix *nix, uint32_t id)
{
	struct nix_tm_shaper_profile *profile;

	TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
		if (profile->id == id)
			return profile;
	}
	return NULL;
}

struct nix_tm_node *
nix_tm_node_search(struct nix *nix, uint32_t node_id, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;

	list = nix_tm_node_list(nix, tree);
	TAILQ_FOREACH(node, list, node) {
		if (node->id == node_id)
			return node;
	}
	return NULL;
}

static uint64_t
nix_tm_shaper_rate_conv_floor(uint64_t value, uint64_t *exponent_p,
			      uint64_t *mantissa_p, uint64_t *div_exp_p)
{
	uint64_t div_exp, exponent, mantissa;

	/* Boundary checks */
	if (value < NIX_TM_MIN_SHAPER_RATE || value > NIX_TM_MAX_SHAPER_RATE)
		return 0;

	if (value <= NIX_TM_SHAPER_RATE(0, 0, 0)) {
		/* Calculate rate div_exp and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * (256 + mantissa)
		 *		/ ((1 << div_exp) * 256))
		 */
		div_exp = 0;
		exponent = 0;
		mantissa = NIX_TM_MAX_RATE_MANTISSA;

		while (value <= (NIX_TM_SHAPER_RATE_CONST / (1 << div_exp)))
			div_exp += 1;

		while (value <= ((NIX_TM_SHAPER_RATE_CONST * (256 + mantissa)) /
				 ((1 << div_exp) * 256)))
			mantissa -= 1;
	} else {
		/* Calculate rate exponent and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
		 *
		 */
		div_exp = 0;
		exponent = NIX_TM_MAX_RATE_EXPONENT;
		mantissa = NIX_TM_MAX_RATE_MANTISSA;

		while (value <= (NIX_TM_SHAPER_RATE_CONST * (1 << exponent)))
			exponent -= 1;

		while (value <= ((NIX_TM_SHAPER_RATE_CONST *
				  ((256 + mantissa) << exponent)) /
				 256))
			mantissa -= 1;
	}

	if (div_exp > NIX_TM_MAX_RATE_DIV_EXP ||
	    exponent > NIX_TM_MAX_RATE_EXPONENT ||
	    mantissa > NIX_TM_MAX_RATE_MANTISSA)
		return 0;

	if (div_exp_p)
		*div_exp_p = div_exp;
	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	/* Calculate real rate value */
	return NIX_TM_SHAPER_RATE(exponent, mantissa, div_exp);
}

static uint64_t
nix_tm_shaper_rate_conv_exact(uint64_t value, uint64_t *exponent_p,
			      uint64_t *mantissa_p, uint64_t *div_exp_p)
{
	uint64_t div_exp, exponent, mantissa;

	/* Boundary checks */
	if (value < NIX_TM_MIN_SHAPER_RATE || value > NIX_TM_MAX_SHAPER_RATE)
		return 0;

	if (value <= NIX_TM_SHAPER_RATE(0, 0, 0)) {
		/* Calculate rate div_exp and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * (256 + mantissa)
		 *		/ ((1 << div_exp) * 256))
		 */
		div_exp = 0;
		exponent = 0;
		mantissa = NIX_TM_MAX_RATE_MANTISSA;

		while (value < (NIX_TM_SHAPER_RATE_CONST / (1 << div_exp)))
			div_exp += 1;

		while (value < ((NIX_TM_SHAPER_RATE_CONST * (256 + mantissa)) /
				((1 << div_exp) * 256)))
			mantissa -= 1;
	} else {
		/* Calculate rate exponent and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
		 *
		 */
		div_exp = 0;
		exponent = NIX_TM_MAX_RATE_EXPONENT;
		mantissa = NIX_TM_MAX_RATE_MANTISSA;

		while (value < (NIX_TM_SHAPER_RATE_CONST * (1 << exponent)))
			exponent -= 1;

		while (value < ((NIX_TM_SHAPER_RATE_CONST *
				 ((256 + mantissa) << exponent)) /
				256))
			mantissa -= 1;
	}

	if (div_exp > NIX_TM_MAX_RATE_DIV_EXP ||
	    exponent > NIX_TM_MAX_RATE_EXPONENT ||
	    mantissa > NIX_TM_MAX_RATE_MANTISSA)
		return 0;

	if (div_exp_p)
		*div_exp_p = div_exp;
	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	/* Calculate real rate value */
	return NIX_TM_SHAPER_RATE(exponent, mantissa, div_exp);
}

/* With zero accuracy the parameters are tuned exactly as defined by HW;
 * non-zero accuracy keeps the parameters close to lower values and
 * makes sure the long-term shaper rate will not exceed the requested rate.
 */
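/* Worked example (exact mode; a sketch taking NIX_TM_SHAPER_RATE_CONST
 * as the 2E6 used in the formula comments above): value = 10,000,000
 * lands in the second branch and settles at exponent = 2, mantissa = 64,
 * div_exp = 0, since (2E6 * ((256 + 64) << 2)) / 256 = 10,000,000 exactly.
 */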
uint64_t
nix_tm_shaper_rate_conv(uint64_t value, uint64_t *exponent_p,
			uint64_t *mantissa_p, uint64_t *div_exp_p,
			int8_t accuracy)
{
	if (!accuracy)
		return nix_tm_shaper_rate_conv_exact(value, exponent_p,
						     mantissa_p, div_exp_p);

	return nix_tm_shaper_rate_conv_floor(value, exponent_p, mantissa_p,
					     div_exp_p);
}

uint64_t
nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
			 uint64_t *mantissa_p)
{
	uint64_t min_burst, max_burst;
	uint64_t exponent, mantissa;
	uint32_t max_mantissa;

	min_burst = NIX_TM_MIN_SHAPER_BURST;
	max_burst = roc_nix_tm_max_shaper_burst_get();

	if (value < min_burst || value > max_burst)
		return 0;

	max_mantissa = (roc_model_is_cn9k() ? NIX_CN9K_TM_MAX_BURST_MANTISSA :
					      NIX_TM_MAX_BURST_MANTISSA);
	/* Calculate burst exponent and mantissa using
	 * the following formula:
	 *
	 * value = ((256 + mantissa) << (exponent + 1)) / 256
	 *
	 */
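	/* For instance (a sketch, assuming the exponent cap permits it):
	 * value = 65536 settles at exponent + 1 = 16 and mantissa = 0,
	 * since ((256 + 0) << 16) / 256 = 65536 exactly.
	 */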
	exponent = NIX_TM_MAX_BURST_EXPONENT;
	mantissa = max_mantissa;

	while (value < (1ull << (exponent + 1)))
		exponent -= 1;

	while (value < ((256 + mantissa) << (exponent + 1)) / 256)
		mantissa -= 1;

	if (exponent > NIX_TM_MAX_BURST_EXPONENT || mantissa > max_mantissa)
		return 0;

	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	return NIX_TM_SHAPER_BURST(exponent, mantissa);
}

static void
nix_tm_shaper_conf_get(struct nix_tm_shaper_profile *profile,
		       struct nix_tm_shaper_data *cir,
		       struct nix_tm_shaper_data *pir)
{
	memset(cir, 0, sizeof(*cir));
	memset(pir, 0, sizeof(*pir));

	if (!profile)
		return;

	/* Calculate CIR exponent and mantissa */
	if (profile->commit.rate)
		cir->rate = nix_tm_shaper_rate_conv(
			profile->commit.rate, &cir->exponent, &cir->mantissa,
			&cir->div_exp, profile->accuracy);

	/* Calculate PIR exponent and mantissa */
	if (profile->peak.rate)
		pir->rate = nix_tm_shaper_rate_conv(
			profile->peak.rate, &pir->exponent, &pir->mantissa,
			&pir->div_exp, profile->accuracy);

	/* Calculate CIR burst exponent and mantissa */
	if (profile->commit.size)
		cir->burst = nix_tm_shaper_burst_conv(profile->commit.size,
						      &cir->burst_exponent,
						      &cir->burst_mantissa);

	/* Calculate PIR burst exponent and mantissa */
	if (profile->peak.size)
		pir->burst = nix_tm_shaper_burst_conv(profile->peak.size,
						      &pir->burst_exponent,
						      &pir->burst_mantissa);
}

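/* Classify the children of a parent by SP priority. The priority
 * holding more than one child is the (single) RR group; it is reported
 * through rr_prio and the return value gives the number of RR children.
 * A lone child at a priority is plain SP. max_prio reports the highest
 * priority in use under this parent.
 */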
uint32_t
nix_tm_check_rr(struct nix *nix, uint32_t parent_id, enum roc_nix_tm_tree tree,
		uint32_t *rr_prio, uint32_t *max_prio)
{
	uint32_t node_cnt[NIX_TM_TLX_SP_PRIO_MAX];
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint32_t rr_num = 0, i;
	uint32_t children = 0;
	uint32_t priority;

	memset(node_cnt, 0, sizeof(node_cnt));
	*rr_prio = 0xF;
	*max_prio = UINT32_MAX;

	list = nix_tm_node_list(nix, tree);
	TAILQ_FOREACH(node, list, node) {
		if (!node->parent)
			continue;

		if (node->parent->id != parent_id)
			continue;

		priority = node->priority;
		node_cnt[priority]++;
		children++;
	}

	for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++) {
		if (!node_cnt[i])
			break;

		if (node_cnt[i] > rr_num) {
			*rr_prio = i;
			rr_num = node_cnt[i];
		}
	}

	/* An RR group with a single child is treated as SP */
	if (rr_num == 1) {
		*rr_prio = 0xF;
		rr_num = 0;
	}

	/* Max prio is returned only when there is a non-zero prio
	 * or when the parent has a single child.
	 */
	if (i > 1 || (children == 1))
		*max_prio = i - 1;
	return rr_num;
}

static uint16_t
nix_tm_max_prio(struct nix *nix, uint16_t hw_lvl)
{
	if (hw_lvl >= NIX_TXSCH_LVL_CNT)
		return 0;

	/* MDQ does not support SP */
	if (hw_lvl == NIX_TXSCH_LVL_MDQ)
		return 0;

	/* PF's TL1 with VFs enabled does not support SP */
	if (hw_lvl == NIX_TXSCH_LVL_TL1 && (!nix_tm_have_tl1_access(nix) ||
					    (nix->tm_flags & NIX_TM_TL1_NO_SP)))
		return 0;

	return NIX_TM_TLX_SP_PRIO_MAX - 1;
}

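/* Validate a new child priority against the rules enforced below: it
 * must not exceed the per-level maximum, at most one RR group may exist
 * per parent, and priorities must be used without holes.
 */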
int
nix_tm_validate_prio(struct nix *nix, uint32_t lvl, uint32_t parent_id,
		     uint32_t priority, enum roc_nix_tm_tree tree)
{
	uint8_t priorities[NIX_TM_TLX_SP_PRIO_MAX];
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint32_t rr_num = 0;
	int i;

	list = nix_tm_node_list(nix, tree);
	/* Validate priority against max */
	if (priority > nix_tm_max_prio(nix, nix_tm_lvl2nix(nix, lvl - 1)))
		return NIX_ERR_TM_PRIO_EXCEEDED;

	if (parent_id == ROC_NIX_TM_NODE_ID_INVALID)
		return 0;

	memset(priorities, 0, sizeof(priorities));
	priorities[priority] = 1;

	TAILQ_FOREACH(node, list, node) {
		if (!node->parent)
			continue;

		if (node->parent->id != parent_id)
			continue;

		priorities[node->priority]++;
	}

	for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++)
		if (priorities[i] > 1)
			rr_num++;

	/* At most one RR group per parent */
	if (rr_num > 1)
		return NIX_ERR_TM_MULTIPLE_RR_GROUPS;

	/* Check the previous priority to avoid holes in priorities */
	if (priority && !priorities[priority - 1])
		return NIX_ERR_TM_PRIO_ORDER;

	return 0;
}

bool
nix_tm_child_res_valid(struct nix_tm_node_list *list,
		       struct nix_tm_node *parent)
{
	struct nix_tm_node *child;

	TAILQ_FOREACH(child, list, node) {
		if (child->parent != parent)
			continue;
		if (!(child->flags & NIX_TM_NODE_HWRES))
			return false;
	}
	return true;
}

uint8_t
nix_tm_tl1_default_prep(struct nix *nix, uint32_t schq, volatile uint64_t *reg,
			volatile uint64_t *regval)
{
	uint8_t k = 0;

	/*
	 * Default config for TL1.
	 * For VF this is always ignored.
	 */
	plt_tm_dbg("Default config for main root %s(%u)",
		   nix_tm_hwlvl2str(NIX_TXSCH_LVL_TL1), schq);

	/* Set DWRR quantum */
	reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
	regval[k] = NIX_TM_TL1_DFLT_RR_QTM;
	k++;

	reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
	regval[k] = (nix->tm_aggr_lvl_rr_prio << 1);
	k++;

	reg[k] = NIX_AF_TL1X_CIR(schq);
	regval[k] = 0;
	k++;

	return k;
}

uint8_t
nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
			 volatile uint64_t *reg, volatile uint64_t *regval,
			 volatile uint64_t *regval_mask)
{
	struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
	uint8_t k = 0, hw_lvl, parent_lvl;
	uint64_t parent = 0, child = 0;
	enum roc_nix_tm_tree tree;
	uint32_t rr_prio, schq;
	uint16_t link, relchan;

	tree = node->tree;
	schq = node->hw_id;
	hw_lvl = node->hw_lvl;
	parent_lvl = hw_lvl + 1;
	rr_prio = node->rr_prio;

	/* Root node will not have a parent node */
	if (hw_lvl == nix->tm_root_lvl)
		parent = node->parent_hw_id;
	else
		parent = node->parent->hw_id;

	link = nix->tx_link;
	relchan = nix_tm_relchan_get(nix);

	if (hw_lvl != NIX_TXSCH_LVL_SMQ)
		child = nix_tm_find_prio_anchor(nix, node->id, tree);

	/* Override default rr_prio when TL1
	 * Static Priority is disabled
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL1 && nix->tm_flags & NIX_TM_TL1_NO_SP) {
		rr_prio = nix->tm_aggr_lvl_rr_prio;
		child = 0;
	}

	plt_tm_dbg("Topology config node %s(%u)->%s(%" PRIu64 ") lvl %u, id %u"
		   " prio_anchor %" PRIu64 " rr_prio %u (%p)",
		   nix_tm_hwlvl2str(hw_lvl), schq, nix_tm_hwlvl2str(parent_lvl),
		   parent, node->lvl, node->id, child, rr_prio, node);

	/* Prepare Topology and Link config */
	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:

		/* Set xoff, which will be cleared later */
		reg[k] = NIX_AF_SMQX_CFG(schq);
		regval[k] = (BIT_ULL(50) | NIX_MIN_HW_FRS |
			     ((nix->mtu & 0xFFFF) << 8));
		/* Maximum Vtag insertion size as a multiple of four bytes */
		if (roc_nix->hw_vlan_ins)
			regval[k] |= (0x2ULL << 36);
		regval_mask[k] = ~(BIT_ULL(50) | GENMASK_ULL(6, 0) |
				   GENMASK_ULL(23, 8) | GENMASK_ULL(38, 36));
		k++;

		/* Parent and schedule conf */
		reg[k] = NIX_AF_MDQX_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		break;
	case NIX_TXSCH_LVL_TL4:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL4X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Configure TL4 to send to SDP channel instead of CGX/LBK */
		if (nix->sdp_link) {
			reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
			regval[k] = BIT_ULL(12);
			k++;
		}
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL3X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!nix->sdp_link &&
		    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
			regval[k] = BIT_ULL(12) | relchan;
			/* Enable BP if node is BP capable and rx_pause is set */
			if (nix->rx_pause && node->bp_capa)
				regval[k] |= BIT_ULL(13);
			k++;
		}

		break;
	case NIX_TXSCH_LVL_TL2:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL2X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!nix->sdp_link &&
		    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
			regval[k] = BIT_ULL(12) | relchan;
			/* Enable BP if node is BP capable and rx_pause is set */
			if (nix->rx_pause && node->bp_capa)
				regval[k] |= BIT_ULL(13);
			k++;
		}

		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
		k++;

		break;
	}

	return k;
}

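/* Prepare the per-level SCHEDULE register: strict priority in the bits
 * from 24 upward and the RR quantum/weight in the low bits. TL1 has no
 * SP field here, so only the quantum is programmed for it.
 */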
uint8_t
nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
		      volatile uint64_t *reg, volatile uint64_t *regval)
{
	uint64_t strict_prio = node->priority;
	uint32_t hw_lvl = node->hw_lvl;
	uint32_t schq = node->hw_id;
	uint64_t rr_quantum;
	uint8_t k = 0;

	/* For CN9K, weight needs to be converted to quantum */
	rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

	/* For children of the root, strict prio is the default if either
	 * the device root is TL2 or TL1 static priority is disabled.
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
	    (!nix_tm_have_tl1_access(nix) || nix->tm_flags & NIX_TM_TL1_NO_SP))
		strict_prio = nix->tm_aggr_lvl_rr_prio;

	plt_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
		   "prio 0x%" PRIx64 ", rr_quantum/rr_wt 0x%" PRIx64 " (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
		   strict_prio, rr_quantum, node);

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
		regval[k] = rr_quantum;
		k++;

		break;
	}

	return k;
}

uint8_t
nix_tm_shaper_reg_prep(struct nix_tm_node *node,
		       struct nix_tm_shaper_profile *profile,
		       volatile uint64_t *reg, volatile uint64_t *regval)
{
	struct nix_tm_shaper_data cir, pir;
	uint32_t schq = node->hw_id;
	uint64_t adjust = 0;
	uint8_t k = 0;

	nix_tm_shaper_conf_get(profile, &cir, &pir);

	if (profile && node->pkt_mode)
		adjust = profile->pkt_mode_adj;
	else if (profile)
		adjust = profile->pkt_len_adj;

	adjust &= 0x1FF;
	plt_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
		   "pir %" PRIu64 "(%" PRIu64 "B),"
		   " cir %" PRIu64 "(%" PRIu64 "B) "
		   "adjust 0x%" PRIx64 "(pktmode %u) (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
		   pir.rate, pir.burst, cir.rate, cir.burst, adjust,
		   node->pkt_mode, node);

	switch (node->hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_MDQX_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_MDQX_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_MDQX_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL4X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL4X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL4X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL3X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL3X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL3X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;

		break;
	case NIX_TXSCH_LVL_TL2:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL2X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL2X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL2X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;

		break;
	case NIX_TXSCH_LVL_TL1:
		/* Configure CIR */
		reg[k] = NIX_AF_TL1X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure length disable and adjust */
		reg[k] = NIX_AF_TL1X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	}

	return k;
}

uint8_t
nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,
		    volatile uint64_t *reg, volatile uint64_t *regval)
{
	uint32_t hw_lvl = node->hw_lvl;
	uint32_t schq = node->hw_id;
	uint8_t k = 0;

	plt_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
		   nix_tm_hwlvl2str(hw_lvl), schq, node->lvl, node->id, enable,
		   node);

	regval[k] = enable;

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_MDQ:
		reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
		k++;
		break;
	default:
		break;
	}

	return k;
}

/* Search for min rate in topology */
uint64_t
nix_tm_shaper_profile_rate_min(struct nix *nix)
{
	struct nix_tm_shaper_profile *profile;
	uint64_t rate_min = 1E9; /* 1 Gbps */

	TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
		if (profile->peak.rate && profile->peak.rate < rate_min)
			rate_min = profile->peak.rate;

		if (profile->commit.rate && profile->commit.rate < rate_min)
			rate_min = profile->commit.rate;
	}
	return rate_min;
}

uint16_t
nix_tm_resource_avail(struct nix *nix, uint8_t hw_lvl, bool contig)
{
	uint32_t pos = 0, start_pos = 0;
	struct plt_bitmap *bmp;
	uint16_t count = 0;
	uint64_t slab = 0;

	bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
	plt_bitmap_scan_init(bmp);

	if (!plt_bitmap_scan(bmp, &pos, &slab))
		return count;

	/* Count bits set */
	start_pos = pos;
	do {
		count += __builtin_popcountll(slab);
		if (!plt_bitmap_scan(bmp, &pos, &slab))
			break;
	} while (pos != start_pos);

	return count;
}

uint16_t
nix_tm_resource_estimate(struct nix *nix, uint16_t *schq_contig, uint16_t *schq,
			 enum roc_nix_tm_tree tree)
{
	struct nix_tm_node_list *list;
	uint8_t contig_cnt, hw_lvl;
	struct nix_tm_node *parent;
	uint16_t cnt = 0, avail;

	list = nix_tm_node_list(nix, tree);
	/* Walk through parents from TL1..TL4 */
	for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
		TAILQ_FOREACH(parent, list, node) {
			if (hw_lvl != parent->hw_lvl)
				continue;

			/* Skip accounting for children whose parent
			 * has not requested reallocation.
			 */
			if (!parent->child_realloc)
				continue;

			/* Count children needed */
			schq[hw_lvl - 1] += parent->rr_num;
			if (parent->max_prio != UINT32_MAX) {
				contig_cnt = parent->max_prio + 1;
				schq_contig[hw_lvl - 1] += contig_cnt;
				/* When we have SP + DWRR at a parent,
				 * we will always have a spare schq at rr prio
				 * location in contiguous queues. Hence reduce
				 * discontiguous count by 1.
				 */
				if (parent->max_prio > 0 && parent->rr_num)
					schq[hw_lvl - 1] -= 1;
			}
		}
	}

	schq[nix->tm_root_lvl] = 1;
	if (!nix_tm_have_tl1_access(nix))
		schq[NIX_TXSCH_LVL_TL1] = 1;

	/* Now check for existing resources */
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		avail = nix_tm_resource_avail(nix, hw_lvl, false);
		if (schq[hw_lvl] <= avail)
			schq[hw_lvl] = 0;
		else
			schq[hw_lvl] -= avail;

		/* For contiguous queues, realloc everything */
		avail = nix_tm_resource_avail(nix, hw_lvl, true);
		if (schq_contig[hw_lvl] <= avail)
			schq_contig[hw_lvl] = 0;

		cnt += schq[hw_lvl];
		cnt += schq_contig[hw_lvl];

		plt_tm_dbg("Estimate resources needed for %s: dis %u cont %u",
			   nix_tm_hwlvl2str(hw_lvl), schq[hw_lvl],
			   schq_contig[hw_lvl]);
	}

	return cnt;
}

uint16_t
roc_nix_tm_leaf_cnt(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint16_t leaf_cnt = 0;

	/* Count leaves only in the user list */
	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
	TAILQ_FOREACH(node, list, node) {
		if (node->id < nix->nb_tx_queues)
			leaf_cnt++;
	}

	return leaf_cnt;
}

int
roc_nix_tm_node_lvl(struct roc_nix *roc_nix, uint32_t node_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	return node->lvl;
}

struct roc_nix_tm_node *
roc_nix_tm_node_get(struct roc_nix *roc_nix, uint32_t node_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	return (struct roc_nix_tm_node *)node;
}

struct roc_nix_tm_node *
roc_nix_tm_node_next(struct roc_nix *roc_nix, struct roc_nix_tm_node *__prev)
{
	struct nix_tm_node *prev = (struct nix_tm_node *)__prev;
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;

	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);

	/* HEAD of the list */
	if (!prev)
		return (struct roc_nix_tm_node *)TAILQ_FIRST(list);

	/* Next entry */
	if (prev->tree != ROC_NIX_TM_USER)
		return NULL;

	return (struct roc_nix_tm_node *)TAILQ_NEXT(prev, node);
}

struct roc_nix_tm_shaper_profile *
roc_nix_tm_shaper_profile_get(struct roc_nix *roc_nix, uint32_t profile_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;

	profile = nix_tm_shaper_profile_search(nix, profile_id);
	return (struct roc_nix_tm_shaper_profile *)profile;
}

struct roc_nix_tm_shaper_profile *
roc_nix_tm_shaper_profile_next(struct roc_nix *roc_nix,
			       struct roc_nix_tm_shaper_profile *__prev)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile_list *list;
	struct nix_tm_shaper_profile *prev;

	prev = (struct nix_tm_shaper_profile *)__prev;
	list = &nix->shaper_profile_list;

	/* HEAD of the list */
	if (!prev)
		return (struct roc_nix_tm_shaper_profile *)TAILQ_FIRST(list);

	return (struct roc_nix_tm_shaper_profile *)TAILQ_NEXT(prev, shaper);
}

struct nix_tm_node *
nix_tm_node_alloc(void)
{
	struct nix_tm_node *node;

	node = plt_zmalloc(sizeof(struct nix_tm_node), 0);
	if (!node)
		return NULL;

	node->free_fn = plt_free;
	return node;
}

void
nix_tm_node_free(struct nix_tm_node *node)
{
	if (!node || node->free_fn == NULL)
		return;

	(node->free_fn)(node);
}

struct nix_tm_shaper_profile *
nix_tm_shaper_profile_alloc(void)
{
	struct nix_tm_shaper_profile *profile;

	profile = plt_zmalloc(sizeof(struct nix_tm_shaper_profile), 0);
	if (!profile)
		return NULL;

	profile->free_fn = plt_free;
	return profile;
}

void
nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile)
{
	if (!profile || !profile->free_fn)
		return;

	(profile->free_fn)(profile);
}

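/* Read (and optionally clear) the TL1 drop and per-color packet/byte
 * counters for a node via an AF mailbox NIX_TXSCHQ_CFG request. Only
 * nodes mapped to TL1 are supported.
 */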
int
roc_nix_tm_node_stats_get(struct roc_nix *roc_nix, uint32_t node_id, bool clear,
			  struct roc_nix_tm_node_stats *n_stats)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req, *rsp;
	struct nix_tm_node *node;
	uint32_t schq;
	int rc, i;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	if (node->hw_lvl != NIX_TXSCH_LVL_TL1)
		return NIX_ERR_OP_NOTSUP;

	/* Check if node has HW resource */
	if (!(node->flags & NIX_TM_NODE_HWRES))
		return 0;

	schq = node->hw_id;
	/* Skip fetch if not requested */
	if (!n_stats)
		goto clear_stats;

	memset(n_stats, 0, sizeof(struct roc_nix_tm_node_stats));

	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->read = 1;
	req->lvl = NIX_TXSCH_LVL_TL1;

	i = 0;
	req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
	req->num_regs = i;

	rc = mbox_process_msg(mbox, (void **)&rsp);
	if (rc)
		return rc;

	/* Return stats */
	n_stats->stats[ROC_NIX_TM_NODE_PKTS_DROPPED] = rsp->regval[0];
	n_stats->stats[ROC_NIX_TM_NODE_BYTES_DROPPED] = rsp->regval[1];
	n_stats->stats[ROC_NIX_TM_NODE_GREEN_PKTS] = rsp->regval[2];
	n_stats->stats[ROC_NIX_TM_NODE_GREEN_BYTES] = rsp->regval[3];
	n_stats->stats[ROC_NIX_TM_NODE_YELLOW_PKTS] = rsp->regval[4];
	n_stats->stats[ROC_NIX_TM_NODE_YELLOW_BYTES] = rsp->regval[5];
	n_stats->stats[ROC_NIX_TM_NODE_RED_PKTS] = rsp->regval[6];
	n_stats->stats[ROC_NIX_TM_NODE_RED_BYTES] = rsp->regval[7];

clear_stats:
	if (!clear)
		return 0;

	/* Clear all the stats */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_TL1;
	i = 0;
	req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
	req->num_regs = i;

	return mbox_process_msg(mbox, (void **)&rsp);
}

bool
roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) &&
	    (nix->tm_tree == ROC_NIX_TM_USER))
		return true;
	return false;
}

int
roc_nix_tm_tree_type_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->tm_tree;
}

int
roc_nix_tm_max_prio(struct roc_nix *roc_nix, int lvl)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	int hw_lvl = nix_tm_lvl2nix(nix, lvl);

	return nix_tm_max_prio(nix, hw_lvl);
}

int
roc_nix_tm_lvl_is_leaf(struct roc_nix *roc_nix, int lvl)
{
	return nix_tm_is_leaf(roc_nix_to_nix_priv(roc_nix), lvl);
}

void
roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
				   struct roc_nix_tm_shaper_profile *roc_prof)
{
	struct nix_tm_node *tm_node = (struct nix_tm_node *)node;
	struct nix_tm_shaper_profile *profile;
	struct nix_tm_shaper_data cir, pir;

	profile = (struct nix_tm_shaper_profile *)roc_prof->reserved;
	tm_node->red_algo = NIX_REDALG_STD;

	/* C0 doesn't support STALL when both PIR & CIR are enabled */
	if (profile && roc_model_is_cn96_cx()) {
		nix_tm_shaper_conf_get(profile, &cir, &pir);

		if (pir.rate && cir.rate)
			tm_node->red_algo = NIX_REDALG_DISCARD;
	}
}

int
roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix)
{
	if (nix_tm_have_tl1_access(roc_nix_to_nix_priv(roc_nix)))
		return NIX_TXSCH_LVL_CNT;

	return (NIX_TXSCH_LVL_CNT - 1);
}

int
roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix_tm_lvl2nix(nix, lvl) == NIX_TXSCH_LVL_TL1)
		return 1;

	return 0;
}