/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

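/* Return the bit index of the lowest set bit in a bitmap slab,
 * or 0 for an empty slab (callers only scan non-empty slabs).
 */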
static inline int
bitmap_ctzll(uint64_t slab)
{
	if (slab == 0)
		return 0;

	return __builtin_ctzll(slab);
}

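/* Remove and free all shaper profiles, warning about any profile
 * that is still referenced by a TM node.
 */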
void
nix_tm_clear_shaper_profiles(struct nix *nix)
{
	struct nix_tm_shaper_profile *shaper_profile, *tmp;
	struct nix_tm_shaper_profile_list *list;

	list = &nix->shaper_profile_list;
	PLT_TAILQ_FOREACH_SAFE(shaper_profile, list, shaper, tmp) {
		if (shaper_profile->ref_cnt)
			plt_warn("Shaper profile %u has non-zero references",
				 shaper_profile->id);
		TAILQ_REMOVE(&nix->shaper_profile_list, shaper_profile, shaper);
		nix_tm_shaper_profile_free(shaper_profile);
	}
}

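/* Prepare the topology, schedule and shaper register configuration
 * for a single TM node and send it to AF over mbox.
 */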
static int
nix_tm_node_reg_conf(struct nix *nix, struct nix_tm_node *node)
{
	uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
	uint64_t regval[MAX_REGS_PER_MBOX_MSG];
	struct nix_tm_shaper_profile *profile;
	uint64_t reg[MAX_REGS_PER_MBOX_MSG];
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	int rc = -EFAULT;
	uint32_t hw_lvl;
	uint8_t k = 0;

	memset(regval, 0, sizeof(regval));
	memset(regval_mask, 0, sizeof(regval_mask));

	profile = nix_tm_shaper_profile_search(nix, node->shaper_profile_id);
	hw_lvl = node->hw_lvl;

	/* Need this trigger to configure TL1 */
	if (!nix_tm_have_tl1_access(nix) && hw_lvl == NIX_TXSCH_LVL_TL2) {
		/* Prepare default conf for TL1 */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = NIX_TXSCH_LVL_TL1;

		k = nix_tm_tl1_default_prep(nix, node->parent_hw_id, req->reg,
					    req->regval);
		req->num_regs = k;
		rc = mbox_process(mbox);
		if (rc)
			goto error;
	}

	/* Prepare topology config */
	k = nix_tm_topology_reg_prep(nix, node, reg, regval, regval_mask);

	/* Prepare schedule config */
	k += nix_tm_sched_reg_prep(nix, node, &reg[k], &regval[k]);

	/* Prepare shaping config */
	k += nix_tm_shaper_reg_prep(node, profile, &reg[k], &regval[k]);

	if (!k)
		return 0;

	/* Copy and send config mbox */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = hw_lvl;
	req->num_regs = k;

	mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
	mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
	mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);

	rc = mbox_process(mbox);
	if (rc)
		goto error;

	return 0;
error:
	plt_err("Txschq conf failed for node %p, rc=%d", node, rc);
	return rc;
}

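/* Program txschq registers for every node of the tree, walking HW
 * levels from SMQ up to the root, and mark which nodes are capable
 * of carrying link backpressure config.
 */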
int
nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node_list *list;
	bool is_pf_or_lbk = false;
	struct nix_tm_node *node;
	bool skip_bp = false;
	uint32_t hw_lvl;
	int rc = 0;

	list = nix_tm_node_list(nix, tree);

	if ((!dev_is_vf(&nix->dev) || nix->lbk_link) && !nix->sdp_link)
		is_pf_or_lbk = true;

	for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
		TAILQ_FOREACH(node, list, node) {
			if (node->hw_lvl != hw_lvl)
				continue;

			/* Only one TL3/TL2 link config per channel should
			 * have BP enabled, and only for PF or LBK VF.
			 */
			node->bp_capa = 0;
			if (is_pf_or_lbk && !skip_bp &&
			    node->hw_lvl == nix->tm_link_cfg_lvl) {
				node->bp_capa = 1;
				skip_bp = false;
			}

			rc = nix_tm_node_reg_conf(nix, node);
			if (rc)
				goto exit;
		}
	}
exit:
	return rc;
}

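/* Recompute per-parent RR group info (rr_prio, rr_num, max_prio)
 * and locally release the HW resources of children whose parent
 * hierarchy changed.
 */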
int
nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *child, *parent;
	struct nix_tm_node_list *list;
	uint32_t rr_prio, max_prio;
	uint32_t rr_num = 0;

	list = nix_tm_node_list(nix, tree);

	/* Release all the node hw resources locally
	 * if the parent is marked dirty and the resource exists.
	 */
	TAILQ_FOREACH(child, list, node) {
		/* Release resource only if the parent's direct hierarchy
		 * changed.
		 */
		if (child->flags & NIX_TM_NODE_HWRES && child->parent &&
		    child->parent->child_realloc) {
			nix_tm_free_node_resource(nix, child);
		}
		child->max_prio = UINT32_MAX;
	}

	TAILQ_FOREACH(parent, list, node) {
		/* Count the group of children with the same priority,
		 * i.e. the RR group.
		 */
		rr_num = nix_tm_check_rr(nix, parent->id, tree, &rr_prio,
					 &max_prio);

		/* Assume multiple RR groups are not configured,
		 * based on capability.
		 */
		parent->rr_prio = rr_prio;
		parent->rr_num = rr_num;
		parent->max_prio = max_prio;
	}

	return 0;
}

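/* Return 1 if a node already exists at the root HW level of the
 * given tree, else 0.
 */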
static int
nix_tm_root_node_get(struct nix *nix, int tree)
{
	struct nix_tm_node_list *list = nix_tm_node_list(nix, tree);
	struct nix_tm_node *tm_node;

	TAILQ_FOREACH(tm_node, list, node) {
		if (tm_node->hw_lvl == nix->tm_root_lvl)
			return 1;
	}

	return 0;
}

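/* Validate and link a new node into the given tree: level
 * translation, parent level, duplicate IDs, shaper profile and
 * sibling priority consistency are all checked before insertion.
 */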
int
nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;
	uint32_t node_id, parent_id, lvl;
	struct nix_tm_node *parent_node;
	uint32_t priority, profile_id;
	uint8_t hw_lvl, exp_next_lvl;
	enum roc_nix_tm_tree tree;
	int rc;

	node_id = node->id;
	priority = node->priority;
	parent_id = node->parent_id;
	profile_id = node->shaper_profile_id;
	lvl = node->lvl;
	tree = node->tree;

	plt_tm_dbg("Add node %s lvl %u id %u, prio 0x%x weight 0x%x "
		   "parent %u profile 0x%x tree %u",
		   nix_tm_hwlvl2str(nix_tm_lvl2nix(nix, lvl)), lvl, node_id,
		   priority, node->weight, parent_id, profile_id, tree);

	if (tree >= ROC_NIX_TM_TREE_MAX)
		return NIX_ERR_PARAM;

	/* Translate sw level IDs to nix hw level IDs */
	hw_lvl = nix_tm_lvl2nix(nix, lvl);
	if (hw_lvl == NIX_TXSCH_LVL_CNT && !nix_tm_is_leaf(nix, lvl))
		return NIX_ERR_TM_INVALID_LVL;

	/* Leaf nodes must all have priority 0 */
	if (nix_tm_is_leaf(nix, lvl) && priority != 0)
		return NIX_ERR_TM_INVALID_PRIO;

	parent_node = nix_tm_node_search(nix, parent_id, tree);

	if (node_id < nix->nb_tx_queues)
		exp_next_lvl = NIX_TXSCH_LVL_SMQ;
	else
		exp_next_lvl = hw_lvl + 1;

	/* Check that a parent node exists at the expected level */
	if (hw_lvl != nix->tm_root_lvl &&
	    (!parent_node || parent_node->hw_lvl != exp_next_lvl))
		return NIX_ERR_TM_INVALID_PARENT;

	/* Check if a node already exists */
	if (nix_tm_node_search(nix, node_id, tree))
		return NIX_ERR_TM_NODE_EXISTS;

	/* Check if a root node already exists */
	if (hw_lvl == nix->tm_root_lvl && nix_tm_root_node_get(nix, tree))
		return NIX_ERR_TM_NODE_EXISTS;

	profile = nix_tm_shaper_profile_search(nix, profile_id);
	if (!nix_tm_is_leaf(nix, lvl)) {
		/* Check if shaper profile exists for non leaf node */
		if (!profile && profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE)
			return NIX_ERR_TM_INVALID_SHAPER_PROFILE;

		/* Packet mode in profile should match that of the tm node */
		if (profile && profile->pkt_mode != node->pkt_mode)
			return NIX_ERR_TM_PKT_MODE_MISMATCH;
	}

	/* Check for a second DWRR group among siblings, or holes in prio */
	rc = nix_tm_validate_prio(nix, lvl, parent_id, priority, tree);
	if (rc)
		return rc;

	if (node->weight > roc_nix_tm_max_sched_wt_get())
		return NIX_ERR_TM_WEIGHT_EXCEED;

	/* Maintain minimum weight */
	if (!node->weight)
		node->weight = 1;

	node->hw_lvl = nix_tm_lvl2nix(nix, lvl);
	node->rr_prio = 0xF;
	node->max_prio = UINT32_MAX;
	node->hw_id = NIX_TM_HW_ID_INVALID;
	node->flags = 0;

	if (profile)
		profile->ref_cnt++;

	node->parent = parent_node;
	if (parent_node)
		parent_node->child_realloc = true;
	node->parent_hw_id = NIX_TM_HW_ID_INVALID;

	TAILQ_INSERT_TAIL(&nix->trees[tree], node, node);
	plt_tm_dbg("Added node %s lvl %u id %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), lvl, node_id, node);
	return 0;
}

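/* Re-enable (clear SW XOFF on) every node on the path from this
 * node up to the root so that a subsequent flush can make progress.
 */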
int
nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	struct nix_tm_node *p;
	int rc;

	/* Enable nodes in path for flush to succeed */
	if (!nix_tm_is_leaf(nix, node->lvl))
		p = node;
	else
		p = node->parent;
	while (p) {
		if (!(p->flags & NIX_TM_NODE_ENABLED) &&
		    (p->flags & NIX_TM_NODE_HWRES)) {
			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
			req->lvl = p->hw_lvl;
			req->num_regs = nix_tm_sw_xoff_prep(p, false, req->reg,
							    req->regval);
			rc = mbox_process(mbox);
			if (rc)
				return rc;

			p->flags |= NIX_TM_NODE_ENABLED;
		}
		p = p->parent;
	}

	return 0;
}

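/* Enable or disable backpressure for an SQ's TL3/TL2 link config.
 * The SQ's SCH2-level ancestor is located first; every BP-capable
 * HW node at the link config level sharing that HW id then has its
 * LINKX_CFG register updated. In the regval written below, the low
 * byte carries the relative channel (tc) and bit 13 is the BP
 * enable flag, as implied by the regval_mask used here.
 */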
int
nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
		     bool enable)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	enum roc_nix_tm_tree tree = nix->tm_tree;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req = NULL;
	struct nix_tm_node_list *list;
	uint16_t link = nix->tx_link;
	struct nix_tm_node *sq_node;
	struct nix_tm_node *parent;
	struct nix_tm_node *node;
	uint8_t k = 0;
	int rc = 0;

	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
	if (!sq_node)
		return -ENOENT;

	parent = sq_node->parent;
	while (parent) {
		if (parent->lvl == ROC_TM_LVL_SCH2)
			break;

		parent = parent->parent;
	}
	if (!parent)
		return -ENOENT;

	list = nix_tm_node_list(nix, tree);

	if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
		rc = -EINVAL;
		goto err;
	}

	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != nix->tm_link_cfg_lvl)
			continue;

		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
			continue;

		if (node->hw_id != parent->hw_id)
			continue;

		if (!req) {
			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
			req->lvl = nix->tm_link_cfg_lvl;
			k = 0;
		}

		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
		req->regval[k] = enable ? tc : 0;
		req->regval[k] |= enable ? BIT_ULL(13) : 0;
		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
		k++;

		if (k >= MAX_REGS_PER_MBOX_MSG) {
			req->num_regs = k;
			rc = mbox_process(mbox);
			if (rc)
				goto err;
			req = NULL;
		}
	}

	if (req) {
		req->num_regs = k;
		rc = mbox_process(mbox);
		if (rc)
			goto err;
	}

	parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
	return 0;
err:
	plt_err("Failed to %s bp on link %u, rc=%d(%s)",
		enable ? "enable" : "disable", link, rc, roc_error_msg_get(rc));
	return rc;
}

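/* Read back the BP enable bit from all BP-capable link config nodes
 * and report backpressure as enabled only if it is set on every one.
 */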
int
nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_txschq_config *req = NULL, *rsp;
	enum roc_nix_tm_tree tree = nix->tm_tree;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	bool found = false;
	uint8_t enable = 1;
	uint8_t k = 0, i;
	uint16_t link;
	int rc = 0;

	list = nix_tm_node_list(nix, tree);
	link = nix->tx_link;

	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != nix->tm_link_cfg_lvl)
			continue;

		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
			continue;

		found = true;
		if (!req) {
			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
			req->read = 1;
			req->lvl = nix->tm_link_cfg_lvl;
			k = 0;
		}

		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
		k++;

		if (k >= MAX_REGS_PER_MBOX_MSG) {
			req->num_regs = k;
			rc = mbox_process_msg(mbox, (void **)&rsp);
			if (rc || rsp->num_regs != k)
				goto err;
			req = NULL;

			/* Report as enabled only if enabled on all */
			for (i = 0; i < k; i++)
				enable &= !!(rsp->regval[i] & BIT_ULL(13));
		}
	}

	if (req) {
		req->num_regs = k;
		rc = mbox_process_msg(mbox, (void **)&rsp);
		if (rc)
			goto err;
		/* Report as enabled only if enabled on all */
		for (i = 0; i < k; i++)
			enable &= !!(rsp->regval[i] & BIT_ULL(13));
	}

	*is_enabled = found ? !!enable : false;
	return 0;
err:
	plt_err("Failed to get bp status on link %u, rc=%d(%s)", link, rc,
		roc_error_msg_get(rc));
	return rc;
}

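/* Set or clear SMQ XOFF/FLUSH. On enable, both control bits (49 and
 * 50) are set to stop and flush the SMQ; on disable, the mask leaves
 * bit 49 untouched and only clears bit 50.
 */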
int
nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	uint16_t smq;
	int rc;

	smq = node->hw_id;
	plt_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
		   enable ? "enable" : "disable");

	rc = nix_tm_clear_path_xoff(nix, node);
	if (rc)
		return rc;

	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_SMQ;
	req->num_regs = 1;

	req->reg[0] = NIX_AF_SMQX_CFG(smq);
	req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
	req->regval_mask[0] =
		enable ? ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);

	return mbox_process(mbox);
}

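/* Fetch the SMQ id and RR quantum for a leaf (SQ) node and
 * re-enable its parent SMQ.
 */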
int
nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
		     uint16_t *smq)
{
	struct nix_tm_node *node;
	int rc;

	node = nix_tm_node_search(nix, sq, nix->tm_tree);

	/* Check if we found a valid leaf node */
	if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
	    node->parent->hw_id == NIX_TM_HW_ID_INVALID) {
		return -EIO;
	}

	/* Get SMQ Id of leaf node's parent */
	*smq = node->parent->hw_id;
	*rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

	rc = nix_tm_smq_xoff(nix, node->parent, false);
	if (rc)
		return rc;
	node->flags |= NIX_TM_NODE_ENABLED;
	return 0;
}

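/* Poll NIX_LF_SQ_OP_STATUS until the SQ is quiescent. The timeout is
 * derived from the time to drain nb_desc max-sized packets at the
 * minimum shaper rate: bits-to-send / tm_rate_min seconds, expressed
 * in 10 us ticks (hence the 1E5 factor), scaled by the queue count
 * for the worst case.
 */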
int
roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
{
	struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
	uint16_t sqb_cnt, head_off, tail_off;
	uint64_t wdata, val, prev;
	uint16_t qid = sq->qid;
	int64_t *regaddr;
	uint64_t timeout; /* 10's of usec */

	/* Wait for enough time based on shaper min rate */
	timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
	/* Worst case: this SQ is at the lowest priority, so all other
	 * SQs have to drain out on their own first.
	 */
	timeout = timeout * nix->nb_tx_queues;
	timeout = timeout / nix->tm_rate_min;
	if (!timeout)
		timeout = 10000;

	wdata = ((uint64_t)qid << 32);
	regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
	val = roc_atomic64_add_nosync(wdata, regaddr);

	/* Spin multiple iterations, as "sq->fc_cache_pkts" can still
	 * have space to send pkts even though fc_mem is disabled.
	 */
	while (true) {
		prev = val;
		plt_delay_us(10);
		val = roc_atomic64_add_nosync(wdata, regaddr);
		/* Continue on error */
		if (val & BIT_ULL(63))
			continue;

		if (prev != val)
			continue;

		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		/* SQ reached quiescent state */
		if (sqb_cnt <= 1 && head_off == tail_off &&
		    (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
			break;
		}

		/* Timeout */
		if (!timeout)
			goto exit;
		timeout--;
	}

	return 0;
exit:
	roc_nix_tm_dump(sq->roc_nix);
	roc_nix_queues_ctx_dump(sq->roc_nix);
	return -EFAULT;
}

/* Flush and disable tx queue and its parent SMQ */
int
nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
{
	struct roc_nix *roc_nix = sq->roc_nix;
	struct nix_tm_node *node, *sibling;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct msg_req *req;
	struct mbox *mbox;
	struct nix *nix;
	uint16_t qid;
	int rc;

	nix = roc_nix_to_nix_priv(roc_nix);

	/* Need not do anything if tree is in disabled state */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	mbox = (&nix->dev)->mbox;
	qid = sq->qid;

	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* Find the node for this SQ */
	node = nix_tm_node_search(nix, qid, tree);
	if (!node || !(node->flags & NIX_TM_NODE_ENABLED)) {
		plt_err("Invalid node/state for sq %u", qid);
		return -EFAULT;
	}

	/* Enable CGX RXTX to drain pkts */
	if (!roc_nix->io_enabled) {
		/* Though this enables both RX MCAM entries and the CGX link,
		 * we assume all the RX queues were already stopped.
		 */
		mbox_alloc_msg_nix_lf_start_rx(mbox);
		rc = mbox_process(mbox);
		if (rc) {
			plt_err("cgx start failed, rc=%d", rc);
			return rc;
		}
	}

	/* Disable backpressure */
	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
	if (rc) {
		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
		return rc;
	}

	/* Clear SMQ XOFF in case it was enabled earlier */
	rc = nix_tm_smq_xoff(nix, node->parent, false);
	if (rc) {
		plt_err("Failed to enable smq %u, rc=%d", node->parent->hw_id,
			rc);
		return rc;
	}

	/* As per HRM, to disable an SQ, all other SQs
	 * that feed the same SMQ must be paused before the SMQ flush.
	 */
	TAILQ_FOREACH(sibling, list, node) {
		if (sibling->parent != node->parent)
			continue;
		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
			continue;

		qid = sibling->id;
		sq = nix->sqs[qid];
		if (!sq)
			continue;

		rc = roc_nix_tm_sq_aura_fc(sq, false);
		if (rc) {
			plt_err("Failed to disable sqb aura fc, rc=%d", rc);
			goto cleanup;
		}

		/* Wait for sq entries to be flushed */
		rc = roc_nix_tm_sq_flush_spin(sq);
		if (rc) {
			plt_err("Failed to drain sq %u, rc=%d", sq->qid, rc);
			return rc;
		}
	}

	node->flags &= ~NIX_TM_NODE_ENABLED;

	/* Disable and flush */
	rc = nix_tm_smq_xoff(nix, node->parent, true);
	if (rc) {
		plt_err("Failed to disable smq %u, rc=%d", node->parent->hw_id,
			rc);
		goto cleanup;
	}

	req = mbox_alloc_msg_nix_rx_sw_sync(mbox);
	if (!req)
		return -ENOSPC;

	rc = mbox_process(mbox);
cleanup:
	/* Restore cgx state */
	if (!roc_nix->io_enabled) {
		mbox_alloc_msg_nix_lf_stop_rx(mbox);
		rc |= mbox_process(mbox);
	}

	return rc;
}

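/* Undo the pre-flush state: re-enable the parent SMQ if any sibling
 * SQ is still present, restore SQB aura flow control on siblings,
 * and re-enable backpressure when RX pause is configured.
 */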
int
nix_tm_sq_flush_post(struct roc_nix_sq *sq)
{
	struct roc_nix *roc_nix = sq->roc_nix;
	struct nix_tm_node *node, *sibling;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct roc_nix_sq *s_sq;
	bool once = false;
	uint16_t qid, s_qid;
	struct nix *nix;
	int rc;

	nix = roc_nix_to_nix_priv(roc_nix);

	/* Need not do anything if tree is in disabled state */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	qid = sq->qid;
	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* Find the node for this SQ */
	node = nix_tm_node_search(nix, qid, tree);
	if (!node) {
		plt_err("Invalid node for sq %u", qid);
		return -EFAULT;
	}

	/* Enable all the siblings back */
	TAILQ_FOREACH(sibling, list, node) {
		if (sibling->parent != node->parent)
			continue;

		if (sibling->id == qid)
			continue;

		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
			continue;

		s_qid = sibling->id;
		s_sq = nix->sqs[s_qid];
		if (!s_sq)
			continue;

		if (!once) {
			/* Enable back if any SQ is still present */
			rc = nix_tm_smq_xoff(nix, node->parent, false);
			if (rc) {
				plt_err("Failed to enable smq %u, rc=%d",
					node->parent->hw_id, rc);
				return rc;
			}
			once = true;
		}

		rc = roc_nix_tm_sq_aura_fc(s_sq, true);
		if (rc) {
			plt_err("Failed to enable sqb aura fc, rc=%d", rc);
			return rc;
		}
	}

	if (!nix->rx_pause)
		return 0;

	/* Restore backpressure */
	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
	if (rc) {
		plt_err("Failed to restore backpressure, rc=%d", rc);
		return rc;
	}

	return 0;
}

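/* Update the SQ context with its parent SMQ and RR quantum via the
 * admin queue; cn9k and cn10k use differently named context fields
 * (smq_rr_quantum vs smq_rr_weight) for the same setting.
 */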
int
nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
		     bool rr_quantum_only)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t qid = node->id, smq;
	uint64_t rr_quantum;
	int rc;

	smq = node->parent->hw_id;
	rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

	if (rr_quantum_only)
		plt_tm_dbg("Update sq(%u) rr_quantum 0x%" PRIx64, qid,
			   rr_quantum);
	else
		plt_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%" PRIx64,
			   qid, smq, rr_quantum);

	if (qid > nix->nb_tx_queues)
		return -EFAULT;

	if (roc_model_is_cn9k()) {
		struct nix_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_SQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		/* smq update only when needed */
		if (!rr_quantum_only) {
			aq->sq.smq = smq;
			aq->sq_mask.smq = ~aq->sq_mask.smq;
		}
		aq->sq.smq_rr_quantum = rr_quantum;
		aq->sq_mask.smq_rr_quantum = ~aq->sq_mask.smq_rr_quantum;
	} else {
		struct nix_cn10k_aq_enq_req *aq;

		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
		if (!aq)
			return -ENOSPC;

		aq->qidx = qid;
		aq->ctype = NIX_AQ_CTYPE_SQ;
		aq->op = NIX_AQ_INSTOP_WRITE;

		/* smq update only when needed */
		if (!rr_quantum_only) {
			aq->sq.smq = smq;
			aq->sq_mask.smq = ~aq->sq_mask.smq;
		}
		aq->sq.smq_rr_weight = rr_quantum;
		aq->sq_mask.smq_rr_weight = ~aq->sq_mask.smq_rr_weight;
	}

	rc = mbox_process(mbox);
	if (rc)
		plt_err("Failed to set smq, rc=%d", rc);
	return rc;
}

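/* Return schedule queues of a HW level back to AF, either all of
 * them or only those above the reserved threshold.
 */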
int
nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
			 bool above_thresh)
{
	uint16_t avail, thresh, to_free = 0, schq;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	struct plt_bitmap *bmp;
	uint64_t slab = 0;
	uint32_t pos = 0;
	int rc = -ENOSPC;

	bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
	thresh =
		contig ? nix->contig_rsvd[hw_lvl] : nix->discontig_rsvd[hw_lvl];
	plt_bitmap_scan_init(bmp);

	avail = nix_tm_resource_avail(nix, hw_lvl, contig);

	if (above_thresh) {
		/* Release only above threshold */
		if (avail > thresh)
			to_free = avail - thresh;
	} else {
		/* Release everything */
		to_free = avail;
	}

	/* Now release resources to AF */
	while (to_free) {
		if (!slab && !plt_bitmap_scan(bmp, &pos, &slab))
			break;

		schq = bitmap_ctzll(slab);
		slab &= ~(1ULL << schq);
		schq += pos;

		/* Free to AF */
		req = mbox_alloc_msg_nix_txsch_free(mbox);
		if (req == NULL)
			return rc;
		req->flags = 0;
		req->schq_lvl = hw_lvl;
		req->schq = schq;
		rc = mbox_process(mbox);
		if (rc) {
			plt_err("failed to release hwres %s(%u) rc %d",
				nix_tm_hwlvl2str(hw_lvl), schq, rc);
			return rc;
		}

		plt_tm_dbg("Released hwres %s(%u)", nix_tm_hwlvl2str(hw_lvl),
			   schq);
		plt_bitmap_clear(bmp, schq);
		to_free--;
	}

	if (to_free) {
		plt_err("resource inconsistency for %s(%u)",
			nix_tm_hwlvl2str(hw_lvl), contig);
		return -EFAULT;
	}
	return 0;
}

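/* Release a node's HW schedule queue: keep it in the local
 * discontiguous pool while below the reserved count, otherwise
 * hand it back to AF.
 */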
int
nix_tm_free_node_resource(struct nix *nix, struct nix_tm_node *node)
{
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	struct plt_bitmap *bmp;
	uint16_t avail, hw_id;
	uint8_t hw_lvl;
	int rc = -ENOSPC;

	hw_lvl = node->hw_lvl;
	hw_id = node->hw_id;
	bmp = nix->schq_bmp[hw_lvl];
	/* Free specific HW resource */
	plt_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), hw_id, node->lvl, node->id,
		   node);

	avail = nix_tm_resource_avail(nix, hw_lvl, false);
	/* For now, always free to the discontiguous pool when the
	 * available count is below the reserved threshold.
	 */
	if (nix->discontig_rsvd[hw_lvl] &&
	    avail < nix->discontig_rsvd[hw_lvl]) {
		PLT_ASSERT(hw_id < NIX_TM_MAX_HW_TXSCHQ);
		PLT_ASSERT(plt_bitmap_get(bmp, hw_id) == 0);
		plt_bitmap_set(bmp, hw_id);
		node->hw_id = NIX_TM_HW_ID_INVALID;
		node->flags &= ~NIX_TM_NODE_HWRES;
		return 0;
	}

	/* Free to AF */
	req = mbox_alloc_msg_nix_txsch_free(mbox);
	if (req == NULL)
		return rc;
	req->flags = 0;
	req->schq_lvl = node->hw_lvl;
	req->schq = hw_id;
	rc = mbox_process(mbox);
	if (rc) {
		plt_err("failed to release hwres %s(%u) rc %d",
			nix_tm_hwlvl2str(node->hw_lvl), hw_id, rc);
		return rc;
	}

	/* Mark parent as dirty for reallocating its children */
	if (node->parent)
		node->parent->child_realloc = true;

	node->hw_id = NIX_TM_HW_ID_INVALID;
	node->flags &= ~NIX_TM_NODE_HWRES;
	plt_tm_dbg("Released hwres %s(%u) to af",
		   nix_tm_hwlvl2str(node->hw_lvl), hw_id);
	return 0;
}

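/* Unlink a node from its tree after ensuring it has no children,
 * freeing its HW resource and dropping its shaper profile reference.
 */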
int
nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
		   enum roc_nix_tm_tree tree, bool free)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;
	struct nix_tm_node *node, *child;
	struct nix_tm_node_list *list;
	uint32_t profile_id;
	int rc;

	plt_tm_dbg("Delete node id %u tree %u", node_id, tree);

	node = nix_tm_node_search(nix, node_id, tree);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	list = nix_tm_node_list(nix, tree);
	/* Check for any existing children */
	TAILQ_FOREACH(child, list, node) {
		if (child->parent == node)
			return NIX_ERR_TM_CHILD_EXISTS;
	}

	/* Remove shaper profile reference */
	profile_id = node->shaper_profile_id;
	profile = nix_tm_shaper_profile_search(nix, profile_id);

	/* Free hw resource locally */
	if (node->flags & NIX_TM_NODE_HWRES) {
		rc = nix_tm_free_node_resource(nix, node);
		if (rc)
			return rc;
	}

	if (profile)
		profile->ref_cnt--;

	TAILQ_REMOVE(list, node, node);

	plt_tm_dbg("Deleted node %s lvl %u id %u, prio 0x%x weight 0x%x "
		   "parent %u profile 0x%x tree %u (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), node->lvl, node->id,
		   node->priority, node->weight,
		   node->parent ? node->parent->id : UINT32_MAX,
		   node->shaper_profile_id, tree, node);
	/* Free only if requested */
	if (free)
		nix_tm_node_free(node);
	return 0;
}

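/* Hand out HW schedule queue IDs to all children of a parent:
 * SP children get contiguous IDs indexed by priority, while RR
 * children take the spare contiguous slot first and then the
 * discontiguous pool.
 */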
static int
nix_tm_assign_hw_id(struct nix *nix, struct nix_tm_node *parent,
		    uint16_t *contig_id, int *contig_cnt,
		    struct nix_tm_node_list *list)
{
	struct nix_tm_node *child;
	struct plt_bitmap *bmp;
	uint8_t child_hw_lvl;
	int spare_schq = -1;
	uint32_t pos = 0;
	uint64_t slab;
	uint16_t schq;

	child_hw_lvl = parent->hw_lvl - 1;
	bmp = nix->schq_bmp[child_hw_lvl];
	plt_bitmap_scan_init(bmp);
	slab = 0;

	/* Save a spare schq if it is a case of RR + SP */
	if (parent->rr_prio != 0xf && *contig_cnt > 1)
		spare_schq = *contig_id + parent->rr_prio;

	TAILQ_FOREACH(child, list, node) {
		if (!child->parent)
			continue;
		if (child->parent->id != parent->id)
			continue;

		/* Resource never expected to be present */
		if (child->flags & NIX_TM_NODE_HWRES) {
			plt_err("Resource exists for child (%s)%u, id %u (%p)",
				nix_tm_hwlvl2str(child->hw_lvl), child->hw_id,
				child->id, child);
			return -EFAULT;
		}

		if (!slab)
			plt_bitmap_scan(bmp, &pos, &slab);

		if (child->priority == parent->rr_prio && spare_schq != -1) {
			/* Use spare schq first if present */
			schq = spare_schq;
			spare_schq = -1;
			*contig_cnt = *contig_cnt - 1;

		} else if (child->priority == parent->rr_prio) {
			/* Assign a discontiguous queue */
			if (!slab) {
				plt_err("Schq not found for Child %u "
					"lvl %u (%p)",
					child->id, child->lvl, child);
				return -ENOENT;
			}

			schq = bitmap_ctzll(slab);
			slab &= ~(1ULL << schq);
			schq += pos;
			plt_bitmap_clear(bmp, schq);
		} else {
			/* Assign a contiguous queue */
			schq = *contig_id + child->priority;
			*contig_cnt = *contig_cnt - 1;
		}

		plt_tm_dbg("Resource %s(%u), for lvl %u id %u(%p)",
			   nix_tm_hwlvl2str(child->hw_lvl), schq, child->lvl,
			   child->id, child);

		child->hw_id = schq;
		child->parent_hw_id = parent->hw_id;
		child->flags |= NIX_TM_NODE_HWRES;
	}

	return 0;
}

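/* Walk parents from the top HW level down, preallocating a
 * contiguous range of child schqs for each dirty parent and
 * assigning IDs to its children; finally pick the root's own
 * HW resource.
 */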
int
nix_tm_assign_resources(struct nix *nix, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *parent, *root = NULL;
	struct plt_bitmap *bmp, *bmp_contig;
	struct nix_tm_node_list *list;
	uint8_t child_hw_lvl, hw_lvl;
	uint16_t contig_id, j;
	uint64_t slab = 0;
	uint32_t pos = 0;
	int cnt, rc;

	list = nix_tm_node_list(nix, tree);
	/* Walk from TL1 to TL4 parents */
	for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
		TAILQ_FOREACH(parent, list, node) {
			child_hw_lvl = parent->hw_lvl - 1;
			if (parent->hw_lvl != hw_lvl)
				continue;

			/* Remember root for future */
			if (parent->hw_lvl == nix->tm_root_lvl)
				root = parent;

			if (!parent->child_realloc) {
				/* Skip when parent is not dirty */
				if (nix_tm_child_res_valid(list, parent))
					continue;
				plt_err("Parent not dirty but invalid "
					"child res parent id %u(lvl %u)",
					parent->id, parent->lvl);
				return -EFAULT;
			}

			bmp_contig = nix->schq_contig_bmp[child_hw_lvl];

			/* Prealloc contiguous indices for a parent */
			contig_id = NIX_TM_MAX_HW_TXSCHQ;
			cnt = (int)parent->max_prio + 1;
			if (cnt > 0) {
				plt_bitmap_scan_init(bmp_contig);
				if (!plt_bitmap_scan(bmp_contig, &pos, &slab)) {
					plt_err("Contig schq not found");
					return -ENOENT;
				}
				contig_id = pos + bitmap_ctzll(slab);

				/* Check if we have enough */
				for (j = contig_id; j < contig_id + cnt; j++) {
					if (!plt_bitmap_get(bmp_contig, j))
						break;
				}

				if (j != contig_id + cnt) {
					plt_err("Contig schq not sufficient");
					return -ENOENT;
				}

				for (j = contig_id; j < contig_id + cnt; j++)
					plt_bitmap_clear(bmp_contig, j);
			}

			/* Assign hw id to all children */
			rc = nix_tm_assign_hw_id(nix, parent, &contig_id, &cnt,
						 list);
			if (cnt || rc) {
				plt_err("Unexpected err, contig res alloc, "
					"parent %u, of %s, rc=%d, cnt=%d",
					parent->id, nix_tm_hwlvl2str(hw_lvl),
					rc, cnt);
				return -EFAULT;
			}

			/* Clear the dirty bit as children's
			 * resources are reallocated.
			 */
			parent->child_realloc = false;
		}
	}

	/* Root is always expected to be there */
	if (!root)
		return -EFAULT;

	if (root->flags & NIX_TM_NODE_HWRES)
		return 0;

	/* Process root node */
	bmp = nix->schq_bmp[nix->tm_root_lvl];
	plt_bitmap_scan_init(bmp);
	if (!plt_bitmap_scan(bmp, &pos, &slab)) {
		plt_err("Resource not allocated for root");
		return -EIO;
	}

	root->hw_id = pos + bitmap_ctzll(slab);
	root->flags |= NIX_TM_NODE_HWRES;
	plt_bitmap_clear(bmp, root->hw_id);

	/* Get TL1 id as well when root is not TL1 */
	if (!nix_tm_have_tl1_access(nix)) {
		bmp = nix->schq_bmp[NIX_TXSCH_LVL_TL1];

		plt_bitmap_scan_init(bmp);
		if (!plt_bitmap_scan(bmp, &pos, &slab)) {
			plt_err("Resource not found for TL1");
			return -EIO;
		}
		root->parent_hw_id = pos + bitmap_ctzll(slab);
		plt_bitmap_clear(bmp, root->parent_hw_id);
	}

	plt_tm_dbg("Resource %s(%u) for root(id %u) (%p)",
		   nix_tm_hwlvl2str(root->hw_lvl), root->hw_id, root->id, root);

	return 0;
}

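/* Record the schqs handed out by AF in the local discontiguous and
 * contiguous bitmaps.
 */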
void
nix_tm_copy_rsp_to_nix(struct nix *nix, struct nix_txsch_alloc_rsp *rsp)
{
	uint8_t lvl;
	uint16_t i;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (i = 0; i < rsp->schq[lvl]; i++)
			plt_bitmap_set(nix->schq_bmp[lvl],
				       rsp->schq_list[lvl][i]);

		for (i = 0; i < rsp->schq_contig[lvl]; i++)
			plt_bitmap_set(nix->schq_contig_bmp[lvl],
				       rsp->schq_contig_list[lvl][i]);
	}
}

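/* Estimate the schq requirement for a tree and allocate it from AF,
 * splitting into multiple mbox requests when a level needs more than
 * MAX_TXSCHQ_PER_FUNC queues.
 */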
int
nix_tm_alloc_txschq(struct nix *nix, enum roc_nix_tm_tree tree)
{
	uint16_t schq_contig[NIX_TXSCH_LVL_CNT];
	struct mbox *mbox = (&nix->dev)->mbox;
	uint16_t schq[NIX_TXSCH_LVL_CNT];
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	uint8_t hw_lvl, i;
	bool pend;
	int rc;

	memset(schq, 0, sizeof(schq));
	memset(schq_contig, 0, sizeof(schq_contig));

	/* Estimate requirement */
	rc = nix_tm_resource_estimate(nix, schq_contig, schq, tree);
	if (!rc)
		return 0;

	/* Release existing contiguous resources when realloc requested
	 * as there is no way to guarantee continuity of old with new.
	 */
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		if (schq_contig[hw_lvl])
			nix_tm_release_resources(nix, hw_lvl, true, false);
	}

	/* Alloc as needed */
	do {
		pend = false;
		req = mbox_alloc_msg_nix_txsch_alloc(mbox);
		if (!req) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		mbox_memcpy(req->schq, schq, sizeof(req->schq));
		mbox_memcpy(req->schq_contig, schq_contig,
			    sizeof(req->schq_contig));

		/* Each alloc can be at max of MAX_TXSCHQ_PER_FUNC per level.
		 * So split alloc to multiple requests.
		 */
		for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
			if (req->schq[i] > MAX_TXSCHQ_PER_FUNC)
				req->schq[i] = MAX_TXSCHQ_PER_FUNC;
			schq[i] -= req->schq[i];

			if (req->schq_contig[i] > MAX_TXSCHQ_PER_FUNC)
				req->schq_contig[i] = MAX_TXSCHQ_PER_FUNC;
			schq_contig[i] -= req->schq_contig[i];

			if (schq[i] || schq_contig[i])
				pend = true;
		}

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			goto alloc_err;

		nix_tm_copy_rsp_to_nix(nix, rsp);
	} while (pend);

	nix->tm_link_cfg_lvl = rsp->link_cfg_lvl;
	nix->tm_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;
	return 0;
alloc_err:
	for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
		if (nix_tm_release_resources(nix, i, true, false))
			plt_err("Failed to release contig resources of "
				"lvl %d on error",
				i);
		if (nix_tm_release_resources(nix, i, false, false))
			plt_err("Failed to release discontig resources of "
				"lvl %d on error",
				i);
	}
	return rc;
}

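/* Build the default tree: a single non-leaf chain from ROOT down to
 * SCH3 (or SCH4 with TL1 access), with all SQ leaves under the last
 * non-leaf node.
 */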
int
nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t nonleaf_id = nix->nb_tx_queues;
	struct nix_tm_node *node = NULL;
	uint8_t leaf_lvl, lvl, lvl_end;
	uint32_t parent, i;
	int rc = 0;

	/* Add ROOT, SCH1, SCH2, SCH3, [SCH4] nodes */
	parent = ROC_NIX_TM_NODE_ID_INVALID;
	/* With TL1 access we have an extra level */
	lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
						 ROC_TM_LVL_SCH3);

	for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
		rc = -ENOMEM;
		node = nix_tm_node_alloc();
		if (!node)
			goto error;

		node->id = nonleaf_id;
		node->parent_id = parent;
		node->priority = 0;
		node->weight = NIX_TM_DFLT_RR_WT;
		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
		node->lvl = lvl;
		node->tree = ROC_NIX_TM_DEFAULT;
		node->rel_chan = NIX_TM_CHAN_INVALID;

		rc = nix_tm_node_add(roc_nix, node);
		if (rc)
			goto error;
		parent = nonleaf_id;
		nonleaf_id++;
	}

	parent = nonleaf_id - 1;
	leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
						  ROC_TM_LVL_SCH4);

	/* Add leaf nodes */
	for (i = 0; i < nix->nb_tx_queues; i++) {
		rc = -ENOMEM;
		node = nix_tm_node_alloc();
		if (!node)
			goto error;

		node->id = i;
		node->parent_id = parent;
		node->priority = 0;
		node->weight = NIX_TM_DFLT_RR_WT;
		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
		node->lvl = leaf_lvl;
		node->tree = ROC_NIX_TM_DEFAULT;
		node->rel_chan = NIX_TM_CHAN_INVALID;

		rc = nix_tm_node_add(roc_nix, node);
		if (rc)
			goto error;
	}

	return 0;
error:
	nix_tm_node_free(node);
	return rc;
}

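/* Build the rate-limit tree: a shared non-leaf chain up to SCH2/SCH3,
 * then one SMQ-level node per SQ so that each queue can be shaped
 * independently, with one leaf under each.
 */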
int
roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t nonleaf_id = nix->nb_tx_queues;
	struct nix_tm_node *node = NULL;
	uint8_t leaf_lvl, lvl, lvl_end;
	uint32_t parent, i;
	int rc = 0;

	/* Add ROOT, SCH1, SCH2 nodes */
	parent = ROC_NIX_TM_NODE_ID_INVALID;
	lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
						 ROC_TM_LVL_SCH2);

	for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
		rc = -ENOMEM;
		node = nix_tm_node_alloc();
		if (!node)
			goto error;

		node->id = nonleaf_id;
		node->parent_id = parent;
		node->priority = 0;
		node->weight = NIX_TM_DFLT_RR_WT;
		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
		node->lvl = lvl;
		node->tree = ROC_NIX_TM_RLIMIT;
		node->rel_chan = NIX_TM_CHAN_INVALID;

		rc = nix_tm_node_add(roc_nix, node);
		if (rc)
			goto error;
		parent = nonleaf_id;
		nonleaf_id++;
	}

	/* SMQ is mapped to SCH4 when we have TL1 access and SCH3 otherwise */
	lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 : ROC_TM_LVL_SCH3);

	/* Add per queue SMQ nodes i.e SCH4 / SCH3 */
	for (i = 0; i < nix->nb_tx_queues; i++) {
		rc = -ENOMEM;
		node = nix_tm_node_alloc();
		if (!node)
			goto error;

		node->id = nonleaf_id + i;
		node->parent_id = parent;
		node->priority = 0;
		node->weight = NIX_TM_DFLT_RR_WT;
		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
		node->lvl = lvl;
		node->tree = ROC_NIX_TM_RLIMIT;
		node->rel_chan = NIX_TM_CHAN_INVALID;

		rc = nix_tm_node_add(roc_nix, node);
		if (rc)
			goto error;
	}

	parent = nonleaf_id;
	leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
						  ROC_TM_LVL_SCH4);

	/* Add leaf nodes */
	for (i = 0; i < nix->nb_tx_queues; i++) {
		rc = -ENOMEM;
		node = nix_tm_node_alloc();
		if (!node)
			goto error;

		node->id = i;
		node->parent_id = parent + i;
		node->priority = 0;
		node->weight = NIX_TM_DFLT_RR_WT;
		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
		node->lvl = leaf_lvl;
		node->tree = ROC_NIX_TM_RLIMIT;
		node->rel_chan = NIX_TM_CHAN_INVALID;

		rc = nix_tm_node_add(roc_nix, node);
		if (rc)
			goto error;
	}

	return 0;
error:
	nix_tm_node_free(node);
	return rc;
}

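/* Build the PFC tree: shared ROOT and SCH1 nodes, then a private
 * SCH2->SCH3->SCH4 chain and leaf per SQ so that backpressure can
 * be applied per traffic class.
 */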
int
roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t nonleaf_id = nix->nb_tx_queues;
	struct nix_tm_node *node = NULL;
	uint8_t leaf_lvl, lvl, lvl_end;
	uint32_t tl2_node_id;
	uint32_t parent, i;
	int rc = -ENOMEM;

	parent = ROC_NIX_TM_NODE_ID_INVALID;
	lvl_end = ROC_TM_LVL_SCH3;
	leaf_lvl = ROC_TM_LVL_QUEUE;

	/* TL1 node */
	node = nix_tm_node_alloc();
	if (!node)
		goto error;

	node->id = nonleaf_id;
	node->parent_id = parent;
	node->priority = 0;
	node->weight = NIX_TM_DFLT_RR_WT;
	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
	node->lvl = ROC_TM_LVL_ROOT;
	node->tree = ROC_NIX_TM_PFC;
	node->rel_chan = NIX_TM_CHAN_INVALID;

	rc = nix_tm_node_add(roc_nix, node);
	if (rc)
		goto error;

	parent = nonleaf_id;
	nonleaf_id++;

	/* TL2 node */
	rc = -ENOMEM;
	node = nix_tm_node_alloc();
	if (!node)
		goto error;

	node->id = nonleaf_id;
	node->parent_id = parent;
	node->priority = 0;
	node->weight = NIX_TM_DFLT_RR_WT;
	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
	node->lvl = ROC_TM_LVL_SCH1;
	node->tree = ROC_NIX_TM_PFC;
	node->rel_chan = NIX_TM_CHAN_INVALID;

	rc = nix_tm_node_add(roc_nix, node);
	if (rc)
		goto error;

	tl2_node_id = nonleaf_id;
	nonleaf_id++;

	for (i = 0; i < nix->nb_tx_queues; i++) {
		parent = tl2_node_id;
		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
			rc = -ENOMEM;
			node = nix_tm_node_alloc();
			if (!node)
				goto error;

			node->id = nonleaf_id;
			node->parent_id = parent;
			node->priority = 0;
			node->weight = NIX_TM_DFLT_RR_WT;
			node->shaper_profile_id =
				ROC_NIX_TM_SHAPER_PROFILE_NONE;
			node->lvl = lvl;
			node->tree = ROC_NIX_TM_PFC;
			node->rel_chan = NIX_TM_CHAN_INVALID;

			rc = nix_tm_node_add(roc_nix, node);
			if (rc)
				goto error;

			parent = nonleaf_id;
			nonleaf_id++;
		}

		lvl = ROC_TM_LVL_SCH4;

		rc = -ENOMEM;
		node = nix_tm_node_alloc();
		if (!node)
			goto error;

		node->id = nonleaf_id;
		node->parent_id = parent;
		node->priority = 0;
		node->weight = NIX_TM_DFLT_RR_WT;
		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
		node->lvl = lvl;
		node->tree = ROC_NIX_TM_PFC;
		node->rel_chan = NIX_TM_CHAN_INVALID;

		rc = nix_tm_node_add(roc_nix, node);
		if (rc)
			goto error;

		parent = nonleaf_id;
		nonleaf_id++;

		rc = -ENOMEM;
		node = nix_tm_node_alloc();
		if (!node)
			goto error;

		node->id = i;
		node->parent_id = parent;
		node->priority = 0;
		node->weight = NIX_TM_DFLT_RR_WT;
		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
		node->lvl = leaf_lvl;
		node->tree = ROC_NIX_TM_PFC;
		node->rel_chan = NIX_TM_CHAN_INVALID;

		rc = nix_tm_node_add(roc_nix, node);
		if (rc)
			goto error;
	}

	return 0;
error:
	nix_tm_node_free(node);
	return rc;
}

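/* Free HW resources of all nodes in the selected trees and, unless
 * hw_only is set, also drop the software node objects and their
 * shaper profile references.
 */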
int
nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;
	struct nix_tm_node *node, *next_node;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	uint32_t profile_id;
	int rc = 0;

	for (tree = 0; tree < ROC_NIX_TM_TREE_MAX; tree++) {
		if (!(tree_mask & BIT(tree)))
			continue;

		plt_tm_dbg("Freeing resources of tree %u", tree);

		list = nix_tm_node_list(nix, tree);
		next_node = TAILQ_FIRST(list);
		while (next_node) {
			node = next_node;
			next_node = TAILQ_NEXT(node, node);

			if (!nix_tm_is_leaf(nix, node->lvl) &&
			    node->flags & NIX_TM_NODE_HWRES) {
				/* Clear xoff in path for flush to succeed */
				rc = nix_tm_clear_path_xoff(nix, node);
				if (rc)
					return rc;
				rc = nix_tm_free_node_resource(nix, node);
				if (rc)
					return rc;
			}
		}

		/* Leave software elements if needed */
		if (hw_only)
			continue;

		next_node = TAILQ_FIRST(list);
		while (next_node) {
			node = next_node;
			next_node = TAILQ_NEXT(node, node);

			plt_tm_dbg("Free node lvl %u id %u (%p)", node->lvl,
				   node->id, node);

			profile_id = node->shaper_profile_id;
			profile = nix_tm_shaper_profile_search(nix, profile_id);
			if (profile)
				profile->ref_cnt--;

			TAILQ_REMOVE(list, node, node);
			nix_tm_node_free(node);
		}
	}
	return rc;
}

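/* One-time TM init: set up node lists, the shaper profile list, and
 * a single memory block carved into two bitmaps (discontiguous and
 * contiguous) per HW level.
 */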
int
nix_tm_conf_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t bmp_sz, hw_lvl;
	void *bmp_mem;
	int rc, i;

	PLT_STATIC_ASSERT(sizeof(struct nix_tm_node) <= ROC_NIX_TM_NODE_SZ);
	PLT_STATIC_ASSERT(sizeof(struct nix_tm_shaper_profile) <=
			  ROC_NIX_TM_SHAPER_PROFILE_SZ);

	nix->tm_flags = 0;
	for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++)
		TAILQ_INIT(&nix->trees[i]);

	TAILQ_INIT(&nix->shaper_profile_list);
	nix->tm_rate_min = 1E9; /* 1Gbps */

	rc = -ENOMEM;
	bmp_sz = plt_bitmap_get_memory_footprint(NIX_TM_MAX_HW_TXSCHQ);
	bmp_mem = plt_zmalloc(bmp_sz * NIX_TXSCH_LVL_CNT * 2, 0);
	if (!bmp_mem)
		return rc;
	nix->schq_bmp_mem = bmp_mem;

	/* Init contiguous and discontiguous bitmap per lvl */
	rc = -EIO;
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		/* Bitmap for discontiguous resource */
		nix->schq_bmp[hw_lvl] =
			plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
		if (!nix->schq_bmp[hw_lvl])
			goto exit;

		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);

		/* Bitmap for contiguous resource */
		nix->schq_contig_bmp[hw_lvl] =
			plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
		if (!nix->schq_contig_bmp[hw_lvl])
			goto exit;

		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
	}

	rc = nix_tm_mark_init(nix);
	if (rc)
		goto exit;

	/* Disable TL1 static priority when VFs are enabled, as otherwise
	 * VFs' TL2 would need runtime reallocation to support a specific
	 * PF topology.
	 */
	if (nix->pci_dev->max_vfs)
		nix->tm_flags |= NIX_TM_TL1_NO_SP;

	/* TL1 access is only for PFs */
	if (roc_nix_is_pf(roc_nix)) {
		nix->tm_flags |= NIX_TM_TL1_ACCESS;
		nix->tm_root_lvl = NIX_TXSCH_LVL_TL1;
	} else {
		nix->tm_root_lvl = NIX_TXSCH_LVL_TL2;
	}

	return 0;
exit:
	nix_tm_conf_fini(roc_nix);
	return rc;
}

void
nix_tm_conf_fini(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint16_t hw_lvl;

	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		plt_bitmap_free(nix->schq_bmp[hw_lvl]);
		plt_bitmap_free(nix->schq_contig_bmp[hw_lvl]);
	}
	plt_free(nix->schq_bmp_mem);
}