/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

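/* Enable or disable SQB aura flow control for the given SQ.
 * The aura context fc_ena (and fc_stype) fields are updated through the NPA
 * mailbox, then the context is read back so the SQ's FC memory can be seeded:
 * with the current aura count on enable, or with nb_sqb_bufs on disable.
 */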
int
roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
{
	struct npa_aq_enq_req *req;
	struct npa_aq_enq_rsp *rsp;
	uint64_t aura_handle;
	struct npa_lf *lf;
	struct mbox *mbox;
	int rc = -ENOSPC;

	plt_tm_dbg("Setting SQ %u SQB aura FC to %s", sq->qid,
		   enable ? "enable" : "disable");

	lf = idev_npa_obj_get();
	if (!lf)
		return NPA_ERR_DEVICE_NOT_BOUNDED;

	mbox = lf->mbox;
	/* Set/clear sqb aura fc_ena */
	aura_handle = sq->aura_handle;
	req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (req == NULL)
		return rc;

	req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_WRITE;
	/* Below is not needed for aura writes but AF driver needs it */
	/* AF will translate to associated poolctx */
	req->aura.pool_addr = req->aura_id;

	req->aura.fc_ena = enable;
	req->aura_mask.fc_ena = 1;
	if (roc_model_is_cn9k() || roc_errata_npa_has_no_fc_stype_ststp()) {
		req->aura.fc_stype = 0x0;      /* STF */
		req->aura_mask.fc_stype = 0x0; /* STF */
	} else {
		req->aura.fc_stype = 0x3;      /* STSTP */
		req->aura_mask.fc_stype = 0x3; /* STSTP */
	}

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Read back npa aura ctx */
	req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (req == NULL)
		return -ENOSPC;

	req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_READ;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Init when enabled as there might be no triggers */
	if (enable)
		*(volatile uint64_t *)sq->fc = rsp->aura.count;
	else
		*(volatile uint64_t *)sq->fc = sq->nb_sqb_bufs;
	/* Sync write barrier */
	plt_wmb();
	return 0;
}

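/* Free user-tree resources unless the hierarchy is still enabled.
 * With hw_only set, only the hardware scheduler resources are released while
 * the software node state is retained.
 */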
int
roc_nix_tm_free_resources(struct roc_nix *roc_nix, bool hw_only)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix->tm_flags & NIX_TM_HIERARCHY_ENA)
		return -EBUSY;

	return nix_tm_free_resources(roc_nix, BIT(ROC_NIX_TM_USER), hw_only);
}

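/* Packet-mode profiles express rates in packets per second while the shaper
 * hardware accounts in bytes. Scale the user rates to a byte-rate equivalent
 * and, when the result is below the minimum shaper rate, bump the packet
 * length adjust so the effective rate stays representable.
 */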
static int
nix_tm_adjust_shaper_pps_rate(struct nix_tm_shaper_profile *profile)
{
	uint64_t min_rate = profile->commit.rate;

	if (!profile->pkt_mode)
		return 0;

	profile->pkt_mode_adj = 1;

	if (profile->commit.rate &&
	    (profile->commit.rate < NIX_TM_MIN_SHAPER_PPS_RATE ||
	     profile->commit.rate > NIX_TM_MAX_SHAPER_PPS_RATE))
		return NIX_ERR_TM_INVALID_COMMIT_RATE;

	if (profile->peak.rate &&
	    (profile->peak.rate < NIX_TM_MIN_SHAPER_PPS_RATE ||
	     profile->peak.rate > NIX_TM_MAX_SHAPER_PPS_RATE))
		return NIX_ERR_TM_INVALID_PEAK_RATE;

	if (profile->peak.rate && min_rate > profile->peak.rate)
		min_rate = profile->peak.rate;

	/* Each packet accumulates a single count, whereas HW
	 * considers each unit as a byte, so convert the user
	 * pps rate to bps.
	 */
	profile->commit.rate = profile->commit.rate * 8;
	profile->peak.rate = profile->peak.rate * 8;
	min_rate = min_rate * 8;

	if (min_rate && (min_rate < NIX_TM_MIN_SHAPER_RATE)) {
		int adjust = NIX_TM_MIN_SHAPER_RATE / min_rate;

		if (adjust > NIX_TM_LENGTH_ADJUST_MAX)
			return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;

		profile->pkt_mode_adj += adjust;
		profile->commit.rate += (adjust * profile->commit.rate);
		profile->peak.rate += (adjust * profile->peak.rate);
		/* The number of tokens freed after scheduling is
		 * proportional to the adjust value.
		 */
		profile->commit.size *= adjust;
		profile->peak.size *= adjust;
	}

	return 0;
}

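/* Validate a shaper profile and add it to the per-NIX profile list.
 * Commit (CIR) and peak (PIR) rate/burst pairs are checked against the
 * supported shaper range, packet mode and packet length adjust are verified
 * to be mutually exclusive, and single-rate profiles are moved to PIR.
 */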
static int
nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
			  struct nix_tm_shaper_profile *profile, int skip_ins)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint64_t commit_rate, commit_sz;
	uint64_t min_burst, max_burst;
	uint64_t peak_rate, peak_sz;
	uint32_t id;
	int rc;

	id = profile->id;
	rc = nix_tm_adjust_shaper_pps_rate(profile);
	if (rc)
		return rc;

	commit_rate = profile->commit.rate;
	commit_sz = profile->commit.size;
	peak_rate = profile->peak.rate;
	peak_sz = profile->peak.size;

	min_burst = NIX_TM_MIN_SHAPER_BURST;
	max_burst = roc_nix_tm_max_shaper_burst_get();

	if (nix_tm_shaper_profile_search(nix, id) && !skip_ins)
		return NIX_ERR_TM_SHAPER_PROFILE_EXISTS;

	if (profile->pkt_len_adj < NIX_TM_LENGTH_ADJUST_MIN ||
	    profile->pkt_len_adj > NIX_TM_LENGTH_ADJUST_MAX)
		return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;

	/* We cannot support both pkt length adjust and pkt mode */
	if (profile->pkt_mode && profile->pkt_len_adj)
		return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;

	/* Commit rate and burst size can be enabled/disabled */
	if (commit_rate || commit_sz) {
		if (commit_sz < min_burst || commit_sz > max_burst)
			return NIX_ERR_TM_INVALID_COMMIT_SZ;
		else if (!nix_tm_shaper_rate_conv(commit_rate, NULL, NULL, NULL,
						  profile->accuracy))
			return NIX_ERR_TM_INVALID_COMMIT_RATE;
	}

	/* Peak rate and burst size can be enabled/disabled */
	if (peak_sz || peak_rate) {
		if (peak_sz < min_burst || peak_sz > max_burst)
			return NIX_ERR_TM_INVALID_PEAK_SZ;
		else if (!nix_tm_shaper_rate_conv(peak_rate, NULL, NULL, NULL,
						  profile->accuracy))
			return NIX_ERR_TM_INVALID_PEAK_RATE;
	}

	/* If PIR and CIR are requested, PIR should always be larger than CIR */
	if (peak_rate && commit_rate && (commit_rate > peak_rate))
		return NIX_ERR_TM_INVALID_PEAK_RATE;

	if (!skip_ins)
		TAILQ_INSERT_TAIL(&nix->shaper_profile_list, profile, shaper);

	plt_tm_dbg("Added TM shaper profile %u, "
		   " pir %" PRIu64 " , pbs %" PRIu64 ", cir %" PRIu64
		   ", cbs %" PRIu64 " , adj %u, pkt_mode %u",
		   id, profile->peak.rate, profile->peak.size,
		   profile->commit.rate, profile->commit.size,
		   profile->pkt_len_adj, profile->pkt_mode);

	/* Always use PIR for single rate shaping */
	if (!peak_rate && commit_rate) {
		profile->peak.rate = profile->commit.rate;
		profile->peak.size = profile->commit.size;
		profile->commit.rate = 0;
		profile->commit.size = 0;
	}

	/* Update min rate */
	nix->tm_rate_min = nix_tm_shaper_profile_rate_min(nix);
	return 0;
}

int
roc_nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
			      struct roc_nix_tm_shaper_profile *roc_profile)
{
	struct nix_tm_shaper_profile *profile;

	profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;

	profile->ref_cnt = 0;
	profile->id = roc_profile->id;
	profile->commit.rate = roc_profile->commit_rate;
	profile->peak.rate = roc_profile->peak_rate;
	profile->commit.size = roc_profile->commit_sz;
	profile->peak.size = roc_profile->peak_sz;
	profile->pkt_len_adj = roc_profile->pkt_len_adj;
	profile->pkt_mode = roc_profile->pkt_mode;
	profile->free_fn = roc_profile->free_fn;
	profile->accuracy = roc_profile->accuracy;

	return nix_tm_shaper_profile_add(roc_nix, profile, 0);
}

int
roc_nix_tm_shaper_profile_update(struct roc_nix *roc_nix,
				 struct roc_nix_tm_shaper_profile *roc_profile)
{
	struct nix_tm_shaper_profile *profile;

	profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;

	profile->commit.rate = roc_profile->commit_rate;
	profile->peak.rate = roc_profile->peak_rate;
	profile->commit.size = roc_profile->commit_sz;
	profile->peak.size = roc_profile->peak_sz;
	profile->pkt_len_adj = roc_profile->pkt_len_adj;
	profile->accuracy = roc_profile->accuracy;

	return nix_tm_shaper_profile_add(roc_nix, profile, 1);
}

int
roc_nix_tm_shaper_profile_delete(struct roc_nix *roc_nix, uint32_t id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;

	profile = nix_tm_shaper_profile_search(nix, id);
	if (!profile)
		return NIX_ERR_TM_INVALID_SHAPER_PROFILE;

	if (profile->ref_cnt)
		return NIX_ERR_TM_SHAPER_PROFILE_IN_USE;

	plt_tm_dbg("Removing TM shaper profile %u", id);
	TAILQ_REMOVE(&nix->shaper_profile_list, profile, shaper);
	nix_tm_shaper_profile_free(profile);

	/* Update min rate */
	nix->tm_rate_min = nix_tm_shaper_profile_rate_min(nix);
	return 0;
}

int
roc_nix_tm_node_add(struct roc_nix *roc_nix, struct roc_nix_tm_node *roc_node)
{
	struct nix_tm_node *node;

	node = (struct nix_tm_node *)&roc_node->reserved;
	node->id = roc_node->id;
	node->priority = roc_node->priority;
	node->weight = roc_node->weight;
	node->lvl = roc_node->lvl;
	node->parent_id = roc_node->parent_id;
	node->shaper_profile_id = roc_node->shaper_profile_id;
	node->pkt_mode = roc_node->pkt_mode;
	node->pkt_mode_set = roc_node->pkt_mode_set;
	node->free_fn = roc_node->free_fn;
	node->tree = ROC_NIX_TM_USER;

	return nix_tm_node_add(roc_nix, node);
}

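/* Switch a user-tree node between byte-mode and packet-mode scheduling.
 * The update is refused once the hierarchy is enabled and the node already
 * has children, since the children's quantum has been programmed in the
 * node's current mode.
 */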
int
roc_nix_tm_node_pkt_mode_update(struct roc_nix *roc_nix, uint32_t node_id,
				bool pkt_mode)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node, *child;
	struct nix_tm_node_list *list;
	int num_children = 0;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	if (node->pkt_mode == pkt_mode) {
		node->pkt_mode_set = true;
		return 0;
	}

	/* Count existing children; if there are any, the pkt mode
	 * cannot be updated as the children's quantum has already
	 * been taken in.
	 */
	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
	TAILQ_FOREACH(child, list, node) {
		if (child->parent == node)
			num_children++;
	}

	/* Cannot update mode while the tree is enabled and the node
	 * has children.
	 */
	if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) && num_children)
		return -EBUSY;

	if (node->pkt_mode_set && num_children)
		return NIX_ERR_TM_PKT_MODE_MISMATCH;

	node->pkt_mode = pkt_mode;
	node->pkt_mode_set = true;

	return 0;
}

int
roc_nix_tm_node_name_get(struct roc_nix *roc_nix, uint32_t node_id, char *buf,
			 size_t buflen)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node) {
		plt_strlcpy(buf, "???", buflen);
		return NIX_ERR_TM_INVALID_NODE;
	}

	if (node->hw_lvl == NIX_TXSCH_LVL_CNT)
		snprintf(buf, buflen, "SQ_%d", node->id);
	else
		snprintf(buf, buflen, "%s_%d", nix_tm_hwlvl2str(node->hw_lvl),
			 node->hw_id);
	return 0;
}

int
roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id, bool free)
{
	return nix_tm_node_delete(roc_nix, node_id, ROC_NIX_TM_USER, free);
}

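/* Flush all SMQs of the active tree by asserting SW_XOFF on each SMQ and
 * then releasing it again. As noted below, the HRM requires all SQs to be
 * empty before an SMQ flush is issued, so callers are expected to have
 * drained the SQs beforehand.
 */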
int
roc_nix_smq_flush(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct nix_tm_node *node;
	int rc = 0;

	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* XOFF & flush all SMQs. HRM mandates that all SQs are
	 * empty before an SMQ flush is issued.
	 */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;
		if (!(node->flags & NIX_TM_NODE_HWRES))
			continue;

		rc = nix_tm_smq_xoff(nix, node, true);
		if (rc) {
			plt_err("Failed to disable smq %u, rc=%d", node->hw_id,
				rc);
			goto exit;
		}
	}

	/* XON all SMQs */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;
		if (!(node->flags & NIX_TM_NODE_HWRES))
			continue;

		rc = nix_tm_smq_xoff(nix, node, false);
		if (rc) {
			plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
				rc);
			goto exit;
		}
	}
exit:
	return rc;
}

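/* Tear down the currently enabled scheduling hierarchy.
 * The sequence is: re-enable RX/CGX so in-flight packets can drain, XON all
 * SMQs, drop SQB aura flow control and spin until each SQ is flushed, then
 * XOFF all SMQs and sanity-check the SQ head/tail pointers before clearing
 * the hierarchy-enabled flag.
 */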
int
roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint16_t sqb_cnt, head_off, tail_off;
	uint16_t sq_cnt = nix->nb_tx_queues;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_tm_node_list *list;
	enum roc_nix_tm_tree tree;
	struct nix_tm_node *node;
	struct roc_nix_sq *sq;
	uint64_t wdata, val;
	uintptr_t regaddr;
	int rc = -1, i;

	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	plt_tm_dbg("Disabling hierarchy on %s", nix->pci_dev->name);

	tree = nix->tm_tree;
	list = nix_tm_node_list(nix, tree);

	/* Enable CGX RXTX to drain pkts */
	if (!roc_nix->io_enabled) {
		/* Though this enables both RX MCAM entries and the CGX link,
		 * all RX queues are assumed to have been stopped long before.
		 */
		mbox_alloc_msg_nix_lf_start_rx(mbox);
		rc = mbox_process(mbox);
		if (rc) {
			plt_err("cgx start failed, rc=%d", rc);
			return rc;
		}
	}

	/* XON all SMQs */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;
		if (!(node->flags & NIX_TM_NODE_HWRES))
			continue;

		rc = nix_tm_smq_xoff(nix, node, false);
		if (rc) {
			plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
				rc);
			goto cleanup;
		}
	}

	/* Disable backpressure; it will be re-enabled if needed on
	 * hierarchy enable.
	 */
	for (i = 0; i < sq_cnt; i++) {
		sq = nix->sqs[i];
		if (!sq)
			continue;

		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
		if (rc && rc != -ENOENT) {
			plt_err("Failed to disable backpressure, rc=%d", rc);
			goto cleanup;
		}
	}

	/* Flush all tx queues */
	for (i = 0; i < sq_cnt; i++) {
		sq = nix->sqs[i];
		if (!sq)
			continue;

		rc = roc_nix_tm_sq_aura_fc(sq, false);
		if (rc) {
			plt_err("Failed to disable sqb aura fc, rc=%d", rc);
			goto cleanup;
		}

		/* Wait for sq entries to be flushed */
		rc = roc_nix_tm_sq_flush_spin(sq);
		if (rc) {
			plt_err("Failed to drain sq, rc=%d\n", rc);
			goto cleanup;
		}
	}

	/* XOFF & flush all SMQs. HRM mandates that all SQs are
	 * empty before an SMQ flush is issued.
	 */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;
		if (!(node->flags & NIX_TM_NODE_HWRES))
			continue;

		rc = nix_tm_smq_xoff(nix, node, true);
		if (rc) {
			plt_err("Failed to disable smq %u, rc=%d", node->hw_id,
				rc);
			goto cleanup;
		}

		node->flags &= ~NIX_TM_NODE_ENABLED;
	}

	/* Verify sanity of all tx queues */
	for (i = 0; i < sq_cnt; i++) {
		sq = nix->sqs[i];
		if (!sq)
			continue;

		wdata = ((uint64_t)sq->qid << 32);
		regaddr = nix->base + NIX_LF_SQ_OP_STATUS;
		val = roc_atomic64_add_nosync(wdata, (int64_t *)regaddr);

		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		if (sqb_cnt > 1 || head_off != tail_off ||
		    (*(uint64_t *)sq->fc != sq->nb_sqb_bufs))
			plt_err("Failed to gracefully flush sq %u", sq->qid);
	}

	nix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;
cleanup:
	/* Restore cgx state */
	if (!roc_nix->io_enabled) {
		mbox_alloc_msg_nix_lf_stop_rx(mbox);
		rc |= mbox_process(mbox);
	}
	return rc;
}

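/* Enable a scheduling hierarchy on the given tree.
 * Hardware resources held by the other trees are released first, then
 * scheduler queues are allocated, assigned and programmed for this tree.
 * When xmit_enable is set, SQ scheduling data is updated, SMQs are XON'd and
 * SQB aura flow control is turned on so the leaf SQs can transmit.
 */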
int
roc_nix_tm_hierarchy_enable(struct roc_nix *roc_nix, enum roc_nix_tm_tree tree,
			    bool xmit_enable)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	struct roc_nix_sq *sq;
	uint32_t tree_mask;
	uint16_t sq_id;
	int rc;

	if (tree >= ROC_NIX_TM_TREE_MAX)
		return NIX_ERR_PARAM;

	if (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {
		if (nix->tm_tree != tree)
			return -EBUSY;
		return 0;
	}

	plt_tm_dbg("Enabling hierarchy on %s, xmit_ena %u, tree %u",
		   nix->pci_dev->name, xmit_enable, tree);

	/* Free hw resources of other trees */
	tree_mask = NIX_TM_TREE_MASK_ALL;
	tree_mask &= ~BIT(tree);

	rc = nix_tm_free_resources(roc_nix, tree_mask, true);
	if (rc) {
		plt_err("failed to free resources of other trees, rc=%d", rc);
		return rc;
	}

	/* Update active tree before starting to do anything */
	nix->tm_tree = tree;

	nix_tm_update_parent_info(nix, tree);

	rc = nix_tm_alloc_txschq(nix, tree);
	if (rc) {
		plt_err("TM failed to alloc tm resources=%d", rc);
		return rc;
	}

	rc = nix_tm_assign_resources(nix, tree);
	if (rc) {
		plt_err("TM failed to assign tm resources=%d", rc);
		return rc;
	}

	rc = nix_tm_txsch_reg_config(nix, tree);
	if (rc) {
		plt_err("TM failed to configure sched registers=%d", rc);
		return rc;
	}

	list = nix_tm_node_list(nix, tree);
	/* Mark all non-leaf nodes as enabled */
	TAILQ_FOREACH(node, list, node) {
		if (!nix_tm_is_leaf(nix, node->lvl))
			node->flags |= NIX_TM_NODE_ENABLED;
	}

	if (!xmit_enable)
		goto skip_sq_update;

	/* Update SQ Sched Data while SQ is idle */
	TAILQ_FOREACH(node, list, node) {
		if (!nix_tm_is_leaf(nix, node->lvl))
			continue;

		rc = nix_tm_sq_sched_conf(nix, node, false);
		if (rc) {
			plt_err("SQ %u sched update failed, rc=%d", node->id,
				rc);
			return rc;
		}
	}

	/* Finally XON all SMQs */
	TAILQ_FOREACH(node, list, node) {
		if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
			continue;

		rc = nix_tm_smq_xoff(nix, node, false);
		if (rc) {
			plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
				rc);
			return rc;
		}
	}

	/* Enable xmit as all the topology is ready */
	TAILQ_FOREACH(node, list, node) {
		if (!nix_tm_is_leaf(nix, node->lvl))
			continue;

		sq_id = node->id;
		sq = nix->sqs[sq_id];

		rc = roc_nix_tm_sq_aura_fc(sq, true);
		if (rc) {
			plt_err("TM sw xon failed on SQ %u, rc=%d", node->id,
				rc);
			return rc;
		}
		node->flags |= NIX_TM_NODE_ENABLED;
	}

skip_sq_update:
	nix->tm_flags |= NIX_TM_HIERARCHY_ENA;
	return 0;
}

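/* Suspend or resume a user-tree node by programming SW_XOFF on its
 * scheduler queue through the txschq config mailbox. The node's ENABLED
 * flag is updated only when the mailbox call succeeds.
 */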
int
roc_nix_tm_node_suspend_resume(struct roc_nix *roc_nix, uint32_t node_id,
			       bool suspend)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	struct nix_tm_node *node;
	uint16_t flags;
	int rc;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	flags = node->flags;
	flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
				(flags | NIX_TM_NODE_ENABLED);

	if (node->flags == flags)
		return 0;

	/* Send mbox for state change */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);

	req->lvl = node->hw_lvl;
	req->num_regs =
		nix_tm_sw_xoff_prep(node, suspend, req->reg, req->regval);
	rc = mbox_process(mbox);
	if (!rc)
		node->flags = flags;
	return rc;
}

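/* Pre-allocate scheduler queues for a level so that later node additions do
 * not have to go to the AF for every request. Both the contiguous and
 * discontiguous pools are topped up to the requested counts, and anything
 * held above the new thresholds is released back.
 */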
int
roc_nix_tm_prealloc_res(struct roc_nix *roc_nix, uint8_t lvl,
			uint16_t discontig, uint16_t contig)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	uint8_t hw_lvl;
	int rc = -ENOSPC;

	hw_lvl = nix_tm_lvl2nix(nix, lvl);
	if (hw_lvl == NIX_TXSCH_LVL_CNT)
		return -EINVAL;

	/* Preallocate contiguous */
	if (nix->contig_rsvd[hw_lvl] < contig) {
		req = mbox_alloc_msg_nix_txsch_alloc(mbox);
		if (req == NULL)
			return rc;
		req->schq_contig[hw_lvl] = contig - nix->contig_rsvd[hw_lvl];

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			return rc;

		nix_tm_copy_rsp_to_nix(nix, rsp);
	}

	/* Preallocate discontiguous */
	if (nix->discontig_rsvd[hw_lvl] < discontig) {
		req = mbox_alloc_msg_nix_txsch_alloc(mbox);
		if (req == NULL)
			return -ENOSPC;
		req->schq[hw_lvl] = discontig - nix->discontig_rsvd[hw_lvl];

		rc = mbox_process_msg(mbox, (void *)&rsp);
		if (rc)
			return rc;

		nix_tm_copy_rsp_to_nix(nix, rsp);
	}

	/* Save thresholds */
	nix->contig_rsvd[hw_lvl] = contig;
	nix->discontig_rsvd[hw_lvl] = discontig;
	/* Release anything present above thresholds */
	nix_tm_release_resources(nix, hw_lvl, true, true);
	nix_tm_release_resources(nix, hw_lvl, false, true);
	return 0;
}

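/* Attach a different shaper profile to a user-tree node (or detach with
 * ROC_NIX_TM_SHAPER_PROFILE_NONE). Reference counts of the old and new
 * profiles are adjusted, and if the hierarchy is live the node is briefly
 * SW_XOFF'd while the PIR/CIR registers are reprogrammed.
 */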
int
roc_nix_tm_node_shaper_update(struct roc_nix *roc_nix, uint32_t node_id,
			      uint32_t profile_id, bool force_update)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile = NULL;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req;
	struct nix_tm_node *node;
	uint8_t k;
	int rc;

	/* Shaper updates valid only for user nodes */
	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node || nix_tm_is_leaf(nix, node->lvl))
		return NIX_ERR_TM_INVALID_NODE;

	if (profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE) {
		profile = nix_tm_shaper_profile_search(nix, profile_id);
		if (!profile)
			return NIX_ERR_TM_INVALID_SHAPER_PROFILE;
	}

	/* Pkt mode should match existing node's pkt mode */
	if (profile && profile->pkt_mode != node->pkt_mode)
		return NIX_ERR_TM_PKT_MODE_MISMATCH;

	if ((profile_id == node->shaper_profile_id) && !force_update) {
		return 0;
	} else if (profile_id != node->shaper_profile_id) {
		struct nix_tm_shaper_profile *old;

		/* Find old shaper profile and reduce ref count */
		old = nix_tm_shaper_profile_search(nix,
						   node->shaper_profile_id);
		if (old)
			old->ref_cnt--;

		if (profile)
			profile->ref_cnt++;

		/* Reduce older shaper ref count and increase new one */
		node->shaper_profile_id = profile_id;
	}

	/* Nothing to do if hierarchy not yet enabled */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	node->flags &= ~NIX_TM_NODE_ENABLED;

	/* Flush the specific node with SW_XOFF */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = node->hw_lvl;
	k = nix_tm_sw_xoff_prep(node, true, req->reg, req->regval);
	req->num_regs = k;

	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Update the PIR/CIR and clear SW XOFF */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = node->hw_lvl;

	k = nix_tm_shaper_reg_prep(node, profile, req->reg, req->regval);

	k += nix_tm_sw_xoff_prep(node, false, &req->reg[k], &req->regval[k]);

	req->num_regs = k;
	rc = mbox_process(mbox);
	if (!rc)
		node->flags |= NIX_TM_NODE_ENABLED;
	return rc;
}

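/* Update scheduling parameters of a user-tree node. Only a dynamic weight
 * change is supported today; the parent and priority must match the current
 * configuration. For leaf nodes the SQ context is updated directly; for
 * non-leaf nodes the parent and all siblings are SW_XOFF'd around the
 * register update.
 */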
int
roc_nix_tm_node_parent_update(struct roc_nix *roc_nix, uint32_t node_id,
			      uint32_t new_parent_id, uint32_t priority,
			      uint32_t weight)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_tm_node *node, *sibling;
	struct nix_tm_node *new_parent;
	struct nix_txschq_config *req;
	struct nix_tm_node_list *list;
	uint8_t k;
	int rc;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	/* Parent id valid only for non root nodes */
	if (node->hw_lvl != nix->tm_root_lvl) {
		new_parent =
			nix_tm_node_search(nix, new_parent_id, ROC_NIX_TM_USER);
		if (!new_parent)
			return NIX_ERR_TM_INVALID_PARENT;

		/* Current support is only for dynamic weight update */
		if (node->parent != new_parent || node->priority != priority)
			return NIX_ERR_TM_PARENT_PRIO_UPDATE;
	}

	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
	/* Skip if no change */
	if (node->weight == weight)
		return 0;

	node->weight = weight;

	/* Nothing to do if hierarchy not yet enabled */
	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return 0;

	/* For leaf nodes, SQ CTX needs update */
	if (nix_tm_is_leaf(nix, node->lvl)) {
		/* Update SQ quantum data on the fly */
		rc = nix_tm_sq_sched_conf(nix, node, true);
		if (rc)
			return NIX_ERR_TM_SQ_UPDATE_FAIL;
	} else {
		/* XOFF Parent node */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->parent->hw_lvl;
		req->num_regs = nix_tm_sw_xoff_prep(node->parent, true,
						    req->reg, req->regval);
		rc = mbox_process(mbox);
		if (rc)
			return rc;

		/* XOFF this node and all other siblings */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->hw_lvl;

		k = 0;
		TAILQ_FOREACH(sibling, list, node) {
			if (sibling->parent != node->parent)
				continue;
			k += nix_tm_sw_xoff_prep(sibling, true, &req->reg[k],
						 &req->regval[k]);
		}
		req->num_regs = k;
		rc = mbox_process(mbox);
		if (rc)
			return rc;

		/* Update new weight for current node */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->hw_lvl;
		req->num_regs =
			nix_tm_sched_reg_prep(nix, node, req->reg, req->regval);
		rc = mbox_process(mbox);
		if (rc)
			return rc;

		/* XON this node and all other siblings */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->hw_lvl;

		k = 0;
		TAILQ_FOREACH(sibling, list, node) {
			if (sibling->parent != node->parent)
				continue;
			k += nix_tm_sw_xoff_prep(sibling, false, &req->reg[k],
						 &req->regval[k]);
		}
		req->num_regs = k;
		rc = mbox_process(mbox);
		if (rc)
			return rc;

		/* XON Parent node */
		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = node->parent->hw_lvl;
		req->num_regs = nix_tm_sw_xoff_prep(node->parent, false,
						    req->reg, req->regval);
		rc = mbox_process(mbox);
		if (rc)
			return rc;
	}
	return 0;
}

int
roc_nix_tm_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t tree_mask;
	int rc;

	if (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {
		plt_err("Cannot init while existing hierarchy is enabled");
		return -EBUSY;
	}

	/* Free up all user resources already held */
	tree_mask = NIX_TM_TREE_MASK_ALL;
	rc = nix_tm_free_resources(roc_nix, tree_mask, false);
	if (rc) {
		plt_err("Failed to freeup all nodes and resources, rc=%d", rc);
		return rc;
	}

	/* Prepare default tree */
	rc = nix_tm_prepare_default_tree(roc_nix);
	if (rc) {
		plt_err("failed to prepare default tm tree, rc=%d", rc);
		return rc;
	}

	return rc;
}

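/* Rate limit a single SQ by shaping its parent MDQ with a PIR-only profile.
 * A rate of zero asserts SW_XOFF on the parent instead, effectively pausing
 * the queue. Only valid while the hierarchy is enabled on a non-user tree.
 */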
int
roc_nix_tm_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid, uint64_t rate)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile profile;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_tm_node *node, *parent;

	volatile uint64_t *reg, *regval;
	struct nix_txschq_config *req;
	uint16_t flags;
	uint8_t k = 0;
	int rc;

	if ((nix->tm_tree == ROC_NIX_TM_USER) ||
	    !(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return NIX_ERR_TM_INVALID_TREE;

	node = nix_tm_node_search(nix, qid, nix->tm_tree);

	/* Check that we found a valid leaf node */
	if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
	    node->parent->hw_id == NIX_TM_HW_ID_INVALID)
		return NIX_ERR_TM_INVALID_NODE;

	parent = node->parent;
	flags = parent->flags;

	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_MDQ;
	reg = req->reg;
	regval = req->regval;

	if (rate == 0) {
		k += nix_tm_sw_xoff_prep(parent, true, &reg[k], &regval[k]);
		flags &= ~NIX_TM_NODE_ENABLED;
		goto exit;
	}

	if (!(flags & NIX_TM_NODE_ENABLED)) {
		k += nix_tm_sw_xoff_prep(parent, false, &reg[k], &regval[k]);
		flags |= NIX_TM_NODE_ENABLED;
	}

	/* Use only PIR for rate limit */
	memset(&profile, 0, sizeof(profile));
	profile.peak.rate = rate;
	/* Minimum burst of ~4 us worth of Tx bytes */
	profile.peak.size = PLT_MAX((uint64_t)roc_nix_max_pkt_len(roc_nix),
				    (4ul * rate) / ((uint64_t)1E6 * 8));
	if (!nix->tm_rate_min || nix->tm_rate_min > rate)
		nix->tm_rate_min = rate;

	k += nix_tm_shaper_reg_prep(parent, &profile, &reg[k], &regval[k]);
exit:
	req->num_regs = k;
	rc = mbox_process(mbox);
	if (rc)
		return rc;

	parent->flags = flags;
	return 0;
}

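/* Release all traffic manager state on teardown. Transmit is assumed to be
 * already disabled by the caller. All node trees are freed, every txsch
 * queue is returned to the AF via TXSCHQ_FREE_ALL, the local bitmaps and
 * reserved counts are cleared, and shaper profiles are dropped.
 */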
void
roc_nix_tm_fini(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	uint32_t tree_mask;
	uint8_t hw_lvl;
	int rc;

	/* Xmit is assumed to be disabled */
	/* Free up resources already held */
	tree_mask = NIX_TM_TREE_MASK_ALL;
	rc = nix_tm_free_resources(roc_nix, tree_mask, false);
	if (rc)
		plt_err("Failed to freeup existing nodes or rsrcs, rc=%d", rc);

	/* Free all other hw resources */
	req = mbox_alloc_msg_nix_txsch_free(mbox);
	if (req == NULL)
		return;

	req->flags = TXSCHQ_FREE_ALL;
	rc = mbox_process(mbox);
	if (rc)
		plt_err("Failed to freeup all res, rc=%d", rc);

	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		plt_bitmap_reset(nix->schq_bmp[hw_lvl]);
		plt_bitmap_reset(nix->schq_contig_bmp[hw_lvl]);
		nix->contig_rsvd[hw_lvl] = 0;
		nix->discontig_rsvd[hw_lvl] = 0;
	}

	/* Clear shaper profiles */
	nix_tm_clear_shaper_profiles(nix);
	nix->tm_tree = 0;
	nix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;
}

int
roc_nix_tm_rsrc_count(struct roc_nix *roc_nix, uint16_t schq[ROC_TM_LVL_MAX])
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct free_rsrcs_rsp *rsp;
	uint8_t hw_lvl;
	int rc, i;

	/* Get the current free resources */
	mbox_alloc_msg_free_rsrc_cnt(mbox);
	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	for (i = 0; i < ROC_TM_LVL_MAX; i++) {
		hw_lvl = nix_tm_lvl2nix(nix, i);
		if (hw_lvl == NIX_TXSCH_LVL_CNT)
			continue;

		schq[i] = (nix->is_nix1 ? rsp->schq_nix1[hw_lvl] :
						rsp->schq[hw_lvl]);
	}

	return 0;
}

void
roc_nix_tm_rsrc_max(bool pf, uint16_t schq[ROC_TM_LVL_MAX])
{
	uint8_t hw_lvl, i;
	uint16_t max;

	for (i = 0; i < ROC_TM_LVL_MAX; i++) {
		hw_lvl = pf ? nix_tm_lvl2nix_tl1_root(i) :
				    nix_tm_lvl2nix_tl2_root(i);

		switch (hw_lvl) {
		case NIX_TXSCH_LVL_SMQ:
			max = (roc_model_is_cn9k() ?
					     NIX_CN9K_TXSCH_LVL_SMQ_MAX :
					     NIX_TXSCH_LVL_SMQ_MAX);
			break;
		case NIX_TXSCH_LVL_TL4:
			max = NIX_TXSCH_LVL_TL4_MAX;
			break;
		case NIX_TXSCH_LVL_TL3:
			max = NIX_TXSCH_LVL_TL3_MAX;
			break;
		case NIX_TXSCH_LVL_TL2:
			max = pf ? NIX_TXSCH_LVL_TL2_MAX : 1;
			break;
		case NIX_TXSCH_LVL_TL1:
			max = pf ? 1 : 0;
			break;
		default:
			max = 0;
			break;
		}
		schq[i] = max;
	}
}

bool
roc_nix_tm_root_has_sp(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix->tm_flags & NIX_TM_TL1_NO_SP)
		return false;
	return true;
}