/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Marvell International Ltd.
 * Copyright(c) 2018 Semihalf.
 * All rights reserved.
 */

#include <rte_malloc.h>

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>

#include "mrvl_tm.h"

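/*
 * MUSDK expresses CIR in kbps and CBS in kB (see the conversions in
 * mrvl_hierarchy_commit()); the limits below are their equivalents in
 * Bytes/s and Bytes.
 */
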
/** Minimum rate value in Bytes/s */
#define MRVL_RATE_MIN (PP2_PPIO_MIN_CIR * 1000 / 8)

/** Minimum burst size in Bytes */
#define MRVL_BURST_MIN (PP2_PPIO_MIN_CBS * 1000)

/** Maximum burst size in Bytes */
#define MRVL_BURST_MAX 256000000

/** Maximum WRR weight */
#define MRVL_WEIGHT_MAX 255

/**
 * Get maximum port rate in Bytes/s.
 *
 * @param dev Pointer to the device.
 * @param rate Pointer to the rate.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_get_max_rate(struct rte_eth_dev *dev, uint64_t *rate)
{
	struct ethtool_cmd edata;
	struct ifreq req;
	int ret, fd;

	memset(&edata, 0, sizeof(edata));
	memset(&req, 0, sizeof(req));
	edata.cmd = ETHTOOL_GSET;
	strcpy(req.ifr_name, dev->data->name);
	req.ifr_data = (void *)&edata;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd == -1)
		return -1;

	ret = ioctl(fd, SIOCETHTOOL, &req);
	if (ret == -1) {
		close(fd);
		return -1;
	}

	close(fd);

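	/* ethtool_cmd_speed() reports the link speed in Mb/s; convert to Bytes/s */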
	*rate = ethtool_cmd_speed(&edata) * 1000 * 1000 / 8;

	return 0;
}

/**
 * Initialize traffic manager related data.
 *
 * @param dev Pointer to the device.
 * @returns 0 on success, negative value otherwise.
 */
int
mrvl_tm_init(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	LIST_INIT(&priv->shaper_profiles);
	LIST_INIT(&priv->nodes);

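	/* Query the kernel for the maximum rate only once; reuse a value obtained earlier. */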
	if (priv->rate_max)
		return 0;

	return mrvl_get_max_rate(dev, &priv->rate_max);
}

/**
 * Cleanup traffic manager related data.
 *
 * @param dev Pointer to the device.
 */
void mrvl_tm_deinit(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_tm_shaper_profile *profile =
		LIST_FIRST(&priv->shaper_profiles);
	struct mrvl_tm_node *node = LIST_FIRST(&priv->nodes);

	while (profile) {
		struct mrvl_tm_shaper_profile *next = LIST_NEXT(profile, next);

		LIST_REMOVE(profile, next);
		rte_free(profile);
		profile = next;
	}

	while (node) {
		struct mrvl_tm_node *next = LIST_NEXT(node, next);

		LIST_REMOVE(node, next);
		rte_free(node);
		node = next;
	}
}

/**
 * Get node using its id.
 *
 * @param priv Pointer to the port's private data.
 * @param node_id Id used by this node.
 * @returns Pointer to the node if it exists, NULL otherwise.
 */
static struct mrvl_tm_node *
mrvl_node_from_id(struct mrvl_priv *priv, uint32_t node_id)
{
	struct mrvl_tm_node *node;

	LIST_FOREACH(node, &priv->nodes, next)
		if (node->id == node_id)
			return node;

	return NULL;
}

/**
 * Check whether the node is a leaf or the root.
 *
 * @param dev Pointer to the device.
 * @param node_id Id used by this node.
 * @param is_leaf Pointer to a flag indicating whether the node is a leaf.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, int *is_leaf,
		   struct rte_tm_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_tm_node *node;

	if (!is_leaf)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	node = mrvl_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	*is_leaf = node->type == MRVL_NODE_QUEUE ? 1 : 0;

	return 0;
}

/**
 * Get traffic manager capabilities.
 *
 * @param dev Pointer to the device.
 * @param cap Pointer to the capabilities.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_capabilities_get(struct rte_eth_dev *dev,
		      struct rte_tm_capabilities *cap,
		      struct rte_tm_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Capabilities are missing\n");

	memset(cap, 0, sizeof(*cap));

	cap->n_nodes_max = 1 + dev->data->nb_tx_queues; /* port + txqs number */
	cap->n_levels_max = 2; /* port level + txqs level */
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->shaper_n_max = cap->n_nodes_max;
	cap->shaper_private_n_max = cap->shaper_n_max;
	cap->shaper_private_rate_min = MRVL_RATE_MIN;
	cap->shaper_private_rate_max = priv->rate_max;
	cap->shaper_private_packet_mode_supported = 0;
	cap->shaper_private_byte_mode_supported = 1;

	cap->sched_n_children_max = dev->data->nb_tx_queues;
	cap->sched_sp_n_priorities_max = dev->data->nb_tx_queues;
	cap->sched_wfq_n_children_per_group_max = dev->data->nb_tx_queues;
	cap->sched_wfq_n_groups_max = 1;
	cap->sched_wfq_weight_max = MRVL_WEIGHT_MAX;
	cap->sched_wfq_packet_mode_supported = 0;
	cap->sched_wfq_byte_mode_supported = 1;

	cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_SUSPEND_RESUME |
				   RTE_TM_UPDATE_NODE_STATS;
	cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;

	return 0;
}

/**
 * Get traffic manager hierarchy level capabilities.
 *
 * @param dev Pointer to the device.
 * @param level_id Id of the level.
 * @param cap Pointer to the level capabilities.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_level_capabilities_get(struct rte_eth_dev *dev,
			    uint32_t level_id,
			    struct rte_tm_level_capabilities *cap,
			    struct rte_tm_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	memset(cap, 0, sizeof(*cap));

	if (level_id != MRVL_NODE_PORT && level_id != MRVL_NODE_QUEUE)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_LEVEL_ID,
					 NULL, "Wrong level id\n");

	if (level_id == MRVL_NODE_PORT) {
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->non_leaf_nodes_identical = 1;

		cap->nonleaf.shaper_private_supported = 1;
		cap->nonleaf.shaper_private_rate_min = MRVL_RATE_MIN;
		cap->nonleaf.shaper_private_rate_max = priv->rate_max;
		cap->nonleaf.shaper_private_packet_mode_supported = 0;
		cap->nonleaf.shaper_private_byte_mode_supported = 1;

		cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			dev->data->nb_tx_queues;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max = MRVL_WEIGHT_MAX;
		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
		cap->nonleaf.sched_wfq_byte_mode_supported = 1;
		cap->nonleaf.stats_mask = RTE_TM_STATS_N_PKTS |
					  RTE_TM_STATS_N_BYTES;
	} else { /* level_id == MRVL_NODE_QUEUE */
		cap->n_nodes_max = dev->data->nb_tx_queues;
		cap->n_nodes_leaf_max = dev->data->nb_tx_queues;
		cap->leaf_nodes_identical = 1;

		cap->leaf.shaper_private_supported = 1;
		cap->leaf.shaper_private_rate_min = MRVL_RATE_MIN;
		cap->leaf.shaper_private_rate_max = priv->rate_max;
		cap->leaf.shaper_private_packet_mode_supported = 0;
		cap->leaf.shaper_private_byte_mode_supported = 1;
		cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS;
	}

	return 0;
}

/**
 * Get node capabilities.
 *
 * @param dev Pointer to the device.
 * @param node_id Id of the node.
 * @param cap Pointer to the capabilities.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id,
			   struct rte_tm_node_capabilities *cap,
			   struct rte_tm_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_tm_node *node;

	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	memset(cap, 0, sizeof(*cap));

	node = mrvl_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	cap->shaper_private_supported = 1;
	cap->shaper_private_rate_min = MRVL_RATE_MIN;
	cap->shaper_private_rate_max = priv->rate_max;
	cap->shaper_private_packet_mode_supported = 0;
	cap->shaper_private_byte_mode_supported = 1;

	if (node->type == MRVL_NODE_PORT) {
		cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			dev->data->nb_tx_queues;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max = MRVL_WEIGHT_MAX;
		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
		cap->nonleaf.sched_wfq_byte_mode_supported = 1;
		cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
	} else {
		cap->stats_mask = RTE_TM_STATS_N_PKTS;
	}

	return 0;
}

/**
 * Get shaper profile using its id.
 *
 * @param priv Pointer to the port's private data.
 * @param shaper_profile_id Id used by the shaper.
 * @returns Pointer to the shaper profile if it exists, NULL otherwise.
 */
static struct mrvl_tm_shaper_profile *
mrvl_shaper_profile_from_id(struct mrvl_priv *priv, uint32_t shaper_profile_id)
{
	struct mrvl_tm_shaper_profile *profile;

	LIST_FOREACH(profile, &priv->shaper_profiles, next)
		if (profile->id == shaper_profile_id)
			return profile;

	return NULL;
}

/**
 * Add a new shaper profile.
 *
 * @param dev Pointer to the device.
 * @param shaper_profile_id Id of the new profile.
 * @param params Pointer to the shaper profile parameters.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_shaper_profile_add(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
			struct rte_tm_shaper_params *params,
			struct rte_tm_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_tm_shaper_profile *profile;

	if (!params)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	if (params->committed.rate)
		return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
				NULL, "Committed rate not supported\n");

	if (params->committed.size)
		return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE,
				NULL, "Committed bucket size not supported\n");

	if (params->peak.rate < MRVL_RATE_MIN ||
	    params->peak.rate > priv->rate_max)
		return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
				NULL, "Peak rate is out of range\n");

	if (params->peak.size < MRVL_BURST_MIN ||
	    params->peak.size > MRVL_BURST_MAX)
		return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
				NULL, "Peak size is out of range\n");

	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Wrong shaper profile id\n");

	profile = mrvl_shaper_profile_from_id(priv, shaper_profile_id);
	if (profile)
		return -rte_tm_error_set(error, EEXIST,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Profile id already exists\n");

	profile = rte_zmalloc_socket(NULL, sizeof(*profile), 0,
				     rte_socket_id());
	if (!profile)
		return -rte_tm_error_set(error, ENOMEM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	profile->id = shaper_profile_id;
	rte_memcpy(&profile->params, params, sizeof(profile->params));

	LIST_INSERT_HEAD(&priv->shaper_profiles, profile, next);

	return 0;
}

/**
 * Remove a shaper profile.
 *
 * @param dev Pointer to the device.
 * @param shaper_profile_id Id of the shaper profile.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_shaper_profile_delete(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
			   struct rte_tm_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_tm_shaper_profile *profile;

	profile = mrvl_shaper_profile_from_id(priv, shaper_profile_id);
	if (!profile)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Profile id does not exist\n");

	if (profile->refcnt)
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Profile is used\n");

	LIST_REMOVE(profile, next);
	rte_free(profile);

	return 0;
}

/**
 * Check node parameters.
 *
 * @param dev Pointer to the device.
 * @param node_id Id used by the node.
 * @param priority Priority value.
 * @param weight Weight value.
 * @param level_id Id of the level.
 * @param params Pointer to the node parameters.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
		       uint32_t priority, uint32_t weight, uint32_t level_id,
		       struct rte_tm_node_params *params,
		       struct rte_tm_error *error)
{
	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id is invalid\n");

	if (priority)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
					 NULL, "Priority should be 0\n");

	if (weight > MRVL_WEIGHT_MAX)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
					 NULL, "Weight is out of range\n");

	if (level_id != MRVL_NODE_PORT && level_id != MRVL_NODE_QUEUE)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_LEVEL_ID,
					 NULL, "Wrong level id\n");

	if (!params)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	if (params->shared_shaper_id)
		return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
				NULL, "Shared shaper is not supported\n");

	if (params->n_shared_shapers)
		return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
				NULL, "Shared shaper is not supported\n");

	/* verify port (root node) settings */
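	/* node ids below the number of txqs denote leaf (txq) nodes; any other id refers to the port */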
	if (node_id >= dev->data->nb_tx_queues) {
		if (params->nonleaf.wfq_weight_mode)
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
				NULL, "WFQ is not supported\n");

		if (params->nonleaf.n_sp_priorities != 1)
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
				NULL, "SP is not supported\n");

		if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
					   RTE_TM_STATS_N_BYTES))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
				NULL,
				"Requested port stats are not supported\n");

		return 0;
	}

	/* verify txq (leaf node) settings */
	if (params->leaf.cman)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
					 NULL,
					 "Congestion mngmt is not supported\n");

	if (params->leaf.wred.wred_profile_id)
		return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
				NULL, "WRED is not supported\n");

	if (params->leaf.wred.shared_wred_context_id)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID,
			NULL, "WRED is not supported\n");

	if (params->leaf.wred.n_shared_wred_contexts)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
			NULL, "WRED is not supported\n");

	if (params->stats_mask & ~RTE_TM_STATS_N_PKTS)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			"Requested txq stats are not supported\n");

	return 0;
}

/**
 * Add a new node.
 *
 * @param dev Pointer to the device.
 * @param node_id Id of the node.
 * @param parent_node_id Id of the parent node.
 * @param priority Priority value.
 * @param weight Weight value.
 * @param level_id Id of the level.
 * @param params Pointer to the node parameters.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	      uint32_t parent_node_id, uint32_t priority, uint32_t weight,
	      uint32_t level_id, struct rte_tm_node_params *params,
	      struct rte_tm_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_tm_shaper_profile *profile = NULL;
	struct mrvl_tm_node *node, *parent = NULL;
	int ret;

	if (priv->ppio)
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Port is already started\n");

	ret = mrvl_node_check_params(dev, node_id, priority, weight, level_id,
				     params, error);
	if (ret)
		return ret;

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		profile = mrvl_shaper_profile_from_id(priv,
						 params->shaper_profile_id);
		if (!profile)
			return -rte_tm_error_set(error, ENODEV,
					RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					NULL, "Shaper id does not exist\n");
	}

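	/* A null parent id denotes the root; only a single port (root) node may exist */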
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		LIST_FOREACH(node, &priv->nodes, next) {
			if (node->type != MRVL_NODE_PORT)
				continue;

			return -rte_tm_error_set(error, EINVAL,
						 RTE_TM_ERROR_TYPE_UNSPECIFIED,
						 NULL, "Root node exists\n");
		}
	} else {
		parent = mrvl_node_from_id(priv, parent_node_id);
		if (!parent)
			return -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
					NULL, "Node id does not exist\n");
	}

	node = mrvl_node_from_id(priv, node_id);
	if (node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id already exists\n");

	node = rte_zmalloc_socket(NULL, sizeof(*node), 0, rte_socket_id());
	if (!node)
		return -rte_tm_error_set(error, ENOMEM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	node->id = node_id;
	node->type = parent_node_id == RTE_TM_NODE_ID_NULL ? MRVL_NODE_PORT :
							     MRVL_NODE_QUEUE;

	if (parent) {
		node->parent = parent;
		parent->refcnt++;
	}

	if (profile) {
		node->profile = profile;
		profile->refcnt++;
	}

	node->weight = weight;
	node->stats_mask = params->stats_mask;

	LIST_INSERT_HEAD(&priv->nodes, node, next);

	return 0;
}

/**
 * Delete a node.
 *
 * @param dev Pointer to the device.
 * @param node_id Id of the node.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		 struct rte_tm_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_tm_node *node;

	if (priv->ppio) {
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Port is already started\n");
	}

	node = mrvl_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	if (node->refcnt)
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id is used\n");

	if (node->parent)
		node->parent->refcnt--;

	if (node->profile)
		node->profile->refcnt--;

	LIST_REMOVE(node, next);
	rte_free(node);

	return 0;
}

/**
 * Helper for suspending a specific tx queue.
 *
 * @param dev Pointer to the device.
 * @param node_id Id used by this node.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int mrvl_node_suspend_one(struct rte_eth_dev *dev, uint32_t node_id,
				 struct rte_tm_error *error)
{
	int ret = dev->dev_ops->tx_queue_stop(dev, node_id);
	if (ret)
		return -rte_tm_error_set(error, ret,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Failed to suspend a txq\n");

	return 0;
}

/**
 * Suspend a node.
 *
 * @param dev Pointer to the device.
 * @param node_id Id of the node.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_node_suspend(struct rte_eth_dev *dev, uint32_t node_id,
		  struct rte_tm_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_tm_node *node, *tmp;
	int ret;

	node = mrvl_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

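	/* Suspending the root (port) node suspends all of its txq children */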
	if (!node->parent) {
		LIST_FOREACH(tmp, &priv->nodes, next) {
			if (!tmp->parent)
				continue;

			if (node != tmp->parent)
				continue;

			ret = mrvl_node_suspend_one(dev, tmp->id, error);
			if (ret)
				return ret;
		}

		return 0;
	}

	return mrvl_node_suspend_one(dev, node_id, error);
}

/**
 * Resume a node.
 *
 * @param dev Pointer to the device.
 * @param node_id Id of the node.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_node_resume(struct rte_eth_dev *dev, uint32_t node_id,
		 struct rte_tm_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_tm_node *node;
	int ret;

	node = mrvl_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	if (!node->parent)
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Cannot resume a port\n");

	ret = dev->dev_ops->tx_queue_start(dev, node_id);
	if (ret)
		return -rte_tm_error_set(error, ret,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Failed to resume a txq\n");
	return 0;
}

/**
 * Apply traffic manager hierarchy.
 *
 * @param dev Pointer to the device.
 * @param clear_on_fail Flag indicating whether to clean up on failure.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
		      struct rte_tm_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_tm_node *node;
	int ret;

	if (priv->ppio) {
		ret = -rte_tm_error_set(error, EPERM,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL, "Port is already started\n");
		goto out;
	}

	LIST_FOREACH(node, &priv->nodes, next) {
		struct pp2_ppio_outq_params *p;

		if (node->type == MRVL_NODE_PORT) {
			if (!node->profile)
				continue;

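			/* Port-level shaper: CIR in kbps, CBS in kB as expected by MUSDK */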
			priv->ppio_params.rate_limit_enable = 1;
			priv->ppio_params.rate_limit_params.cir =
				node->profile->params.peak.rate * 8 / 1000;
			priv->ppio_params.rate_limit_params.cbs =
				node->profile->params.peak.size / 1000;

			MRVL_LOG(INFO,
				"Port rate limit overrides txqs rate limit");

			continue;
		}

		if (node->id >= dev->data->nb_tx_queues) {
			ret = -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_NODE_ID, NULL,
					"Not enough txqs are configured\n");
			goto out;
		}

		p = &priv->ppio_params.outqs_params.outqs_params[node->id];

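		/* A non-zero weight selects WRR scheduling; zero selects strict priority */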
		if (node->weight) {
			p->sched_mode = PP2_PPIO_SCHED_M_WRR;
			p->weight = node->weight;
		} else {
			p->sched_mode = PP2_PPIO_SCHED_M_SP;
			p->weight = 0;
		}

		if (node->profile) {
			p->rate_limit_enable = 1;
			/* convert Bytes/s to kilo bits/s */
			p->rate_limit_params.cir =
				node->profile->params.peak.rate * 8 / 1000;
			/* convert Bytes to kilobytes */
			p->rate_limit_params.cbs =
				node->profile->params.peak.size / 1000;
		} else {
			p->rate_limit_enable = 0;
			p->rate_limit_params.cir = 0;
			p->rate_limit_params.cbs = 0;
		}
	}

	/* reset to defaults in case applied tm hierarchy is empty */
	if (LIST_EMPTY(&priv->nodes)) {
		int i;

		for (i = 0; i < priv->ppio_params.outqs_params.num_outqs; i++) {
			struct pp2_ppio_outq_params *p =
				&priv->ppio_params.outqs_params.outqs_params[i];

			p->sched_mode = PP2_PPIO_SCHED_M_WRR;
			p->weight = 0;
			p->rate_limit_enable = 0;
			p->rate_limit_params.cir = 0;
			p->rate_limit_params.cbs = 0;
		}
	}

	return 0;
out:
	if (clear_on_fail) {
		mrvl_tm_deinit(dev);
		mrvl_tm_init(dev);
	}

	return ret;
}

/**
 * Read statistics counters for the current node.
 *
 * @param dev Pointer to the device.
 * @param node_id Id of the node.
 * @param stats Pointer to the statistics counters.
 * @param stats_mask Pointer to mask of enabled statistics counters
 *                   that are retrieved.
 * @param clear Flag indicating whether to clear statistics.
 *              Non-zero value clears statistics.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_node_stats_read(struct rte_eth_dev *dev, uint32_t node_id,
		     struct rte_tm_node_stats *stats, uint64_t *stats_mask,
		     int clear, struct rte_tm_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_tm_node *node;
	int ret;

	if (!priv->ppio) {
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Port is not started\n");
	}

	node = mrvl_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	if (stats_mask)
		*stats_mask = node->stats_mask;

	if (!stats)
		return 0;

	memset(stats, 0, sizeof(*stats));

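	/* The root node reports port-level counters; leaf nodes report per-outq counters */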
	if (!node->parent) {
		struct pp2_ppio_statistics s;

		memset(&s, 0, sizeof(s));
		ret = pp2_ppio_get_statistics(priv->ppio, &s, clear);
		if (ret)
			return -rte_tm_error_set(error, -ret,
					RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
					"Failed to read port statistics\n");

		if (node->stats_mask & RTE_TM_STATS_N_PKTS)
			stats->n_pkts = s.tx_packets;

		if (node->stats_mask & RTE_TM_STATS_N_BYTES)
			stats->n_bytes = s.tx_bytes;
	} else {
		struct pp2_ppio_outq_statistics s;

		memset(&s, 0, sizeof(s));
		ret = pp2_ppio_outq_get_statistics(priv->ppio, node_id, &s,
						   clear);
		if (ret)
			return -rte_tm_error_set(error, -ret,
					RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
					"Failed to read txq statistics\n");

		if (node->stats_mask & RTE_TM_STATS_N_PKTS)
			stats->n_pkts = s.deq_desc;
	}

	return 0;
}

/**
 * Update node statistics.
 *
 * @param dev Pointer to the device.
 * @param node_id Id of the node.
 * @param stats_mask Bitmask of statistics counters to be enabled.
 * @param error Pointer to the error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_node_stats_update(struct rte_eth_dev *dev, uint32_t node_id,
		       uint64_t stats_mask, struct rte_tm_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_tm_node *node;

	node = mrvl_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	if (!node->parent) {
		if (stats_mask & ~(RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
				NULL,
				"Requested port stats are not supported\n");
	} else {
		if (stats_mask & ~RTE_TM_STATS_N_PKTS)
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
				NULL,
				"Requested txq stats are not supported\n");
	}

	node->stats_mask = stats_mask;

	return 0;
}

const struct rte_tm_ops mrvl_tm_ops = {
	.node_type_get = mrvl_node_type_get,
	.capabilities_get = mrvl_capabilities_get,
	.level_capabilities_get = mrvl_level_capabilities_get,
	.node_capabilities_get = mrvl_node_capabilities_get,
	.shaper_profile_add = mrvl_shaper_profile_add,
	.shaper_profile_delete = mrvl_shaper_profile_delete,
	.node_add = mrvl_node_add,
	.node_delete = mrvl_node_delete,
	.node_suspend = mrvl_node_suspend,
	.node_resume = mrvl_node_resume,
	.hierarchy_commit = mrvl_hierarchy_commit,
	.node_stats_update = mrvl_node_stats_update,
	.node_stats_read = mrvl_node_stats_read,
};