1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 */
4
5 #include <stdio.h>
6 #include <string.h>
7
8 #include <rte_common.h>
9 #include <rte_log.h>
10 #include <rte_malloc.h>
11 #include <rte_cycles.h>
12 #include <rte_prefetch.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mbuf.h>
15 #include <rte_bitmap.h>
16 #include <rte_reciprocal.h>
17
18 #include "rte_sched.h"
19 #include "rte_sched_common.h"
20 #include "rte_approx.h"
21
22 #ifdef __INTEL_COMPILER
23 #pragma warning(disable:2259) /* conversion may lose significant bits */
24 #endif
25
26 #ifndef RTE_SCHED_PORT_N_GRINDERS
27 #define RTE_SCHED_PORT_N_GRINDERS 8
28 #endif
29
30 #define RTE_SCHED_TB_RATE_CONFIG_ERR (1e-7)
31 #define RTE_SCHED_WRR_SHIFT 3
32 #define RTE_SCHED_MAX_QUEUES_PER_TC RTE_SCHED_BE_QUEUES_PER_PIPE
33 #define RTE_SCHED_GRINDER_PCACHE_SIZE (64 / RTE_SCHED_QUEUES_PER_PIPE)
34 #define RTE_SCHED_PIPE_INVALID UINT32_MAX
35 #define RTE_SCHED_BMP_POS_INVALID UINT32_MAX
36
37 /* Scaling for cycles_per_byte calculation
38 * Chosen so that minimum rate is 480 bit/sec
39 */
40 #define RTE_SCHED_TIME_SHIFT 8
41
42 struct rte_sched_pipe_profile {
43 /* Token bucket (TB) */
44 uint64_t tb_period;
45 uint64_t tb_credits_per_period;
46 uint64_t tb_size;
47
48 /* Pipe traffic classes */
49 uint64_t tc_period;
50 uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
51 uint8_t tc_ov_weight;
52
53 /* Pipe best-effort traffic class queues */
54 uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
55 };
56
57 struct rte_sched_pipe {
58 /* Token bucket (TB) */
59 uint64_t tb_time; /* time of last update */
60 uint64_t tb_credits;
61
62 /* Pipe profile and flags */
63 uint32_t profile;
64
65 /* Traffic classes (TCs) */
66 uint64_t tc_time; /* time of next update */
67 uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
68
69 /* Weighted Round Robin (WRR) */
70 uint8_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
71
72 /* TC oversubscription */
73 uint64_t tc_ov_credits;
74 uint8_t tc_ov_period_id;
75 } __rte_cache_aligned;
76
77 struct rte_sched_queue {
78 uint16_t qw;
79 uint16_t qr;
80 };
81
82 struct rte_sched_queue_extra {
83 struct rte_sched_queue_stats stats;
84 #ifdef RTE_SCHED_CMAN
85 RTE_STD_C11
86 union {
87 struct rte_red red;
88 struct rte_pie pie;
89 };
90 #endif
91 };
92
93 enum grinder_state {
94 e_GRINDER_PREFETCH_PIPE = 0,
95 e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
96 e_GRINDER_PREFETCH_MBUF,
97 e_GRINDER_READ_MBUF
98 };
99
100 struct rte_sched_subport_profile {
101 /* Token bucket (TB) */
102 uint64_t tb_period;
103 uint64_t tb_credits_per_period;
104 uint64_t tb_size;
105
106 uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
107 uint64_t tc_period;
108 };
109
110 struct rte_sched_grinder {
111 /* Pipe cache */
112 uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
113 uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
114 uint32_t pcache_w;
115 uint32_t pcache_r;
116
117 /* Current pipe */
118 enum grinder_state state;
119 uint32_t productive;
120 uint32_t pindex;
121 struct rte_sched_subport *subport;
122 struct rte_sched_subport_profile *subport_params;
123 struct rte_sched_pipe *pipe;
124 struct rte_sched_pipe_profile *pipe_params;
125
126 /* TC cache */
127 uint8_t tccache_qmask[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
128 uint32_t tccache_qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
129 uint32_t tccache_w;
130 uint32_t tccache_r;
131
132 /* Current TC */
133 uint32_t tc_index;
134 struct rte_sched_queue *queue[RTE_SCHED_MAX_QUEUES_PER_TC];
135 struct rte_mbuf **qbase[RTE_SCHED_MAX_QUEUES_PER_TC];
136 uint32_t qindex[RTE_SCHED_MAX_QUEUES_PER_TC];
137 uint16_t qsize;
138 uint32_t qmask;
139 uint32_t qpos;
140 struct rte_mbuf *pkt;
141
142 /* WRR */
143 uint16_t wrr_tokens[RTE_SCHED_BE_QUEUES_PER_PIPE];
144 uint16_t wrr_mask[RTE_SCHED_BE_QUEUES_PER_PIPE];
145 uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
146 };
147
148 struct rte_sched_subport {
149 /* Token bucket (TB) */
150 uint64_t tb_time; /* time of last update */
151 uint64_t tb_credits;
152
153 /* Traffic classes (TCs) */
154 uint64_t tc_time; /* time of next update */
155 uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
156
157 /* TC oversubscription */
158 uint64_t tc_ov_wm;
159 uint64_t tc_ov_wm_min;
160 uint64_t tc_ov_wm_max;
161 uint8_t tc_ov_period_id;
162 uint8_t tc_ov;
163 uint32_t tc_ov_n;
164 double tc_ov_rate;
165
166 /* Statistics */
167 struct rte_sched_subport_stats stats __rte_cache_aligned;
168
169 /* subport profile */
170 uint32_t profile;
171 /* Subport pipes */
172 uint32_t n_pipes_per_subport_enabled;
173 uint32_t n_pipe_profiles;
174 uint32_t n_max_pipe_profiles;
175
176 /* Pipe best-effort TC rate */
177 uint64_t pipe_tc_be_rate_max;
178
179 /* Pipe queue sizes */
180 uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
181
182 #ifdef RTE_SCHED_CMAN
183 bool cman_enabled;
184 enum rte_sched_cman_mode cman;
185
186 RTE_STD_C11
187 union {
188 struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
189 struct rte_pie_config pie_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
190 };
191 #endif
192
193 /* Scheduling loop detection */
194 uint32_t pipe_loop;
195 uint32_t pipe_exhaustion;
196
197 /* Bitmap */
198 struct rte_bitmap *bmp;
199 uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;
200
201 /* Grinders */
202 struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
203 uint32_t busy_grinders;
204
205 /* Queue base calculation */
206 uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
207 uint32_t qsize_sum;
208
209 struct rte_sched_pipe *pipe;
210 struct rte_sched_queue *queue;
211 struct rte_sched_queue_extra *queue_extra;
212 struct rte_sched_pipe_profile *pipe_profiles;
213 uint8_t *bmp_array;
214 struct rte_mbuf **queue_array;
215 uint8_t memory[0] __rte_cache_aligned;
216 } __rte_cache_aligned;
217
218 struct rte_sched_port {
219 /* User parameters */
220 uint32_t n_subports_per_port;
221 uint32_t n_pipes_per_subport;
222 uint32_t n_pipes_per_subport_log2;
223 uint16_t pipe_queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
224 uint8_t pipe_tc[RTE_SCHED_QUEUES_PER_PIPE];
225 uint8_t tc_queue[RTE_SCHED_QUEUES_PER_PIPE];
226 uint32_t n_subport_profiles;
227 uint32_t n_max_subport_profiles;
228 uint64_t rate;
229 uint32_t mtu;
230 uint32_t frame_overhead;
231 int socket;
232
233 /* Timing */
234 uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cycles */
235 uint64_t time_cpu_bytes; /* Current CPU time measured in bytes */
236 uint64_t time; /* Current NIC TX time measured in bytes */
237 struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */
238 uint64_t cycles_per_byte;
239
240 /* Grinders */
241 struct rte_mbuf **pkts_out;
242 uint32_t n_pkts_out;
243 uint32_t subport_id;
244
245 /* Large data structures */
246 struct rte_sched_subport_profile *subport_profiles;
247 struct rte_sched_subport *subports[0] __rte_cache_aligned;
248 } __rte_cache_aligned;
249
250 enum rte_sched_subport_array {
251 e_RTE_SCHED_SUBPORT_ARRAY_PIPE = 0,
252 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE,
253 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA,
254 e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES,
255 e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY,
256 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY,
257 e_RTE_SCHED_SUBPORT_ARRAY_TOTAL,
258 };
259
260 static inline uint32_t
261 rte_sched_subport_pipe_queues(struct rte_sched_subport *subport)
262 {
263 return RTE_SCHED_QUEUES_PER_PIPE * subport->n_pipes_per_subport_enabled;
264 }
265
266 static inline struct rte_mbuf **
267 rte_sched_subport_pipe_qbase(struct rte_sched_subport *subport, uint32_t qindex)
268 {
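/* Each pipe owns RTE_SCHED_QUEUES_PER_PIPE (16) queues, hence the
 * pipe index is the queue index shifted right by 4 bits.
 */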
269 uint32_t pindex = qindex >> 4;
270 uint32_t qpos = qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1);
271
272 return (subport->queue_array + pindex *
273 subport->qsize_sum + subport->qsize_add[qpos]);
274 }
275
276 static inline uint16_t
277 rte_sched_subport_pipe_qsize(struct rte_sched_port *port,
278 struct rte_sched_subport *subport, uint32_t qindex)
279 {
280 uint32_t tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];
281
282 return subport->qsize[tc];
283 }
284
285 static inline uint32_t
286 rte_sched_port_queues_per_port(struct rte_sched_port *port)
287 {
288 uint32_t n_queues = 0, i;
289
290 for (i = 0; i < port->n_subports_per_port; i++)
291 n_queues += rte_sched_subport_pipe_queues(port->subports[i]);
292
293 return n_queues;
294 }
295
296 static inline uint16_t
297 rte_sched_port_pipe_queue(struct rte_sched_port *port, uint32_t traffic_class)
298 {
299 uint16_t pipe_queue = port->pipe_queue[traffic_class];
300
301 return pipe_queue;
302 }
303
304 static inline uint8_t
305 rte_sched_port_pipe_tc(struct rte_sched_port *port, uint32_t qindex)
306 {
307 uint8_t pipe_tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];
308
309 return pipe_tc;
310 }
311
312 static inline uint8_t
313 rte_sched_port_tc_queue(struct rte_sched_port *port, uint32_t qindex)
314 {
315 uint8_t tc_queue = port->tc_queue[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];
316
317 return tc_queue;
318 }
319
320 static int
321 pipe_profile_check(struct rte_sched_pipe_params *params,
322 uint64_t rate, uint16_t *qsize)
323 {
324 uint32_t i;
325
326 /* Pipe parameters */
327 if (params == NULL) {
328 RTE_LOG(ERR, SCHED,
329 "%s: Incorrect value for parameter params\n", __func__);
330 return -EINVAL;
331 }
332
333 /* TB rate: non-zero, not greater than port rate */
334 if (params->tb_rate == 0 ||
335 params->tb_rate > rate) {
336 RTE_LOG(ERR, SCHED,
337 "%s: Incorrect value for tb rate\n", __func__);
338 return -EINVAL;
339 }
340
341 /* TB size: non-zero */
342 if (params->tb_size == 0) {
343 RTE_LOG(ERR, SCHED,
344 "%s: Incorrect value for tb size\n", __func__);
345 return -EINVAL;
346 }
347
348 /* TC rate: non-zero if qsize non-zero, less than pipe rate */
349 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
350 if ((qsize[i] == 0 && params->tc_rate[i] != 0) ||
351 (qsize[i] != 0 && (params->tc_rate[i] == 0 ||
352 params->tc_rate[i] > params->tb_rate))) {
353 RTE_LOG(ERR, SCHED,
354 "%s: Incorrect value for qsize or tc_rate\n", __func__);
355 return -EINVAL;
356 }
357 }
358
359 if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
360 qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
361 RTE_LOG(ERR, SCHED,
362 "%s: Incorrect value for be traffic class rate\n", __func__);
363 return -EINVAL;
364 }
365
366 /* TC period: non-zero */
367 if (params->tc_period == 0) {
368 RTE_LOG(ERR, SCHED,
369 "%s: Incorrect value for tc period\n", __func__);
370 return -EINVAL;
371 }
372
373 /* Best effort tc oversubscription weight: non-zero */
374 if (params->tc_ov_weight == 0) {
375 RTE_LOG(ERR, SCHED,
376 "%s: Incorrect value for tc ov weight\n", __func__);
377 return -EINVAL;
378 }
379
380 /* Queue WRR weights: non-zero */
381 for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
382 if (params->wrr_weights[i] == 0) {
383 RTE_LOG(ERR, SCHED,
384 "%s: Incorrect value for wrr weight\n", __func__);
385 return -EINVAL;
386 }
387 }
388
389 return 0;
390 }
391
392 static int
393 subport_profile_check(struct rte_sched_subport_profile_params *params,
394 uint64_t rate)
395 {
396 uint32_t i;
397
398 /* Check user parameters */
399 if (params == NULL) {
400 RTE_LOG(ERR, SCHED, "%s: "
401 "Incorrect value for parameter params\n", __func__);
402 return -EINVAL;
403 }
404
405 if (params->tb_rate == 0 || params->tb_rate > rate) {
406 RTE_LOG(ERR, SCHED, "%s: "
407 "Incorrect value for tb rate\n", __func__);
408 return -EINVAL;
409 }
410
411 if (params->tb_size == 0) {
412 RTE_LOG(ERR, SCHED, "%s: "
413 "Incorrect value for tb size\n", __func__);
414 return -EINVAL;
415 }
416
417 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
418 uint64_t tc_rate = params->tc_rate[i];
419
420 if (tc_rate == 0 || (tc_rate > params->tb_rate)) {
421 RTE_LOG(ERR, SCHED, "%s: "
422 "Incorrect value for tc rate\n", __func__);
423 return -EINVAL;
424 }
425 }
426
427 if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
428 RTE_LOG(ERR, SCHED, "%s: "
429 "Incorrect tc rate(best effort)\n", __func__);
430 return -EINVAL;
431 }
432
433 if (params->tc_period == 0) {
434 RTE_LOG(ERR, SCHED, "%s: "
435 "Incorrect value for tc period\n", __func__);
436 return -EINVAL;
437 }
438
439 return 0;
440 }
441
442 static int
443 rte_sched_port_check_params(struct rte_sched_port_params *params)
444 {
445 uint32_t i;
446
447 if (params == NULL) {
448 RTE_LOG(ERR, SCHED,
449 "%s: Incorrect value for parameter params\n", __func__);
450 return -EINVAL;
451 }
452
453 /* socket */
454 if (params->socket < 0) {
455 RTE_LOG(ERR, SCHED,
456 "%s: Incorrect value for socket id\n", __func__);
457 return -EINVAL;
458 }
459
460 /* rate */
461 if (params->rate == 0) {
462 RTE_LOG(ERR, SCHED,
463 "%s: Incorrect value for rate\n", __func__);
464 return -EINVAL;
465 }
466
467 /* mtu */
468 if (params->mtu == 0) {
469 RTE_LOG(ERR, SCHED,
470 "%s: Incorrect value for mtu\n", __func__);
471 return -EINVAL;
472 }
473
474 /* n_subports_per_port: non-zero, limited to 16 bits, power of 2 */
475 if (params->n_subports_per_port == 0 ||
476 params->n_subports_per_port > 1u << 16 ||
477 !rte_is_power_of_2(params->n_subports_per_port)) {
478 RTE_LOG(ERR, SCHED,
479 "%s: Incorrect value for number of subports\n", __func__);
480 return -EINVAL;
481 }
482
483 if (params->subport_profiles == NULL ||
484 params->n_subport_profiles == 0 ||
485 params->n_max_subport_profiles == 0 ||
486 params->n_subport_profiles > params->n_max_subport_profiles) {
487 RTE_LOG(ERR, SCHED,
488 "%s: Incorrect value for subport profiles\n", __func__);
489 return -EINVAL;
490 }
491
492 for (i = 0; i < params->n_subport_profiles; i++) {
493 struct rte_sched_subport_profile_params *p =
494 params->subport_profiles + i;
495 int status;
496
497 status = subport_profile_check(p, params->rate);
498 if (status != 0) {
499 RTE_LOG(ERR, SCHED,
500 "%s: subport profile check failed(%d)\n",
501 __func__, status);
502 return -EINVAL;
503 }
504 }
505
506 /* n_pipes_per_subport: non-zero, power of 2 */
507 if (params->n_pipes_per_subport == 0 ||
508 !rte_is_power_of_2(params->n_pipes_per_subport)) {
509 RTE_LOG(ERR, SCHED,
510 "%s: Incorrect value for maximum pipes number\n", __func__);
511 return -EINVAL;
512 }
513
514 return 0;
515 }
516
517 static uint32_t
518 rte_sched_subport_get_array_base(struct rte_sched_subport_params *params,
519 enum rte_sched_subport_array array)
520 {
521 uint32_t n_pipes_per_subport = params->n_pipes_per_subport_enabled;
522 uint32_t n_subport_pipe_queues =
523 RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport;
524
525 uint32_t size_pipe = n_pipes_per_subport * sizeof(struct rte_sched_pipe);
526 uint32_t size_queue =
527 n_subport_pipe_queues * sizeof(struct rte_sched_queue);
528 uint32_t size_queue_extra
529 = n_subport_pipe_queues * sizeof(struct rte_sched_queue_extra);
530 uint32_t size_pipe_profiles = params->n_max_pipe_profiles *
531 sizeof(struct rte_sched_pipe_profile);
532 uint32_t size_bmp_array =
533 rte_bitmap_get_memory_footprint(n_subport_pipe_queues);
534 uint32_t size_per_pipe_queue_array, size_queue_array;
535
536 uint32_t base, i;
537
538 size_per_pipe_queue_array = 0;
539 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
540 if (i < RTE_SCHED_TRAFFIC_CLASS_BE)
541 size_per_pipe_queue_array +=
542 params->qsize[i] * sizeof(struct rte_mbuf *);
543 else
544 size_per_pipe_queue_array += RTE_SCHED_MAX_QUEUES_PER_TC *
545 params->qsize[i] * sizeof(struct rte_mbuf *);
546 }
547 size_queue_array = n_pipes_per_subport * size_per_pipe_queue_array;
548
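/* Per-subport memory is laid out in this order: pipes, queues,
 * queue extras, pipe profiles, bitmap array, queue (mbuf pointer) array.
 */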
549 base = 0;
550
551 if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE)
552 return base;
553 base += RTE_CACHE_LINE_ROUNDUP(size_pipe);
554
555 if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE)
556 return base;
557 base += RTE_CACHE_LINE_ROUNDUP(size_queue);
558
559 if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA)
560 return base;
561 base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);
562
563 if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES)
564 return base;
565 base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);
566
567 if (array == e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY)
568 return base;
569 base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);
570
571 if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY)
572 return base;
573 base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);
574
575 return base;
576 }
577
578 static void
579 rte_sched_subport_config_qsize(struct rte_sched_subport *subport)
580 {
581 uint32_t i;
582
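/* qsize_add[q] is the prefix-sum offset (in mbuf slots) of queue q
 * within a pipe's slice of the queue array; qsize_sum is the per-pipe total.
 */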
583 subport->qsize_add[0] = 0;
584
585 /* Strict priority traffic class */
586 for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
587 subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1];
588
589 /* Best-effort traffic class */
590 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] =
591 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE] +
592 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
593 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] =
594 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] +
595 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
596 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] =
597 subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] +
598 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
599
600 subport->qsize_sum = subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] +
601 subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
602 }
603
604 static void
605 rte_sched_port_log_pipe_profile(struct rte_sched_subport *subport, uint32_t i)
606 {
607 struct rte_sched_pipe_profile *p = subport->pipe_profiles + i;
608
609 RTE_LOG(DEBUG, SCHED, "Low level config for pipe profile %u:\n"
610 " Token bucket: period = %"PRIu64", credits per period = %"PRIu64", size = %"PRIu64"\n"
611 " Traffic classes: period = %"PRIu64",\n"
612 " credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
613 ", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
614 ", %"PRIu64", %"PRIu64", %"PRIu64"]\n"
615 " Best-effort traffic class oversubscription: weight = %hhu\n"
616 " WRR cost: [%hhu, %hhu, %hhu, %hhu]\n",
617 i,
618
619 /* Token bucket */
620 p->tb_period,
621 p->tb_credits_per_period,
622 p->tb_size,
623
624 /* Traffic classes */
625 p->tc_period,
626 p->tc_credits_per_period[0],
627 p->tc_credits_per_period[1],
628 p->tc_credits_per_period[2],
629 p->tc_credits_per_period[3],
630 p->tc_credits_per_period[4],
631 p->tc_credits_per_period[5],
632 p->tc_credits_per_period[6],
633 p->tc_credits_per_period[7],
634 p->tc_credits_per_period[8],
635 p->tc_credits_per_period[9],
636 p->tc_credits_per_period[10],
637 p->tc_credits_per_period[11],
638 p->tc_credits_per_period[12],
639
640 /* Best-effort traffic class oversubscription */
641 p->tc_ov_weight,
642
643 /* WRR */
644 p->wrr_cost[0], p->wrr_cost[1], p->wrr_cost[2], p->wrr_cost[3]);
645 }
646
647 static void
648 rte_sched_port_log_subport_profile(struct rte_sched_port *port, uint32_t i)
649 {
650 struct rte_sched_subport_profile *p = port->subport_profiles + i;
651
652 RTE_LOG(DEBUG, SCHED, "Low level config for subport profile %u:\n"
653 "Token bucket: period = %"PRIu64", credits per period = %"PRIu64","
654 "size = %"PRIu64"\n"
655 "Traffic classes: period = %"PRIu64",\n"
656 "credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
657 " %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
658 " %"PRIu64", %"PRIu64", %"PRIu64"]\n",
659 i,
660
661 /* Token bucket */
662 p->tb_period,
663 p->tb_credits_per_period,
664 p->tb_size,
665
666 /* Traffic classes */
667 p->tc_period,
668 p->tc_credits_per_period[0],
669 p->tc_credits_per_period[1],
670 p->tc_credits_per_period[2],
671 p->tc_credits_per_period[3],
672 p->tc_credits_per_period[4],
673 p->tc_credits_per_period[5],
674 p->tc_credits_per_period[6],
675 p->tc_credits_per_period[7],
676 p->tc_credits_per_period[8],
677 p->tc_credits_per_period[9],
678 p->tc_credits_per_period[10],
679 p->tc_credits_per_period[11],
680 p->tc_credits_per_period[12]);
681 }
682
683 static inline uint64_t
684 rte_sched_time_ms_to_bytes(uint64_t time_ms, uint64_t rate)
685 {
686 uint64_t time = time_ms;
687
688 time = (time * rate) / 1000;
689
690 return time;
691 }
692
693 static void
694 rte_sched_pipe_profile_convert(struct rte_sched_subport *subport,
695 struct rte_sched_pipe_params *src,
696 struct rte_sched_pipe_profile *dst,
697 uint64_t rate)
698 {
699 uint32_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
700 uint32_t lcd1, lcd2, lcd;
701 uint32_t i;
702
703 /* Token Bucket */
704 if (src->tb_rate == rate) {
705 dst->tb_credits_per_period = 1;
706 dst->tb_period = 1;
707 } else {
708 double tb_rate = (double) src->tb_rate
709 / (double) rate;
710 double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
711
712 rte_approx_64(tb_rate, d, &dst->tb_credits_per_period,
713 &dst->tb_period);
714 }
715
716 dst->tb_size = src->tb_size;
717
718 /* Traffic Classes */
719 dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period,
720 rate);
721
722 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
723 if (subport->qsize[i])
724 dst->tc_credits_per_period[i]
725 = rte_sched_time_ms_to_bytes(src->tc_period,
726 src->tc_rate[i]);
727
728 dst->tc_ov_weight = src->tc_ov_weight;
729
730 /* WRR queues */
731 wrr_cost[0] = src->wrr_weights[0];
732 wrr_cost[1] = src->wrr_weights[1];
733 wrr_cost[2] = src->wrr_weights[2];
734 wrr_cost[3] = src->wrr_weights[3];
735
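/* Per-queue WRR cost is inversely proportional to the configured
 * weight: a larger weight yields a smaller cost for that queue.
 */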
736 lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
737 lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
738 lcd = rte_get_lcd(lcd1, lcd2);
739
740 wrr_cost[0] = lcd / wrr_cost[0];
741 wrr_cost[1] = lcd / wrr_cost[1];
742 wrr_cost[2] = lcd / wrr_cost[2];
743 wrr_cost[3] = lcd / wrr_cost[3];
744
745 dst->wrr_cost[0] = (uint8_t) wrr_cost[0];
746 dst->wrr_cost[1] = (uint8_t) wrr_cost[1];
747 dst->wrr_cost[2] = (uint8_t) wrr_cost[2];
748 dst->wrr_cost[3] = (uint8_t) wrr_cost[3];
749 }
750
751 static void
752 rte_sched_subport_profile_convert(struct rte_sched_subport_profile_params *src,
753 struct rte_sched_subport_profile *dst,
754 uint64_t rate)
755 {
756 uint32_t i;
757
758 /* Token Bucket */
759 if (src->tb_rate == rate) {
760 dst->tb_credits_per_period = 1;
761 dst->tb_period = 1;
762 } else {
763 double tb_rate = (double) src->tb_rate
764 / (double) rate;
765 double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
766
767 rte_approx_64(tb_rate, d, &dst->tb_credits_per_period,
768 &dst->tb_period);
769 }
770
771 dst->tb_size = src->tb_size;
772
773 /* Traffic Classes */
774 dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period, rate);
775
776 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
777 dst->tc_credits_per_period[i]
778 = rte_sched_time_ms_to_bytes(src->tc_period,
779 src->tc_rate[i]);
780 }
781
782 static void
783 rte_sched_subport_config_pipe_profile_table(struct rte_sched_subport *subport,
784 struct rte_sched_subport_params *params, uint64_t rate)
785 {
786 uint32_t i;
787
788 for (i = 0; i < subport->n_pipe_profiles; i++) {
789 struct rte_sched_pipe_params *src = params->pipe_profiles + i;
790 struct rte_sched_pipe_profile *dst = subport->pipe_profiles + i;
791
792 rte_sched_pipe_profile_convert(subport, src, dst, rate);
793 rte_sched_port_log_pipe_profile(subport, i);
794 }
795
796 subport->pipe_tc_be_rate_max = 0;
797 for (i = 0; i < subport->n_pipe_profiles; i++) {
798 struct rte_sched_pipe_params *src = params->pipe_profiles + i;
799 uint64_t pipe_tc_be_rate = src->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];
800
801 if (subport->pipe_tc_be_rate_max < pipe_tc_be_rate)
802 subport->pipe_tc_be_rate_max = pipe_tc_be_rate;
803 }
804 }
805
806 static void
807 rte_sched_port_config_subport_profile_table(struct rte_sched_port *port,
808 struct rte_sched_port_params *params,
809 uint64_t rate)
810 {
811 uint32_t i;
812
813 for (i = 0; i < port->n_subport_profiles; i++) {
814 struct rte_sched_subport_profile_params *src
815 = params->subport_profiles + i;
816 struct rte_sched_subport_profile *dst
817 = port->subport_profiles + i;
818
819 rte_sched_subport_profile_convert(src, dst, rate);
820 rte_sched_port_log_subport_profile(port, i);
821 }
822 }
823
824 static int
825 rte_sched_subport_check_params(struct rte_sched_subport_params *params,
826 uint32_t n_max_pipes_per_subport,
827 uint64_t rate)
828 {
829 uint32_t i;
830
831 /* Check user parameters */
832 if (params == NULL) {
833 RTE_LOG(ERR, SCHED,
834 "%s: Incorrect value for parameter params\n", __func__);
835 return -EINVAL;
836 }
837
838 /* qsize: if non-zero, power of 2,
839 * no bigger than 32K (due to 16-bit read/write pointers)
840 */
841 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
842 uint16_t qsize = params->qsize[i];
843
844 if (qsize != 0 && !rte_is_power_of_2(qsize)) {
845 RTE_LOG(ERR, SCHED,
846 "%s: Incorrect value for qsize\n", __func__);
847 return -EINVAL;
848 }
849 }
850
851 if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
852 RTE_LOG(ERR, SCHED, "%s: Incorrect qsize\n", __func__);
853 return -EINVAL;
854 }
855
856 /* n_pipes_per_subport: non-zero, power of 2 */
857 if (params->n_pipes_per_subport_enabled == 0 ||
858 params->n_pipes_per_subport_enabled > n_max_pipes_per_subport ||
859 !rte_is_power_of_2(params->n_pipes_per_subport_enabled)) {
860 RTE_LOG(ERR, SCHED,
861 "%s: Incorrect value for pipes number\n", __func__);
862 return -EINVAL;
863 }
864
865 /* pipe_profiles and n_pipe_profiles */
866 if (params->pipe_profiles == NULL ||
867 params->n_pipe_profiles == 0 ||
868 params->n_max_pipe_profiles == 0 ||
869 params->n_pipe_profiles > params->n_max_pipe_profiles) {
870 RTE_LOG(ERR, SCHED,
871 "%s: Incorrect value for pipe profiles\n", __func__);
872 return -EINVAL;
873 }
874
875 for (i = 0; i < params->n_pipe_profiles; i++) {
876 struct rte_sched_pipe_params *p = params->pipe_profiles + i;
877 int status;
878
879 status = pipe_profile_check(p, rate, &params->qsize[0]);
880 if (status != 0) {
881 RTE_LOG(ERR, SCHED,
882 "%s: Pipe profile check failed(%d)\n", __func__, status);
883 return -EINVAL;
884 }
885 }
886
887 return 0;
888 }
889
890 uint32_t
891 rte_sched_port_get_memory_footprint(struct rte_sched_port_params *port_params,
892 struct rte_sched_subport_params **subport_params)
893 {
894 uint32_t size0 = 0, size1 = 0, i;
895 int status;
896
897 status = rte_sched_port_check_params(port_params);
898 if (status != 0) {
899 RTE_LOG(ERR, SCHED,
900 "%s: Port scheduler port params check failed (%d)\n",
901 __func__, status);
902
903 return 0;
904 }
905
906 for (i = 0; i < port_params->n_subports_per_port; i++) {
907 struct rte_sched_subport_params *sp = subport_params[i];
908
909 status = rte_sched_subport_check_params(sp,
910 port_params->n_pipes_per_subport,
911 port_params->rate);
912 if (status != 0) {
913 RTE_LOG(ERR, SCHED,
914 "%s: Port scheduler subport params check failed (%d)\n",
915 __func__, status);
916
917 return 0;
918 }
919 }
920
921 size0 = sizeof(struct rte_sched_port);
922
923 for (i = 0; i < port_params->n_subports_per_port; i++) {
924 struct rte_sched_subport_params *sp = subport_params[i];
925
926 size1 += rte_sched_subport_get_array_base(sp,
927 e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
928 }
929
930 return size0 + size1;
931 }
932
933 struct rte_sched_port *
934 rte_sched_port_config(struct rte_sched_port_params *params)
935 {
936 struct rte_sched_port *port = NULL;
937 uint32_t size0, size1, size2;
938 uint32_t cycles_per_byte;
939 uint32_t i, j;
940 int status;
941
942 status = rte_sched_port_check_params(params);
943 if (status != 0) {
944 RTE_LOG(ERR, SCHED,
945 "%s: Port scheduler params check failed (%d)\n",
946 __func__, status);
947 return NULL;
948 }
949
950 size0 = sizeof(struct rte_sched_port);
951 size1 = params->n_subports_per_port * sizeof(struct rte_sched_subport *);
952 size2 = params->n_max_subport_profiles *
953 sizeof(struct rte_sched_subport_profile);
954
955 /* Allocate memory to store the data structures */
956 port = rte_zmalloc_socket("qos_params", size0 + size1,
957 RTE_CACHE_LINE_SIZE, params->socket);
958 if (port == NULL) {
959 RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);
960
961 return NULL;
962 }
963
964 /* Allocate memory to store the subport profile */
965 port->subport_profiles = rte_zmalloc_socket("subport_profile", size2,
966 RTE_CACHE_LINE_SIZE, params->socket);
967 if (port->subport_profiles == NULL) {
968 RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);
969 rte_free(port);
970 return NULL;
971 }
972
973 /* User parameters */
974 port->n_subports_per_port = params->n_subports_per_port;
975 port->n_subport_profiles = params->n_subport_profiles;
976 port->n_max_subport_profiles = params->n_max_subport_profiles;
977 port->n_pipes_per_subport = params->n_pipes_per_subport;
978 port->n_pipes_per_subport_log2 =
979 __builtin_ctz(params->n_pipes_per_subport);
980 port->socket = params->socket;
981
982 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
983 port->pipe_queue[i] = i;
984
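/* Queues 0..RTE_SCHED_TRAFFIC_CLASS_BE-1 map one-to-one to the
 * strict-priority TCs; the remaining queues all belong to the
 * best-effort TC.
 */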
985 for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
986 port->pipe_tc[i] = j;
987
988 if (j < RTE_SCHED_TRAFFIC_CLASS_BE)
989 j++;
990 }
991
992 for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
993 port->tc_queue[i] = j;
994
995 if (i >= RTE_SCHED_TRAFFIC_CLASS_BE)
996 j++;
997 }
998 port->rate = params->rate;
999 port->mtu = params->mtu + params->frame_overhead;
1000 port->frame_overhead = params->frame_overhead;
1001
1002 /* Timing */
1003 port->time_cpu_cycles = rte_get_tsc_cycles();
1004 port->time_cpu_bytes = 0;
1005 port->time = 0;
1006
1007 /* Subport profile table */
1008 rte_sched_port_config_subport_profile_table(port, params, port->rate);
1009
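/* cycles_per_byte is a fixed-point value with RTE_SCHED_TIME_SHIFT
 * fractional bits; its reciprocal is pre-computed for the fast path.
 */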
1010 cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT)
1011 / params->rate;
1012 port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte);
1013 port->cycles_per_byte = cycles_per_byte;
1014
1015 /* Grinders */
1016 port->pkts_out = NULL;
1017 port->n_pkts_out = 0;
1018 port->subport_id = 0;
1019
1020 return port;
1021 }
1022
1023 static inline void
1024 rte_sched_subport_free(struct rte_sched_port *port,
1025 struct rte_sched_subport *subport)
1026 {
1027 uint32_t n_subport_pipe_queues;
1028 uint32_t qindex;
1029
1030 if (subport == NULL)
1031 return;
1032
1033 n_subport_pipe_queues = rte_sched_subport_pipe_queues(subport);
1034
1035 /* Free enqueued mbufs */
1036 for (qindex = 0; qindex < n_subport_pipe_queues; qindex++) {
1037 struct rte_mbuf **mbufs =
1038 rte_sched_subport_pipe_qbase(subport, qindex);
1039 uint16_t qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
1040 if (qsize != 0) {
1041 struct rte_sched_queue *queue = subport->queue + qindex;
1042 uint16_t qr = queue->qr & (qsize - 1);
1043 uint16_t qw = queue->qw & (qsize - 1);
1044
1045 for (; qr != qw; qr = (qr + 1) & (qsize - 1))
1046 rte_pktmbuf_free(mbufs[qr]);
1047 }
1048 }
1049
1050 rte_free(subport);
1051 }
1052
1053 void
1054 rte_sched_port_free(struct rte_sched_port *port)
1055 {
1056 uint32_t i;
1057
1058 /* Check user parameters */
1059 if (port == NULL)
1060 return;
1061
1062 for (i = 0; i < port->n_subports_per_port; i++)
1063 rte_sched_subport_free(port, port->subports[i]);
1064
1065 rte_free(port->subport_profiles);
1066 rte_free(port);
1067 }
1068
1069 static void
1070 rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)
1071 {
1072 uint32_t i;
1073
1074 for (i = 0; i < n_subports; i++) {
1075 struct rte_sched_subport *subport = port->subports[i];
1076
1077 rte_sched_subport_free(port, subport);
1078 }
1079
1080 rte_free(port->subport_profiles);
1081 rte_free(port);
1082 }
1083
1084 #ifdef RTE_SCHED_CMAN
1085 static int
1086 rte_sched_red_config(struct rte_sched_port *port,
1087 struct rte_sched_subport *s,
1088 struct rte_sched_subport_params *params,
1089 uint32_t n_subports)
1090 {
1091 uint32_t i;
1092
1093 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
1094
1095 uint32_t j;
1096
1097 for (j = 0; j < RTE_COLORS; j++) {
1098 /* if min/max are both zero, then RED is disabled */
1099 if ((params->cman_params->red_params[i][j].min_th |
1100 params->cman_params->red_params[i][j].max_th) == 0) {
1101 continue;
1102 }
1103
1104 if (rte_red_config_init(&s->red_config[i][j],
1105 params->cman_params->red_params[i][j].wq_log2,
1106 params->cman_params->red_params[i][j].min_th,
1107 params->cman_params->red_params[i][j].max_th,
1108 params->cman_params->red_params[i][j].maxp_inv) != 0) {
1109 rte_sched_free_memory(port, n_subports);
1110
1111 RTE_LOG(NOTICE, SCHED,
1112 "%s: RED configuration init fails\n", __func__);
1113 return -EINVAL;
1114 }
1115 }
1116 }
1117 s->cman = RTE_SCHED_CMAN_RED;
1118 return 0;
1119 }
1120
1121 static int
1122 rte_sched_pie_config(struct rte_sched_port *port,
1123 struct rte_sched_subport *s,
1124 struct rte_sched_subport_params *params,
1125 uint32_t n_subports)
1126 {
1127 uint32_t i;
1128
1129 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
1130 if (params->cman_params->pie_params[i].tailq_th > params->qsize[i]) {
1131 RTE_LOG(NOTICE, SCHED,
1132 "%s: PIE tailq threshold incorrect\n", __func__);
1133 return -EINVAL;
1134 }
1135
1136 if (rte_pie_config_init(&s->pie_config[i],
1137 params->cman_params->pie_params[i].qdelay_ref,
1138 params->cman_params->pie_params[i].dp_update_interval,
1139 params->cman_params->pie_params[i].max_burst,
1140 params->cman_params->pie_params[i].tailq_th) != 0) {
1141 rte_sched_free_memory(port, n_subports);
1142
1143 RTE_LOG(NOTICE, SCHED,
1144 "%s: PIE configuration init fails\n", __func__);
1145 return -EINVAL;
1146 }
1147 }
1148 s->cman = RTE_SCHED_CMAN_PIE;
1149 return 0;
1150 }
1151
1152 static int
1153 rte_sched_cman_config(struct rte_sched_port *port,
1154 struct rte_sched_subport *s,
1155 struct rte_sched_subport_params *params,
1156 uint32_t n_subports)
1157 {
1158 if (params->cman_params->cman_mode == RTE_SCHED_CMAN_RED)
1159 return rte_sched_red_config(port, s, params, n_subports);
1160
1161 else if (params->cman_params->cman_mode == RTE_SCHED_CMAN_PIE)
1162 return rte_sched_pie_config(port, s, params, n_subports);
1163
1164 return -EINVAL;
1165 }
1166 #endif
1167
1168 int
1169 rte_sched_subport_config(struct rte_sched_port *port,
1170 uint32_t subport_id,
1171 struct rte_sched_subport_params *params,
1172 uint32_t subport_profile_id)
1173 {
1174 struct rte_sched_subport *s = NULL;
1175 uint32_t n_subports = subport_id;
1176 struct rte_sched_subport_profile *profile;
1177 uint32_t n_subport_pipe_queues, i;
1178 uint32_t size0, size1, bmp_mem_size;
1179 int status;
1180 int ret;
1181
1182 /* Check user parameters */
1183 if (port == NULL) {
1184 RTE_LOG(ERR, SCHED,
1185 "%s: Incorrect value for parameter port\n", __func__);
1186 return 0;
1187 }
1188
1189 if (subport_id >= port->n_subports_per_port) {
1190 RTE_LOG(ERR, SCHED,
1191 "%s: Incorrect value for subport id\n", __func__);
1192 ret = -EINVAL;
1193 goto out;
1194 }
1195
1196 if (subport_profile_id >= port->n_max_subport_profiles) {
1197 RTE_LOG(ERR, SCHED, "%s: "
1198 "Number of subport profile exceeds the max limit\n",
1199 __func__);
1200 ret = -EINVAL;
1201 goto out;
1202 }
1203
1204 /** Memory is allocated only on the first invocation of the API for a
1205 * given subport. Subsequent invocations on the same subport only
1206 * update the subport bandwidth parameters.
1207 **/
1208 if (port->subports[subport_id] == NULL) {
1209
1210 status = rte_sched_subport_check_params(params,
1211 port->n_pipes_per_subport,
1212 port->rate);
1213 if (status != 0) {
1214 RTE_LOG(NOTICE, SCHED,
1215 "%s: Port scheduler params check failed (%d)\n",
1216 __func__, status);
1217 ret = -EINVAL;
1218 goto out;
1219 }
1220
1221 /* Determine the amount of memory to allocate */
1222 size0 = sizeof(struct rte_sched_subport);
1223 size1 = rte_sched_subport_get_array_base(params,
1224 e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
1225
1226 /* Allocate memory to store the data structures */
1227 s = rte_zmalloc_socket("subport_params", size0 + size1,
1228 RTE_CACHE_LINE_SIZE, port->socket);
1229 if (s == NULL) {
1230 RTE_LOG(ERR, SCHED,
1231 "%s: Memory allocation fails\n", __func__);
1232 ret = -ENOMEM;
1233 goto out;
1234 }
1235
1236 n_subports++;
1237
1238 subport_profile_id = 0;
1239
1240 /* Port */
1241 port->subports[subport_id] = s;
1242
1243 s->tb_time = port->time;
1244
1245 /* compile time checks */
1246 RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
1247 RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS &
1248 (RTE_SCHED_PORT_N_GRINDERS - 1));
1249
1250 /* User parameters */
1251 s->n_pipes_per_subport_enabled =
1252 params->n_pipes_per_subport_enabled;
1253 memcpy(s->qsize, params->qsize, sizeof(params->qsize));
1254 s->n_pipe_profiles = params->n_pipe_profiles;
1255 s->n_max_pipe_profiles = params->n_max_pipe_profiles;
1256
1257 #ifdef RTE_SCHED_CMAN
1258 if (params->cman_params != NULL) {
1259 s->cman_enabled = true;
1260 status = rte_sched_cman_config(port, s, params, n_subports);
1261 if (status) {
1262 RTE_LOG(NOTICE, SCHED,
1263 "%s: CMAN configuration fails\n", __func__);
1264 return status;
1265 }
1266 } else {
1267 s->cman_enabled = false;
1268 }
1269 #endif
1270
1271 /* Scheduling loop detection */
1272 s->pipe_loop = RTE_SCHED_PIPE_INVALID;
1273 s->pipe_exhaustion = 0;
1274
1275 /* Grinders */
1276 s->busy_grinders = 0;
1277
1278 /* Queue base calculation */
1279 rte_sched_subport_config_qsize(s);
1280
1281 /* Large data structures */
1282 s->pipe = (struct rte_sched_pipe *)
1283 (s->memory + rte_sched_subport_get_array_base(params,
1284 e_RTE_SCHED_SUBPORT_ARRAY_PIPE));
1285 s->queue = (struct rte_sched_queue *)
1286 (s->memory + rte_sched_subport_get_array_base(params,
1287 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE));
1288 s->queue_extra = (struct rte_sched_queue_extra *)
1289 (s->memory + rte_sched_subport_get_array_base(params,
1290 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA));
1291 s->pipe_profiles = (struct rte_sched_pipe_profile *)
1292 (s->memory + rte_sched_subport_get_array_base(params,
1293 e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES));
1294 s->bmp_array = s->memory + rte_sched_subport_get_array_base(
1295 params, e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY);
1296 s->queue_array = (struct rte_mbuf **)
1297 (s->memory + rte_sched_subport_get_array_base(params,
1298 e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY));
1299
1300 /* Pipe profile table */
1301 rte_sched_subport_config_pipe_profile_table(s, params,
1302 port->rate);
1303
1304 /* Bitmap */
1305 n_subport_pipe_queues = rte_sched_subport_pipe_queues(s);
1306 bmp_mem_size = rte_bitmap_get_memory_footprint(
1307 n_subport_pipe_queues);
1308 s->bmp = rte_bitmap_init(n_subport_pipe_queues, s->bmp_array,
1309 bmp_mem_size);
1310 if (s->bmp == NULL) {
1311 RTE_LOG(ERR, SCHED,
1312 "%s: Subport bitmap init error\n", __func__);
1313 ret = -EINVAL;
1314 goto out;
1315 }
1316
1317 for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
1318 s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
1319
1320 /* TC oversubscription */
1321 s->tc_ov_wm_min = port->mtu;
1322 s->tc_ov_period_id = 0;
1323 s->tc_ov = 0;
1324 s->tc_ov_n = 0;
1325 s->tc_ov_rate = 0;
1326 }
1327
1328 {
1329 /* Update subport parameters from the subport profile table */
1330 profile = port->subport_profiles + subport_profile_id;
1331
1332 s = port->subports[subport_id];
1333
1334 s->tb_credits = profile->tb_size / 2;
1335
1336 s->tc_time = port->time + profile->tc_period;
1337
1338 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
1339 if (s->qsize[i])
1340 s->tc_credits[i] =
1341 profile->tc_credits_per_period[i];
1342 else
1343 profile->tc_credits_per_period[i] = 0;
1344
1345 s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period,
1346 s->pipe_tc_be_rate_max);
1347 s->tc_ov_wm = s->tc_ov_wm_max;
1348 s->profile = subport_profile_id;
1349
1350 }
1351
1352 rte_sched_port_log_subport_profile(port, subport_profile_id);
1353
1354 return 0;
1355
1356 out:
1357 rte_sched_free_memory(port, n_subports);
1358
1359 return ret;
1360 }
1361
1362 int
1363 rte_sched_pipe_config(struct rte_sched_port *port,
1364 uint32_t subport_id,
1365 uint32_t pipe_id,
1366 int32_t pipe_profile)
1367 {
1368 struct rte_sched_subport *s;
1369 struct rte_sched_subport_profile *sp;
1370 struct rte_sched_pipe *p;
1371 struct rte_sched_pipe_profile *params;
1372 uint32_t n_subports = subport_id + 1;
1373 uint32_t deactivate, profile, i;
1374 int ret;
1375
1376 /* Check user parameters */
1377 profile = (uint32_t) pipe_profile;
1378 deactivate = (pipe_profile < 0);
1379
1380 if (port == NULL) {
1381 RTE_LOG(ERR, SCHED,
1382 "%s: Incorrect value for parameter port\n", __func__);
1383 return -EINVAL;
1384 }
1385
1386 if (subport_id >= port->n_subports_per_port) {
1387 RTE_LOG(ERR, SCHED,
1388 "%s: Incorrect value for parameter subport id\n", __func__);
1389 ret = -EINVAL;
1390 goto out;
1391 }
1392
1393 s = port->subports[subport_id];
1394 if (pipe_id >= s->n_pipes_per_subport_enabled) {
1395 RTE_LOG(ERR, SCHED,
1396 "%s: Incorrect value for parameter pipe id\n", __func__);
1397 ret = -EINVAL;
1398 goto out;
1399 }
1400
1401 if (!deactivate && profile >= s->n_pipe_profiles) {
1402 RTE_LOG(ERR, SCHED,
1403 "%s: Incorrect value for parameter pipe profile\n", __func__);
1404 ret = -EINVAL;
1405 goto out;
1406 }
1407
1408 sp = port->subport_profiles + s->profile;
1409 /* Handle the case when pipe already has a valid configuration */
1410 p = s->pipe + pipe_id;
1411 if (p->tb_time) {
1412 params = s->pipe_profiles + p->profile;
1413
1414 double subport_tc_be_rate =
1415 (double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1416 / (double) sp->tc_period;
1417 double pipe_tc_be_rate =
1418 (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1419 / (double) params->tc_period;
1420 uint32_t tc_be_ov = s->tc_ov;
1421
1422 /* Unplug pipe from its subport */
1423 s->tc_ov_n -= params->tc_ov_weight;
1424 s->tc_ov_rate -= pipe_tc_be_rate;
1425 s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
1426
1427 if (s->tc_ov != tc_be_ov) {
1428 RTE_LOG(DEBUG, SCHED,
1429 "Subport %u Best-effort TC oversubscription is OFF (%.4lf >= %.4lf)\n",
1430 subport_id, subport_tc_be_rate, s->tc_ov_rate);
1431 }
1432
1433 /* Reset the pipe */
1434 memset(p, 0, sizeof(struct rte_sched_pipe));
1435 }
1436
1437 if (deactivate)
1438 return 0;
1439
1440 /* Apply the new pipe configuration */
1441 p->profile = profile;
1442 params = s->pipe_profiles + p->profile;
1443
1444 /* Token Bucket (TB) */
1445 p->tb_time = port->time;
1446 p->tb_credits = params->tb_size / 2;
1447
1448 /* Traffic Classes (TCs) */
1449 p->tc_time = port->time + params->tc_period;
1450
1451 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
1452 if (s->qsize[i])
1453 p->tc_credits[i] = params->tc_credits_per_period[i];
1454
1455 {
1456 /* Subport best effort tc oversubscription */
1457 double subport_tc_be_rate =
1458 (double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1459 / (double) sp->tc_period;
1460 double pipe_tc_be_rate =
1461 (double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
1462 / (double) params->tc_period;
1463 uint32_t tc_be_ov = s->tc_ov;
1464
1465 s->tc_ov_n += params->tc_ov_weight;
1466 s->tc_ov_rate += pipe_tc_be_rate;
1467 s->tc_ov = s->tc_ov_rate > subport_tc_be_rate;
1468
1469 if (s->tc_ov != tc_be_ov) {
1470 RTE_LOG(DEBUG, SCHED,
1471 "Subport %u Best effort TC oversubscription is ON (%.4lf < %.4lf)\n",
1472 subport_id, subport_tc_be_rate, s->tc_ov_rate);
1473 }
1474 p->tc_ov_period_id = s->tc_ov_period_id;
1475 p->tc_ov_credits = s->tc_ov_wm;
1476 }
1477
1478 return 0;
1479
1480 out:
1481 rte_sched_free_memory(port, n_subports);
1482
1483 return ret;
1484 }
1485
1486 int
1487 rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,
1488 uint32_t subport_id,
1489 struct rte_sched_pipe_params *params,
1490 uint32_t *pipe_profile_id)
1491 {
1492 struct rte_sched_subport *s;
1493 struct rte_sched_pipe_profile *pp;
1494 uint32_t i;
1495 int status;
1496
1497 /* Port */
1498 if (port == NULL) {
1499 RTE_LOG(ERR, SCHED,
1500 "%s: Incorrect value for parameter port\n", __func__);
1501 return -EINVAL;
1502 }
1503
1504 /* Subport id must not exceed the max limit */
1505 if (subport_id > port->n_subports_per_port) {
1506 RTE_LOG(ERR, SCHED,
1507 "%s: Incorrect value for subport id\n", __func__);
1508 return -EINVAL;
1509 }
1510
1511 s = port->subports[subport_id];
1512
1513 /* Pipe profiles must not exceed the max limit */
1514 if (s->n_pipe_profiles >= s->n_max_pipe_profiles) {
1515 RTE_LOG(ERR, SCHED,
1516 "%s: Number of pipe profiles exceeds the max limit\n", __func__);
1517 return -EINVAL;
1518 }
1519
1520 /* Pipe params */
1521 status = pipe_profile_check(params, port->rate, &s->qsize[0]);
1522 if (status != 0) {
1523 RTE_LOG(ERR, SCHED,
1524 "%s: Pipe profile check failed(%d)\n", __func__, status);
1525 return -EINVAL;
1526 }
1527
1528 pp = &s->pipe_profiles[s->n_pipe_profiles];
1529 rte_sched_pipe_profile_convert(s, params, pp, port->rate);
1530
1531 /* Pipe profile must not already exist */
1532 for (i = 0; i < s->n_pipe_profiles; i++)
1533 if (memcmp(s->pipe_profiles + i, pp, sizeof(*pp)) == 0) {
1534 RTE_LOG(ERR, SCHED,
1535 "%s: Pipe profile exists\n", __func__);
1536 return -EINVAL;
1537 }
1538
1539 /* Pipe profile commit */
1540 *pipe_profile_id = s->n_pipe_profiles;
1541 s->n_pipe_profiles++;
1542
1543 if (s->pipe_tc_be_rate_max < params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE])
1544 s->pipe_tc_be_rate_max = params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];
1545
1546 rte_sched_port_log_pipe_profile(s, *pipe_profile_id);
1547
1548 return 0;
1549 }
1550
1551 int
1552 rte_sched_port_subport_profile_add(struct rte_sched_port *port,
1553 struct rte_sched_subport_profile_params *params,
1554 uint32_t *subport_profile_id)
1555 {
1556 int status;
1557 uint32_t i;
1558 struct rte_sched_subport_profile *dst;
1559
1560 /* Port */
1561 if (port == NULL) {
1562 RTE_LOG(ERR, SCHED, "%s: "
1563 "Incorrect value for parameter port\n", __func__);
1564 return -EINVAL;
1565 }
1566
1567 if (params == NULL) {
1568 RTE_LOG(ERR, SCHED, "%s: "
1569 "Incorrect value for parameter profile\n", __func__);
1570 return -EINVAL;
1571 }
1572
1573 if (subport_profile_id == NULL) {
1574 RTE_LOG(ERR, SCHED, "%s: "
1575 "Incorrect value for parameter subport_profile_id\n",
1576 __func__);
1577 return -EINVAL;
1578 }
1579
1580 dst = port->subport_profiles + port->n_subport_profiles;
1581
1582 /* Subport profiles must not exceed the max limit */
1583 if (port->n_subport_profiles >= port->n_max_subport_profiles) {
1584 RTE_LOG(ERR, SCHED, "%s: "
1585 "Number of subport profiles exceeds the max limit\n",
1586 __func__);
1587 return -EINVAL;
1588 }
1589
1590 status = subport_profile_check(params, port->rate);
1591 if (status != 0) {
1592 RTE_LOG(ERR, SCHED,
1593 "%s: subport profile check failed(%d)\n", __func__, status);
1594 return -EINVAL;
1595 }
1596
1597 rte_sched_subport_profile_convert(params, dst, port->rate);
1598
1599 /* Subport profile must not already exist */
1600 for (i = 0; i < port->n_subport_profiles; i++)
1601 if (memcmp(port->subport_profiles + i,
1602 dst, sizeof(*dst)) == 0) {
1603 RTE_LOG(ERR, SCHED,
1604 "%s: subport profile exists\n", __func__);
1605 return -EINVAL;
1606 }
1607
1608 /* Subport profile commit */
1609 *subport_profile_id = port->n_subport_profiles;
1610 port->n_subport_profiles++;
1611
1612 rte_sched_port_log_subport_profile(port, *subport_profile_id);
1613
1614 return 0;
1615 }
1616
1617 static inline uint32_t
1618 rte_sched_port_qindex(struct rte_sched_port *port,
1619 uint32_t subport,
1620 uint32_t pipe,
1621 uint32_t traffic_class,
1622 uint32_t queue)
1623 {
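/* Queue index layout: [subport | pipe | queue within pipe (4 bits)] */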
1624 return ((subport & (port->n_subports_per_port - 1)) <<
1625 (port->n_pipes_per_subport_log2 + 4)) |
1626 ((pipe &
1627 (port->subports[subport]->n_pipes_per_subport_enabled - 1)) << 4) |
1628 ((rte_sched_port_pipe_queue(port, traffic_class) + queue) &
1629 (RTE_SCHED_QUEUES_PER_PIPE - 1));
1630 }
1631
1632 void
1633 rte_sched_port_pkt_write(struct rte_sched_port *port,
1634 struct rte_mbuf *pkt,
1635 uint32_t subport, uint32_t pipe,
1636 uint32_t traffic_class,
1637 uint32_t queue, enum rte_color color)
1638 {
1639 uint32_t queue_id =
1640 rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
1641
1642 rte_mbuf_sched_set(pkt, queue_id, traffic_class, (uint8_t)color);
1643 }
1644
1645 void
1646 rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
1647 const struct rte_mbuf *pkt,
1648 uint32_t *subport, uint32_t *pipe,
1649 uint32_t *traffic_class, uint32_t *queue)
1650 {
1651 uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
1652
1653 *subport = queue_id >> (port->n_pipes_per_subport_log2 + 4);
1654 *pipe = (queue_id >> 4) &
1655 (port->subports[*subport]->n_pipes_per_subport_enabled - 1);
1656 *traffic_class = rte_sched_port_pipe_tc(port, queue_id);
1657 *queue = rte_sched_port_tc_queue(port, queue_id);
1658 }
1659
1660 enum rte_color
1661 rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
1662 {
1663 return (enum rte_color)rte_mbuf_sched_color_get(pkt);
1664 }
1665
1666 int
1667 rte_sched_subport_read_stats(struct rte_sched_port *port,
1668 uint32_t subport_id,
1669 struct rte_sched_subport_stats *stats,
1670 uint32_t *tc_ov)
1671 {
1672 struct rte_sched_subport *s;
1673
1674 /* Check user parameters */
1675 if (port == NULL) {
1676 RTE_LOG(ERR, SCHED,
1677 "%s: Incorrect value for parameter port\n", __func__);
1678 return -EINVAL;
1679 }
1680
1681 if (subport_id >= port->n_subports_per_port) {
1682 RTE_LOG(ERR, SCHED,
1683 "%s: Incorrect value for subport id\n", __func__);
1684 return -EINVAL;
1685 }
1686
1687 if (stats == NULL) {
1688 RTE_LOG(ERR, SCHED,
1689 "%s: Incorrect value for parameter stats\n", __func__);
1690 return -EINVAL;
1691 }
1692
1693 if (tc_ov == NULL) {
1694 RTE_LOG(ERR, SCHED,
1695 "%s: Incorrect value for tc_ov\n", __func__);
1696 return -EINVAL;
1697 }
1698
1699 s = port->subports[subport_id];
1700
1701 /* Copy subport stats and clear */
1702 memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
1703 memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));
1704
1705 /* Subport TC oversubscription status */
1706 *tc_ov = s->tc_ov;
1707
1708 return 0;
1709 }
1710
1711 int
1712 rte_sched_queue_read_stats(struct rte_sched_port *port,
1713 uint32_t queue_id,
1714 struct rte_sched_queue_stats *stats,
1715 uint16_t *qlen)
1716 {
1717 struct rte_sched_subport *s;
1718 struct rte_sched_queue *q;
1719 struct rte_sched_queue_extra *qe;
1720 uint32_t subport_id, subport_qmask, subport_qindex;
1721
1722 /* Check user parameters */
1723 if (port == NULL) {
1724 RTE_LOG(ERR, SCHED,
1725 "%s: Incorrect value for parameter port\n", __func__);
1726 return -EINVAL;
1727 }
1728
1729 if (queue_id >= rte_sched_port_queues_per_port(port)) {
1730 RTE_LOG(ERR, SCHED,
1731 "%s: Incorrect value for queue id\n", __func__);
1732 return -EINVAL;
1733 }
1734
1735 if (stats == NULL) {
1736 RTE_LOG(ERR, SCHED,
1737 "%s: Incorrect value for parameter stats\n", __func__);
1738 return -EINVAL;
1739 }
1740
1741 if (qlen == NULL) {
1742 RTE_LOG(ERR, SCHED,
1743 "%s: Incorrect value for parameter qlen\n", __func__);
1744 return -EINVAL;
1745 }
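/* Number of queue index bits consumed by pipe + queue within a subport */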
1746 subport_qmask = port->n_pipes_per_subport_log2 + 4;
1747 subport_id = (queue_id >> subport_qmask) & (port->n_subports_per_port - 1);
1748
1749 s = port->subports[subport_id];
1750 subport_qindex = ((1 << subport_qmask) - 1) & queue_id;
1751 q = s->queue + subport_qindex;
1752 qe = s->queue_extra + subport_qindex;
1753
1754 /* Copy queue stats and clear */
1755 memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
1756 memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats));
1757
1758 /* Queue length */
1759 *qlen = q->qw - q->qr;
1760
1761 return 0;
1762 }
1763
1764 #ifdef RTE_SCHED_DEBUG
1765
1766 static inline int
1767 rte_sched_port_queue_is_empty(struct rte_sched_subport *subport,
1768 uint32_t qindex)
1769 {
1770 struct rte_sched_queue *queue = subport->queue + qindex;
1771
1772 return queue->qr == queue->qw;
1773 }
1774
1775 #endif /* RTE_SCHED_DEBUG */
1776
1777 static inline void
1778 rte_sched_port_update_subport_stats(struct rte_sched_port *port,
1779 struct rte_sched_subport *subport,
1780 uint32_t qindex,
1781 struct rte_mbuf *pkt)
1782 {
1783 uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
1784 uint32_t pkt_len = pkt->pkt_len;
1785
1786 subport->stats.n_pkts_tc[tc_index] += 1;
1787 subport->stats.n_bytes_tc[tc_index] += pkt_len;
1788 }
1789
1790 static inline void
1791 rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
1792 struct rte_sched_subport *subport,
1793 uint32_t qindex,
1794 struct rte_mbuf *pkt,
1795 __rte_unused uint32_t n_pkts_cman_dropped)
1796 {
1797 uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
1798 uint32_t pkt_len = pkt->pkt_len;
1799
1800 subport->stats.n_pkts_tc_dropped[tc_index] += 1;
1801 subport->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
1802 subport->stats.n_pkts_cman_dropped[tc_index] += n_pkts_cman_dropped;
1803 }
1804
1805 static inline void
1806 rte_sched_port_update_queue_stats(struct rte_sched_subport *subport,
1807 uint32_t qindex,
1808 struct rte_mbuf *pkt)
1809 {
1810 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1811 uint32_t pkt_len = pkt->pkt_len;
1812
1813 qe->stats.n_pkts += 1;
1814 qe->stats.n_bytes += pkt_len;
1815 }
1816
1817 static inline void
1818 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
1819 uint32_t qindex,
1820 struct rte_mbuf *pkt,
1821 __rte_unused uint32_t n_pkts_cman_dropped)
1822 {
1823 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1824 uint32_t pkt_len = pkt->pkt_len;
1825
1826 qe->stats.n_pkts_dropped += 1;
1827 qe->stats.n_bytes_dropped += pkt_len;
1828 #ifdef RTE_SCHED_CMAN
1829 if (subport->cman_enabled)
1830 qe->stats.n_pkts_cman_dropped += n_pkts_cman_dropped;
1831 #endif
1832 }
1833
1834 #ifdef RTE_SCHED_CMAN
1835
1836 static inline int
1837 rte_sched_port_cman_drop(struct rte_sched_port *port,
1838 struct rte_sched_subport *subport,
1839 struct rte_mbuf *pkt,
1840 uint32_t qindex,
1841 uint16_t qlen)
1842 {
1843 if (!subport->cman_enabled)
1844 return 0;
1845
1846 struct rte_sched_queue_extra *qe;
1847 uint32_t tc_index;
1848
1849 tc_index = rte_sched_port_pipe_tc(port, qindex);
1850 qe = subport->queue_extra + qindex;
1851
1852 /* RED */
1853 if (subport->cman == RTE_SCHED_CMAN_RED) {
1854 struct rte_red_config *red_cfg;
1855 struct rte_red *red;
1856 enum rte_color color;
1857
1858 color = rte_sched_port_pkt_read_color(pkt);
1859 red_cfg = &subport->red_config[tc_index][color];
1860
1861 if ((red_cfg->min_th | red_cfg->max_th) == 0)
1862 return 0;
1863
1864 red = &qe->red;
1865
1866 return rte_red_enqueue(red_cfg, red, qlen, port->time);
1867 }
1868
1869 /* PIE */
1870 struct rte_pie_config *pie_cfg = &subport->pie_config[tc_index];
1871 struct rte_pie *pie = &qe->pie;
1872
1873 return rte_pie_enqueue(pie_cfg, pie, qlen, pkt->pkt_len, port->time_cpu_cycles);
1874 }
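/*
 * Added commentary (illustrative, not from the original sources): this hook is
 * consulted before every enqueue. For RED, a per-TC/per-color configuration
 * whose thresholds are both zero is treated as "RED disabled" and the packet
 * is admitted; otherwise rte_red_enqueue() returns non-zero to request a drop.
 * When the subport is configured for PIE instead, the single per-TC PIE state
 * decides based on queue length, packet length and the current CPU cycle count.
 */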
1875
1876 static inline void
1877 rte_sched_port_red_set_queue_empty_timestamp(struct rte_sched_port *port,
1878 struct rte_sched_subport *subport, uint32_t qindex)
1879 {
1880 if (subport->cman_enabled) {
1881 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1882 if (subport->cman == RTE_SCHED_CMAN_RED) {
1883 struct rte_red *red = &qe->red;
1884
1885 rte_red_mark_queue_empty(red, port->time);
1886 }
1887 }
1888 }
1889
1890 static inline void
1891 rte_sched_port_pie_dequeue(struct rte_sched_subport *subport,
1892 uint32_t qindex, uint32_t pkt_len, uint64_t time) {
1893 if (subport->cman_enabled && subport->cman == RTE_SCHED_CMAN_PIE) {
1894 struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
1895 struct rte_pie *pie = &qe->pie;
1896
1897 /* Update queue length */
1898 pie->qlen -= 1;
1899 pie->qlen_bytes -= pkt_len;
1900
1901 rte_pie_dequeue(pie, pkt_len, time);
1902 }
1903 }
1904
1905 #else
1906
1907 static inline int rte_sched_port_cman_drop(struct rte_sched_port *port __rte_unused,
1908 struct rte_sched_subport *subport __rte_unused,
1909 struct rte_mbuf *pkt __rte_unused,
1910 uint32_t qindex __rte_unused,
1911 uint16_t qlen __rte_unused)
1912 {
1913 return 0;
1914 }
1915
1916 #define rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex)
1917
1918 static inline void
1919 rte_sched_port_pie_dequeue(struct rte_sched_subport *subport __rte_unused,
1920 uint32_t qindex __rte_unused,
1921 uint32_t pkt_len __rte_unused,
1922 uint64_t time __rte_unused) {
1923 /* Do nothing when RTE_SCHED_CMAN is not defined */
1924 }
1925
1926 #endif /* RTE_SCHED_CMAN */
1927
1928 #ifdef RTE_SCHED_DEBUG
1929
1930 static inline void
1931 debug_check_queue_slab(struct rte_sched_subport *subport, uint32_t bmp_pos,
1932 uint64_t bmp_slab)
1933 {
1934 uint64_t mask;
1935 uint32_t i, panic;
1936
1937 if (bmp_slab == 0)
1938 rte_panic("Empty slab at position %u\n", bmp_pos);
1939
1940 panic = 0;
1941 for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
1942 if (mask & bmp_slab) {
1943 if (rte_sched_port_queue_is_empty(subport, bmp_pos + i)) {
1944 printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i);
1945 panic = 1;
1946 }
1947 }
1948 }
1949
1950 if (panic)
1951 rte_panic("Empty queues in slab 0x%" PRIx64 "starting at position %u\n",
1952 bmp_slab, bmp_pos);
1953 }
1954
1955 #endif /* RTE_SCHED_DEBUG */
1956
1957 static inline struct rte_sched_subport *
1958 rte_sched_port_subport(struct rte_sched_port *port,
1959 struct rte_mbuf *pkt)
1960 {
1961 uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
1962 uint32_t subport_id = queue_id >> (port->n_pipes_per_subport_log2 + 4);
1963
1964 return port->subports[subport_id];
1965 }
1966
1967 static inline uint32_t
1968 rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_subport *subport,
1969 struct rte_mbuf *pkt, uint32_t subport_qmask)
1970 {
1971 struct rte_sched_queue *q;
1972 struct rte_sched_queue_extra *qe;
1973 uint32_t qindex = rte_mbuf_sched_queue_get(pkt);
1974 uint32_t subport_queue_id = subport_qmask & qindex;
1975
1976 q = subport->queue + subport_queue_id;
1977 rte_prefetch0(q);
1978 qe = subport->queue_extra + subport_queue_id;
1979 rte_prefetch0(qe);
1980
1981 return subport_queue_id;
1982 }
1983
1984 static inline void
1985 rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port,
1986 struct rte_sched_subport *subport,
1987 uint32_t qindex,
1988 struct rte_mbuf **qbase)
1989 {
1990 struct rte_sched_queue *q;
1991 struct rte_mbuf **q_qw;
1992 uint16_t qsize;
1993
1994 q = subport->queue + qindex;
1995 qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
1996 q_qw = qbase + (q->qw & (qsize - 1));
1997
1998 rte_prefetch0(q_qw);
1999 rte_bitmap_prefetch0(subport->bmp, qindex);
2000 }
2001
2002 static inline int
2003 rte_sched_port_enqueue_qwa(struct rte_sched_port *port,
2004 struct rte_sched_subport *subport,
2005 uint32_t qindex,
2006 struct rte_mbuf **qbase,
2007 struct rte_mbuf *pkt)
2008 {
2009 struct rte_sched_queue *q;
2010 uint16_t qsize;
2011 uint16_t qlen;
2012
2013 q = subport->queue + qindex;
2014 qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2015 qlen = q->qw - q->qr;
2016
2017 /* Drop the packet (and update drop stats) when queue is full */
2018 if (unlikely(rte_sched_port_cman_drop(port, subport, pkt, qindex, qlen) ||
2019 (qlen >= qsize))) {
2020 rte_sched_port_update_subport_stats_on_drop(port, subport,
2021 qindex, pkt, qlen < qsize);
2022 rte_sched_port_update_queue_stats_on_drop(subport, qindex, pkt,
2023 qlen < qsize);
2024 rte_pktmbuf_free(pkt); /* free only after the drop stats have read pkt->pkt_len */
2025 return 0;
2026 }
2027
2028 /* Enqueue packet */
2029 qbase[q->qw & (qsize - 1)] = pkt;
2030 q->qw++;
2031
2032 /* Activate queue in the subport bitmap */
2033 rte_bitmap_set(subport->bmp, qindex);
2034
2035 /* Statistics */
2036 rte_sched_port_update_subport_stats(port, subport, qindex, pkt);
2037 rte_sched_port_update_queue_stats(subport, qindex, pkt);
2038
2039 return 1;
2040 }
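/*
 * Added commentary (illustrative sketch; assumes qsize is a power of two, as
 * required by the scheduler queue size configuration): the ring indexing above
 * relies on free-running 16-bit write/read counters, so occupancy and slot
 * index are obtained by subtraction and masking, e.g.
 *
 *	qlen = (uint16_t)(q->qw - q->qr);   // valid even across counter wrap-around
 *	slot = q->qw & (qsize - 1);         // e.g. qw = 70, qsize = 64 -> slot 6
 */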
2041
2042
2043 /*
2044 * The enqueue function implements a 4-level pipeline with each stage
2045 * processing two different packets. The purpose of using a pipeline
2046 * is to hide the latency of prefetching the data structures. The
2047 * naming convention is presented in the diagram below:
2048 *
2049 * p00 _______ p10 _______ p20 _______ p30 _______
2050 * ----->| |----->| |----->| |----->| |----->
2051 * | 0 | | 1 | | 2 | | 3 |
2052 * ----->|_______|----->|_______|----->|_______|----->|_______|----->
2053 * p01 p11 p21 p31
2054 *
2055 */
2056 int
2057 rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,
2058 uint32_t n_pkts)
2059 {
2060 struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21,
2061 *pkt30, *pkt31, *pkt_last;
2062 struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base,
2063 **q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
2064 struct rte_sched_subport *subport00, *subport01, *subport10, *subport11,
2065 *subport20, *subport21, *subport30, *subport31, *subport_last;
2066 uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
2067 uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
2068 uint32_t subport_qmask;
2069 uint32_t result, i;
2070
2071 result = 0;
2072 subport_qmask = (1 << (port->n_pipes_per_subport_log2 + 4)) - 1;
2073
2074 /*
2075 * Less than 6 input packets available, which is not enough to
2076 * feed the pipeline
2077 */
2078 if (unlikely(n_pkts < 6)) {
2079 struct rte_sched_subport *subports[5];
2080 struct rte_mbuf **q_base[5];
2081 uint32_t q[5];
2082
2083 /* Prefetch the mbuf structure of each packet */
2084 for (i = 0; i < n_pkts; i++)
2085 rte_prefetch0(pkts[i]);
2086
2087 /* Prefetch the subport structure for each packet */
2088 for (i = 0; i < n_pkts; i++)
2089 subports[i] = rte_sched_port_subport(port, pkts[i]);
2090
2091 /* Prefetch the queue structure for each queue */
2092 for (i = 0; i < n_pkts; i++)
2093 q[i] = rte_sched_port_enqueue_qptrs_prefetch0(subports[i],
2094 pkts[i], subport_qmask);
2095
2096 /* Prefetch the write pointer location of each queue */
2097 for (i = 0; i < n_pkts; i++) {
2098 q_base[i] = rte_sched_subport_pipe_qbase(subports[i], q[i]);
2099 rte_sched_port_enqueue_qwa_prefetch0(port, subports[i],
2100 q[i], q_base[i]);
2101 }
2102
2103 /* Write each packet to its queue */
2104 for (i = 0; i < n_pkts; i++)
2105 result += rte_sched_port_enqueue_qwa(port, subports[i],
2106 q[i], q_base[i], pkts[i]);
2107
2108 return result;
2109 }
2110
2111 /* Feed the first 3 stages of the pipeline (6 packets needed) */
2112 pkt20 = pkts[0];
2113 pkt21 = pkts[1];
2114 rte_prefetch0(pkt20);
2115 rte_prefetch0(pkt21);
2116
2117 pkt10 = pkts[2];
2118 pkt11 = pkts[3];
2119 rte_prefetch0(pkt10);
2120 rte_prefetch0(pkt11);
2121
2122 subport20 = rte_sched_port_subport(port, pkt20);
2123 subport21 = rte_sched_port_subport(port, pkt21);
2124 q20 = rte_sched_port_enqueue_qptrs_prefetch0(subport20,
2125 pkt20, subport_qmask);
2126 q21 = rte_sched_port_enqueue_qptrs_prefetch0(subport21,
2127 pkt21, subport_qmask);
2128
2129 pkt00 = pkts[4];
2130 pkt01 = pkts[5];
2131 rte_prefetch0(pkt00);
2132 rte_prefetch0(pkt01);
2133
2134 subport10 = rte_sched_port_subport(port, pkt10);
2135 subport11 = rte_sched_port_subport(port, pkt11);
2136 q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
2137 pkt10, subport_qmask);
2138 q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
2139 pkt11, subport_qmask);
2140
2141 q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
2142 q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
2143 rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
2144 rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);
2145
2146 /* Run the pipeline */
2147 for (i = 6; i < (n_pkts & (~1)); i += 2) {
2148 /* Propagate stage inputs */
2149 pkt30 = pkt20;
2150 pkt31 = pkt21;
2151 pkt20 = pkt10;
2152 pkt21 = pkt11;
2153 pkt10 = pkt00;
2154 pkt11 = pkt01;
2155 q30 = q20;
2156 q31 = q21;
2157 q20 = q10;
2158 q21 = q11;
2159 subport30 = subport20;
2160 subport31 = subport21;
2161 subport20 = subport10;
2162 subport21 = subport11;
2163 q30_base = q20_base;
2164 q31_base = q21_base;
2165
2166 /* Stage 0: Get packets in */
2167 pkt00 = pkts[i];
2168 pkt01 = pkts[i + 1];
2169 rte_prefetch0(pkt00);
2170 rte_prefetch0(pkt01);
2171
2172 /* Stage 1: Prefetch subport and queue structure storing queue pointers */
2173 subport10 = rte_sched_port_subport(port, pkt10);
2174 subport11 = rte_sched_port_subport(port, pkt11);
2175 q10 = rte_sched_port_enqueue_qptrs_prefetch0(subport10,
2176 pkt10, subport_qmask);
2177 q11 = rte_sched_port_enqueue_qptrs_prefetch0(subport11,
2178 pkt11, subport_qmask);
2179
2180 /* Stage 2: Prefetch queue write location */
2181 q20_base = rte_sched_subport_pipe_qbase(subport20, q20);
2182 q21_base = rte_sched_subport_pipe_qbase(subport21, q21);
2183 rte_sched_port_enqueue_qwa_prefetch0(port, subport20, q20, q20_base);
2184 rte_sched_port_enqueue_qwa_prefetch0(port, subport21, q21, q21_base);
2185
2186 /* Stage 3: Write packet to queue and activate queue */
2187 r30 = rte_sched_port_enqueue_qwa(port, subport30,
2188 q30, q30_base, pkt30);
2189 r31 = rte_sched_port_enqueue_qwa(port, subport31,
2190 q31, q31_base, pkt31);
2191 result += r30 + r31;
2192 }
2193
2194 /*
2195 * Drain the pipeline (exactly 6 packets).
2196 * Handle the last packet in the case
2197 * of an odd number of input packets.
2198 */
2199 pkt_last = pkts[n_pkts - 1];
2200 rte_prefetch0(pkt_last);
2201
2202 subport00 = rte_sched_port_subport(port, pkt00);
2203 subport01 = rte_sched_port_subport(port, pkt01);
2204 q00 = rte_sched_port_enqueue_qptrs_prefetch0(subport00,
2205 pkt00, subport_qmask);
2206 q01 = rte_sched_port_enqueue_qptrs_prefetch0(subport01,
2207 pkt01, subport_qmask);
2208
2209 q10_base = rte_sched_subport_pipe_qbase(subport10, q10);
2210 q11_base = rte_sched_subport_pipe_qbase(subport11, q11);
2211 rte_sched_port_enqueue_qwa_prefetch0(port, subport10, q10, q10_base);
2212 rte_sched_port_enqueue_qwa_prefetch0(port, subport11, q11, q11_base);
2213
2214 r20 = rte_sched_port_enqueue_qwa(port, subport20,
2215 q20, q20_base, pkt20);
2216 r21 = rte_sched_port_enqueue_qwa(port, subport21,
2217 q21, q21_base, pkt21);
2218 result += r20 + r21;
2219
2220 subport_last = rte_sched_port_subport(port, pkt_last);
2221 q_last = rte_sched_port_enqueue_qptrs_prefetch0(subport_last,
2222 pkt_last, subport_qmask);
2223
2224 q00_base = rte_sched_subport_pipe_qbase(subport00, q00);
2225 q01_base = rte_sched_subport_pipe_qbase(subport01, q01);
2226 rte_sched_port_enqueue_qwa_prefetch0(port, subport00, q00, q00_base);
2227 rte_sched_port_enqueue_qwa_prefetch0(port, subport01, q01, q01_base);
2228
2229 r10 = rte_sched_port_enqueue_qwa(port, subport10, q10,
2230 q10_base, pkt10);
2231 r11 = rte_sched_port_enqueue_qwa(port, subport11, q11,
2232 q11_base, pkt11);
2233 result += r10 + r11;
2234
2235 q_last_base = rte_sched_subport_pipe_qbase(subport_last, q_last);
2236 rte_sched_port_enqueue_qwa_prefetch0(port, subport_last,
2237 q_last, q_last_base);
2238
2239 r00 = rte_sched_port_enqueue_qwa(port, subport00, q00,
2240 q00_base, pkt00);
2241 r01 = rte_sched_port_enqueue_qwa(port, subport01, q01,
2242 q01_base, pkt01);
2243 result += r00 + r01;
2244
2245 if (n_pkts & 1) {
2246 r_last = rte_sched_port_enqueue_qwa(port, subport_last,
2247 q_last, q_last_base, pkt_last);
2248 result += r_last;
2249 }
2250
2251 return result;
2252 }
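/*
 * Added commentary (illustrative, not part of the original sources): in the
 * steady-state loop above, iteration i works on four packet pairs at once:
 * pkts[i]/pkts[i+1] enter stage 0 (mbuf prefetch), pkts[i-2]/pkts[i-1] are in
 * stage 1 (subport and queue pointer prefetch), pkts[i-4]/pkts[i-3] are in
 * stage 2 (queue write location prefetch), and pkts[i-6]/pkts[i-5] complete in
 * stage 3 (actual write to the queue). The prologue before the loop fills
 * stages 1-3 with the first six packets and the epilogue drains them.
 */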
2253
2254 static inline uint64_t
2255 grinder_tc_ov_credits_update(struct rte_sched_port *port,
2256 struct rte_sched_subport *subport, uint32_t pos)
2257 {
2258 struct rte_sched_grinder *grinder = subport->grinder + pos;
2259 struct rte_sched_subport_profile *sp = grinder->subport_params;
2260 uint64_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2261 uint64_t tc_consumption = 0, tc_ov_consumption_max;
2262 uint64_t tc_ov_wm = subport->tc_ov_wm;
2263 uint32_t i;
2264
2265 if (subport->tc_ov == 0)
2266 return subport->tc_ov_wm_max;
2267
2268 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
2269 tc_ov_consumption[i] = sp->tc_credits_per_period[i]
2270 - subport->tc_credits[i];
2271 tc_consumption += tc_ov_consumption[i];
2272 }
2273
2274 tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] =
2275 sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
2276 subport->tc_credits[RTE_SCHED_TRAFFIC_CLASS_BE];
2277
2278 tc_ov_consumption_max =
2279 sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
2280 tc_consumption;
2281
2282 if (tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] >
2283 (tc_ov_consumption_max - port->mtu)) {
2284 tc_ov_wm -= tc_ov_wm >> 7;
2285 if (tc_ov_wm < subport->tc_ov_wm_min)
2286 tc_ov_wm = subport->tc_ov_wm_min;
2287
2288 return tc_ov_wm;
2289 }
2290
2291 tc_ov_wm += (tc_ov_wm >> 7) + 1;
2292 if (tc_ov_wm > subport->tc_ov_wm_max)
2293 tc_ov_wm = subport->tc_ov_wm_max;
2294
2295 return tc_ov_wm;
2296 }
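/*
 * Added commentary (worked example, not from the original sources): the
 * oversubscription watermark tc_ov_wm is adapted by roughly 1/128 of its
 * current value once per subport TC period. With tc_ov_wm = 12800, the first
 * branch (best-effort consumption reaching the available headroom) lowers it
 * by 12800 >> 7 = 100 to 12700, clamped at tc_ov_wm_min; otherwise it is
 * raised by (12800 >> 7) + 1 = 101, clamped at tc_ov_wm_max.
 */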
2297
2298 static inline void
2299 grinder_credits_update(struct rte_sched_port *port,
2300 struct rte_sched_subport *subport, uint32_t pos)
2301 {
2302 struct rte_sched_grinder *grinder = subport->grinder + pos;
2303 struct rte_sched_pipe *pipe = grinder->pipe;
2304 struct rte_sched_pipe_profile *params = grinder->pipe_params;
2305 struct rte_sched_subport_profile *sp = grinder->subport_params;
2306 uint64_t n_periods;
2307 uint32_t i;
2308
2309 /* Subport TB */
2310 n_periods = (port->time - subport->tb_time) / sp->tb_period;
2311 subport->tb_credits += n_periods * sp->tb_credits_per_period;
2312 subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
2313 subport->tb_time += n_periods * sp->tb_period;
2314
2315 /* Pipe TB */
2316 n_periods = (port->time - pipe->tb_time) / params->tb_period;
2317 pipe->tb_credits += n_periods * params->tb_credits_per_period;
2318 pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
2319 pipe->tb_time += n_periods * params->tb_period;
2320
2321 /* Subport TCs */
2322 if (unlikely(port->time >= subport->tc_time)) {
2323 subport->tc_ov_wm =
2324 grinder_tc_ov_credits_update(port, subport, pos);
2325
2326 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2327 subport->tc_credits[i] = sp->tc_credits_per_period[i];
2328
2329 subport->tc_time = port->time + sp->tc_period;
2330 subport->tc_ov_period_id++;
2331 }
2332
2333 /* Pipe TCs */
2334 if (unlikely(port->time >= pipe->tc_time)) {
2335 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2336 pipe->tc_credits[i] = params->tc_credits_per_period[i];
2337 pipe->tc_time = port->time + params->tc_period;
2338 }
2339
2340 /* Pipe TCs - Oversubscription */
2341 if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
2342 pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;
2343
2344 pipe->tc_ov_period_id = subport->tc_ov_period_id;
2345 }
2346 }
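/*
 * Added commentary (worked example, not from the original sources): both token
 * buckets above are refilled lazily in whole periods. If tb_period is 100 time
 * units, tb_credits_per_period is 50 and 730 time units have elapsed since
 * tb_time, then n_periods = 7, 350 credits are added (capped at tb_size) and
 * tb_time advances by 700, carrying the remaining 30 units to the next update.
 */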
2347
2348 static inline int
2349 grinder_credits_check(struct rte_sched_port *port,
2350 struct rte_sched_subport *subport, uint32_t pos)
2351 {
2352 struct rte_sched_grinder *grinder = subport->grinder + pos;
2353 struct rte_sched_pipe *pipe = grinder->pipe;
2354 struct rte_mbuf *pkt = grinder->pkt;
2355 uint32_t tc_index = grinder->tc_index;
2356 uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
2357 uint64_t subport_tb_credits = subport->tb_credits;
2358 uint64_t subport_tc_credits = subport->tc_credits[tc_index];
2359 uint64_t pipe_tb_credits = pipe->tb_credits;
2360 uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
2361 uint64_t pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2362 uint64_t pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {0};
2363 uint64_t pipe_tc_ov_credits;
2364 uint32_t i;
2365 int enough_credits;
2366
2367 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
2368 pipe_tc_ov_mask1[i] = ~0LLU;
2369
2370 pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASS_BE] = pipe->tc_ov_credits;
2371 pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASS_BE] = ~0LLU;
2372 pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
2373
2374 /* Check pipe and subport credits */
2375 enough_credits = (pkt_len <= subport_tb_credits) &&
2376 (pkt_len <= subport_tc_credits) &&
2377 (pkt_len <= pipe_tb_credits) &&
2378 (pkt_len <= pipe_tc_credits) &&
2379 (pkt_len <= pipe_tc_ov_credits);
2380
2381 if (!enough_credits)
2382 return 0;
2383
2384 /* Update pipe and subport credits */
2385 subport->tb_credits -= pkt_len;
2386 subport->tc_credits[tc_index] -= pkt_len;
2387 pipe->tb_credits -= pkt_len;
2388 pipe->tc_credits[tc_index] -= pkt_len;
2389 pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;
2390
2391 return 1;
2392 }
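/*
 * Added commentary (illustrative, not from the original sources): the two mask
 * arrays implement a branch-free special case for the best-effort TC.
 * pipe_tc_ov_mask1[] holds ~0 for every TC except BE, where it holds the real
 * tc_ov_credits, so the "pkt_len <= pipe_tc_ov_credits" test is always true
 * for non-BE traffic. pipe_tc_ov_mask2[] is non-zero only for BE, so the final
 * subtraction consumes oversubscription credits only when a best-effort packet
 * is scheduled.
 */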
2393
2394
2395 static inline int
2396 grinder_schedule(struct rte_sched_port *port,
2397 struct rte_sched_subport *subport, uint32_t pos)
2398 {
2399 struct rte_sched_grinder *grinder = subport->grinder + pos;
2400 struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
2401 uint32_t qindex = grinder->qindex[grinder->qpos];
2402 struct rte_mbuf *pkt = grinder->pkt;
2403 uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
2404 uint32_t be_tc_active;
2405
2406 if (!grinder_credits_check(port, subport, pos))
2407 return 0;
2408
2409 /* Advance port time */
2410 port->time += pkt_len;
2411
2412 /* Send packet */
2413 port->pkts_out[port->n_pkts_out++] = pkt;
2414 queue->qr++;
2415
2416 be_tc_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE) ? ~0x0 : 0x0;
2417 grinder->wrr_tokens[grinder->qpos] +=
2418 (pkt_len * grinder->wrr_cost[grinder->qpos]) & be_tc_active;
2419
2420 if (queue->qr == queue->qw) {
2421 rte_bitmap_clear(subport->bmp, qindex);
2422 grinder->qmask &= ~(1 << grinder->qpos);
2423 if (be_tc_active)
2424 grinder->wrr_mask[grinder->qpos] = 0;
2425
2426 rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex);
2427 }
2428
2429 rte_sched_port_pie_dequeue(subport, qindex, pkt_len, port->time_cpu_cycles);
2430
2431 /* Reset pipe loop detection */
2432 subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
2433 grinder->productive = 1;
2434
2435 return 1;
2436 }
2437
2438 static inline int
2439 grinder_pipe_exists(struct rte_sched_subport *subport, uint32_t base_pipe)
2440 {
2441 uint32_t i;
2442
2443 for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) {
2444 if (subport->grinder_base_bmp_pos[i] == base_pipe)
2445 return 1;
2446 }
2447
2448 return 0;
2449 }
2450
2451 static inline void
2452 grinder_pcache_populate(struct rte_sched_subport *subport,
2453 uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
2454 {
2455 struct rte_sched_grinder *grinder = subport->grinder + pos;
2456 uint16_t w[4];
2457
2458 grinder->pcache_w = 0;
2459 grinder->pcache_r = 0;
2460
2461 w[0] = (uint16_t) bmp_slab;
2462 w[1] = (uint16_t) (bmp_slab >> 16);
2463 w[2] = (uint16_t) (bmp_slab >> 32);
2464 w[3] = (uint16_t) (bmp_slab >> 48);
2465
2466 grinder->pcache_qmask[grinder->pcache_w] = w[0];
2467 grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
2468 grinder->pcache_w += (w[0] != 0);
2469
2470 grinder->pcache_qmask[grinder->pcache_w] = w[1];
2471 grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
2472 grinder->pcache_w += (w[1] != 0);
2473
2474 grinder->pcache_qmask[grinder->pcache_w] = w[2];
2475 grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
2476 grinder->pcache_w += (w[2] != 0);
2477
2478 grinder->pcache_qmask[grinder->pcache_w] = w[3];
2479 grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
2480 grinder->pcache_w += (w[3] != 0);
2481 }
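/*
 * Added commentary (worked example, not from the original sources): a 64-bit
 * bitmap slab covers 64 consecutive queues, i.e. 4 pipes of 16 queues each, so
 * splitting it into four 16-bit words yields one active-queue mask per pipe.
 * For bmp_slab = 0x0003000000010000 and bmp_pos = 128, only w[1] and w[3] are
 * non-zero, so the pipe cache receives the entries {qmask 0x0001, qindex 144}
 * and {qmask 0x0003, qindex 176}.
 */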
2482
2483 static inline void
2484 grinder_tccache_populate(struct rte_sched_subport *subport,
2485 uint32_t pos, uint32_t qindex, uint16_t qmask)
2486 {
2487 struct rte_sched_grinder *grinder = subport->grinder + pos;
2488 uint8_t b, i;
2489
2490 grinder->tccache_w = 0;
2491 grinder->tccache_r = 0;
2492
2493 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
2494 b = (uint8_t) ((qmask >> i) & 0x1);
2495 grinder->tccache_qmask[grinder->tccache_w] = b;
2496 grinder->tccache_qindex[grinder->tccache_w] = qindex + i;
2497 grinder->tccache_w += (b != 0);
2498 }
2499
2500 b = (uint8_t) (qmask >> (RTE_SCHED_TRAFFIC_CLASS_BE));
2501 grinder->tccache_qmask[grinder->tccache_w] = b;
2502 grinder->tccache_qindex[grinder->tccache_w] = qindex +
2503 RTE_SCHED_TRAFFIC_CLASS_BE;
2504 grinder->tccache_w += (b != 0);
2505 }
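/*
 * Added commentary (worked example, not from the original sources): the 16-bit
 * pipe queue mask is folded into per-TC cache entries: bits 0..11 each stand
 * for one single-queue traffic class, while bits 12..15 together form the
 * best-effort queue mask. For qmask = 0x9001 the cache receives
 * {qmask 0x1, qindex + 0} for TC0 and {qmask 0x9, qindex + 12} for BE.
 */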
2506
2507 static inline int
2508 grinder_next_tc(struct rte_sched_port *port,
2509 struct rte_sched_subport *subport, uint32_t pos)
2510 {
2511 struct rte_sched_grinder *grinder = subport->grinder + pos;
2512 struct rte_mbuf **qbase;
2513 uint32_t qindex;
2514 uint16_t qsize;
2515
2516 if (grinder->tccache_r == grinder->tccache_w)
2517 return 0;
2518
2519 qindex = grinder->tccache_qindex[grinder->tccache_r];
2520 qbase = rte_sched_subport_pipe_qbase(subport, qindex);
2521 qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
2522
2523 grinder->tc_index = rte_sched_port_pipe_tc(port, qindex);
2524 grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
2525 grinder->qsize = qsize;
2526
2527 if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
2528 grinder->queue[0] = subport->queue + qindex;
2529 grinder->qbase[0] = qbase;
2530 grinder->qindex[0] = qindex;
2531 grinder->tccache_r++;
2532
2533 return 1;
2534 }
2535
2536 grinder->queue[0] = subport->queue + qindex;
2537 grinder->queue[1] = subport->queue + qindex + 1;
2538 grinder->queue[2] = subport->queue + qindex + 2;
2539 grinder->queue[3] = subport->queue + qindex + 3;
2540
2541 grinder->qbase[0] = qbase;
2542 grinder->qbase[1] = qbase + qsize;
2543 grinder->qbase[2] = qbase + 2 * qsize;
2544 grinder->qbase[3] = qbase + 3 * qsize;
2545
2546 grinder->qindex[0] = qindex;
2547 grinder->qindex[1] = qindex + 1;
2548 grinder->qindex[2] = qindex + 2;
2549 grinder->qindex[3] = qindex + 3;
2550
2551 grinder->tccache_r++;
2552 return 1;
2553 }
2554
2555 static inline int
2556 grinder_next_pipe(struct rte_sched_port *port,
2557 struct rte_sched_subport *subport, uint32_t pos)
2558 {
2559 struct rte_sched_grinder *grinder = subport->grinder + pos;
2560 uint32_t pipe_qindex;
2561 uint16_t pipe_qmask;
2562
2563 if (grinder->pcache_r < grinder->pcache_w) {
2564 pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
2565 pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
2566 grinder->pcache_r++;
2567 } else {
2568 uint64_t bmp_slab = 0;
2569 uint32_t bmp_pos = 0;
2570
2571 /* Get another non-empty pipe group */
2572 if (unlikely(rte_bitmap_scan(subport->bmp, &bmp_pos, &bmp_slab) <= 0))
2573 return 0;
2574
2575 #ifdef RTE_SCHED_DEBUG
2576 debug_check_queue_slab(subport, bmp_pos, bmp_slab);
2577 #endif
2578
2579 /* Return if pipe group already in one of the other grinders */
2580 subport->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
2581 if (unlikely(grinder_pipe_exists(subport, bmp_pos)))
2582 return 0;
2583
2584 subport->grinder_base_bmp_pos[pos] = bmp_pos;
2585
2586 /* Install new pipe group into grinder's pipe cache */
2587 grinder_pcache_populate(subport, pos, bmp_pos, bmp_slab);
2588
2589 pipe_qmask = grinder->pcache_qmask[0];
2590 pipe_qindex = grinder->pcache_qindex[0];
2591 grinder->pcache_r = 1;
2592 }
2593
2594 /* Install new pipe in the grinder */
2595 grinder->pindex = pipe_qindex >> 4;
2596 grinder->subport = subport;
2597 grinder->pipe = subport->pipe + grinder->pindex;
2598 grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
2599 grinder->productive = 0;
2600
2601 grinder_tccache_populate(subport, pos, pipe_qindex, pipe_qmask);
2602 grinder_next_tc(port, subport, pos);
2603
2604 /* Check for pipe exhaustion */
2605 if (grinder->pindex == subport->pipe_loop) {
2606 subport->pipe_exhaustion = 1;
2607 subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
2608 }
2609
2610 return 1;
2611 }
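/*
 * Added commentary (not from the original sources): since every pipe owns 16
 * queues, the pipe index is recovered from the subport-level queue index by
 * dropping the 4 low-order bits (pindex = pipe_qindex >> 4). The final check
 * flags pipe exhaustion when the grinder wraps back to the pipe recorded in
 * subport->pipe_loop without having scheduled anything in between.
 */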
2612
2613
2614 static inline void
2615 grinder_wrr_load(struct rte_sched_subport *subport, uint32_t pos)
2616 {
2617 struct rte_sched_grinder *grinder = subport->grinder + pos;
2618 struct rte_sched_pipe *pipe = grinder->pipe;
2619 struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
2620 uint32_t qmask = grinder->qmask;
2621
2622 grinder->wrr_tokens[0] =
2623 ((uint16_t) pipe->wrr_tokens[0]) << RTE_SCHED_WRR_SHIFT;
2624 grinder->wrr_tokens[1] =
2625 ((uint16_t) pipe->wrr_tokens[1]) << RTE_SCHED_WRR_SHIFT;
2626 grinder->wrr_tokens[2] =
2627 ((uint16_t) pipe->wrr_tokens[2]) << RTE_SCHED_WRR_SHIFT;
2628 grinder->wrr_tokens[3] =
2629 ((uint16_t) pipe->wrr_tokens[3]) << RTE_SCHED_WRR_SHIFT;
2630
2631 grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
2632 grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
2633 grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
2634 grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;
2635
2636 grinder->wrr_cost[0] = pipe_params->wrr_cost[0];
2637 grinder->wrr_cost[1] = pipe_params->wrr_cost[1];
2638 grinder->wrr_cost[2] = pipe_params->wrr_cost[2];
2639 grinder->wrr_cost[3] = pipe_params->wrr_cost[3];
2640 }
2641
2642 static inline void
2643 grinder_wrr_store(struct rte_sched_subport *subport, uint32_t pos)
2644 {
2645 struct rte_sched_grinder *grinder = subport->grinder + pos;
2646 struct rte_sched_pipe *pipe = grinder->pipe;
2647
2648 pipe->wrr_tokens[0] =
2649 (grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >>
2650 RTE_SCHED_WRR_SHIFT;
2651 pipe->wrr_tokens[1] =
2652 (grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >>
2653 RTE_SCHED_WRR_SHIFT;
2654 pipe->wrr_tokens[2] =
2655 (grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >>
2656 RTE_SCHED_WRR_SHIFT;
2657 pipe->wrr_tokens[3] =
2658 (grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >>
2659 RTE_SCHED_WRR_SHIFT;
2660 }
2661
2662 static inline void
2663 grinder_wrr(struct rte_sched_subport *subport, uint32_t pos)
2664 {
2665 struct rte_sched_grinder *grinder = subport->grinder + pos;
2666 uint16_t wrr_tokens_min;
2667
2668 grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
2669 grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
2670 grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
2671 grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];
2672
2673 grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
2674 wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];
2675
2676 grinder->wrr_tokens[0] -= wrr_tokens_min;
2677 grinder->wrr_tokens[1] -= wrr_tokens_min;
2678 grinder->wrr_tokens[2] -= wrr_tokens_min;
2679 grinder->wrr_tokens[3] -= wrr_tokens_min;
2680 }
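/*
 * Added commentary (worked example, not from the original sources): inactive
 * best-effort queues have their token counters forced to 0xFFFF by the mask OR
 * above, so rte_min_pos_4_u16() can only pick an active queue. With tokens
 * {40, 24, 56, 0xFFFF}, qpos becomes 1, the common minimum 24 is subtracted,
 * and the counters continue as {16, 0, 32, 0xFFFF - 24} for the next round.
 */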
2681
2682
2683 #define grinder_evict(subport, pos)
2684
2685 static inline void
2686 grinder_prefetch_pipe(struct rte_sched_subport *subport, uint32_t pos)
2687 {
2688 struct rte_sched_grinder *grinder = subport->grinder + pos;
2689
2690 rte_prefetch0(grinder->pipe);
2691 rte_prefetch0(grinder->queue[0]);
2692 }
2693
2694 static inline void
2695 grinder_prefetch_tc_queue_arrays(struct rte_sched_subport *subport, uint32_t pos)
2696 {
2697 struct rte_sched_grinder *grinder = subport->grinder + pos;
2698 uint16_t qsize, qr[RTE_SCHED_MAX_QUEUES_PER_TC];
2699
2700 qsize = grinder->qsize;
2701 grinder->qpos = 0;
2702
2703 if (grinder->tc_index < RTE_SCHED_TRAFFIC_CLASS_BE) {
2704 qr[0] = grinder->queue[0]->qr & (qsize - 1);
2705
2706 rte_prefetch0(grinder->qbase[0] + qr[0]);
2707 return;
2708 }
2709
2710 qr[0] = grinder->queue[0]->qr & (qsize - 1);
2711 qr[1] = grinder->queue[1]->qr & (qsize - 1);
2712 qr[2] = grinder->queue[2]->qr & (qsize - 1);
2713 qr[3] = grinder->queue[3]->qr & (qsize - 1);
2714
2715 rte_prefetch0(grinder->qbase[0] + qr[0]);
2716 rte_prefetch0(grinder->qbase[1] + qr[1]);
2717
2718 grinder_wrr_load(subport, pos);
2719 grinder_wrr(subport, pos);
2720
2721 rte_prefetch0(grinder->qbase[2] + qr[2]);
2722 rte_prefetch0(grinder->qbase[3] + qr[3]);
2723 }
2724
2725 static inline void
2726 grinder_prefetch_mbuf(struct rte_sched_subport *subport, uint32_t pos)
2727 {
2728 struct rte_sched_grinder *grinder = subport->grinder + pos;
2729 uint32_t qpos = grinder->qpos;
2730 struct rte_mbuf **qbase = grinder->qbase[qpos];
2731 uint16_t qsize = grinder->qsize;
2732 uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);
2733
2734 grinder->pkt = qbase[qr];
2735 rte_prefetch0(grinder->pkt);
2736
2737 if (unlikely((qr & 0x7) == 7)) {
2738 uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);
2739
2740 rte_prefetch0(qbase + qr_next);
2741 }
2742 }
2743
2744 static inline uint32_t
2745 grinder_handle(struct rte_sched_port *port,
2746 struct rte_sched_subport *subport, uint32_t pos)
2747 {
2748 struct rte_sched_grinder *grinder = subport->grinder + pos;
2749
2750 switch (grinder->state) {
2751 case e_GRINDER_PREFETCH_PIPE:
2752 {
2753 if (grinder_next_pipe(port, subport, pos)) {
2754 grinder_prefetch_pipe(subport, pos);
2755 subport->busy_grinders++;
2756
2757 grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
2758 return 0;
2759 }
2760
2761 return 0;
2762 }
2763
2764 case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
2765 {
2766 struct rte_sched_pipe *pipe = grinder->pipe;
2767
2768 grinder->pipe_params = subport->pipe_profiles + pipe->profile;
2769 grinder->subport_params = port->subport_profiles +
2770 subport->profile;
2771
2772 grinder_prefetch_tc_queue_arrays(subport, pos);
2773 grinder_credits_update(port, subport, pos);
2774
2775 grinder->state = e_GRINDER_PREFETCH_MBUF;
2776 return 0;
2777 }
2778
2779 case e_GRINDER_PREFETCH_MBUF:
2780 {
2781 grinder_prefetch_mbuf(subport, pos);
2782
2783 grinder->state = e_GRINDER_READ_MBUF;
2784 return 0;
2785 }
2786
2787 case e_GRINDER_READ_MBUF:
2788 {
2789 uint32_t wrr_active, result = 0;
2790
2791 result = grinder_schedule(port, subport, pos);
2792
2793 wrr_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE);
2794
2795 /* Look for next packet within the same TC */
2796 if (result && grinder->qmask) {
2797 if (wrr_active)
2798 grinder_wrr(subport, pos);
2799
2800 grinder_prefetch_mbuf(subport, pos);
2801
2802 return 1;
2803 }
2804
2805 if (wrr_active)
2806 grinder_wrr_store(subport, pos);
2807
2808 /* Look for another active TC within same pipe */
2809 if (grinder_next_tc(port, subport, pos)) {
2810 grinder_prefetch_tc_queue_arrays(subport, pos);
2811
2812 grinder->state = e_GRINDER_PREFETCH_MBUF;
2813 return result;
2814 }
2815
2816 if (grinder->productive == 0 &&
2817 subport->pipe_loop == RTE_SCHED_PIPE_INVALID)
2818 subport->pipe_loop = grinder->pindex;
2819
2820 grinder_evict(subport, pos);
2821
2822 /* Look for another active pipe */
2823 if (grinder_next_pipe(port, subport, pos)) {
2824 grinder_prefetch_pipe(subport, pos);
2825
2826 grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
2827 return result;
2828 }
2829
2830 /* No active pipe found */
2831 subport->busy_grinders--;
2832
2833 grinder->state = e_GRINDER_PREFETCH_PIPE;
2834 return result;
2835 }
2836
2837 default:
2838 rte_panic("Algorithmic error (invalid state)\n");
2839 return 0;
2840 }
2841 }
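/*
 * Added commentary (not from the original sources): each grinder is a small
 * state machine advanced once per dequeue step. PREFETCH_PIPE picks the next
 * active pipe from the bitmap and prefetches it; PREFETCH_TC_QUEUE_ARRAYS
 * resolves the pipe and subport profiles, refills credits and prefetches the
 * queue arrays; PREFETCH_MBUF prefetches the head packet; READ_MBUF schedules
 * it and then either stays on the same TC, moves to the next TC, moves to the
 * next pipe, or goes idle. Only the READ_MBUF state can return 1 (one packet
 * emitted).
 */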
2842
2843 static inline void
2844 rte_sched_port_time_resync(struct rte_sched_port *port)
2845 {
2846 uint64_t cycles = rte_get_tsc_cycles();
2847 uint64_t cycles_diff;
2848 uint64_t bytes_diff;
2849 uint32_t i;
2850
2851 if (cycles < port->time_cpu_cycles)
2852 port->time_cpu_cycles = 0;
2853
2854 cycles_diff = cycles - port->time_cpu_cycles;
2855 /* Compute elapsed time in bytes */
2856 bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT,
2857 port->inv_cycles_per_byte);
2858
2859 /* Advance port time */
2860 port->time_cpu_cycles +=
2861 (bytes_diff * port->cycles_per_byte) >> RTE_SCHED_TIME_SHIFT;
2862 port->time_cpu_bytes += bytes_diff;
2863 if (port->time < port->time_cpu_bytes)
2864 port->time = port->time_cpu_bytes;
2865
2866 /* Reset pipe loop detection */
2867 for (i = 0; i < port->n_subports_per_port; i++)
2868 port->subports[i]->pipe_loop = RTE_SCHED_PIPE_INVALID;
2869 }
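/*
 * Added commentary (worked example under assumed configuration, not from the
 * original sources): port time is kept in bytes. At a 10 GbE rate the port
 * handles 1.25e9 bytes/s, so on a 2.5 GHz CPU there are 2 cycles per byte
 * (stored scaled by RTE_SCHED_TIME_SHIFT). An elapsed cycles_diff of 1000
 * cycles then converts to roughly bytes_diff = 500 bytes of transmit budget,
 * and time_cpu_cycles is advanced by the cycle equivalent of exactly those 500
 * bytes so the fractional remainder is not lost.
 */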
2870
2871 static inline int
2872 rte_sched_port_exceptions(struct rte_sched_subport *subport, int second_pass)
2873 {
2874 int exceptions;
2875
2876 /* Check if any exception flag is set */
2877 exceptions = (second_pass && subport->busy_grinders == 0) ||
2878 (subport->pipe_exhaustion == 1);
2879
2880 /* Clear exception flags */
2881 subport->pipe_exhaustion = 0;
2882
2883 return exceptions;
2884 }
2885
2886 int
2887 rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
2888 {
2889 struct rte_sched_subport *subport;
2890 uint32_t subport_id = port->subport_id;
2891 uint32_t i, n_subports = 0, count;
2892
2893 port->pkts_out = pkts;
2894 port->n_pkts_out = 0;
2895
2896 rte_sched_port_time_resync(port);
2897
2898 /* Take each queue in the grinder one step further */
2899 for (i = 0, count = 0; ; i++) {
2900 subport = port->subports[subport_id];
2901
2902 count += grinder_handle(port, subport,
2903 i & (RTE_SCHED_PORT_N_GRINDERS - 1));
2904
2905 if (count == n_pkts) {
2906 subport_id++;
2907
2908 if (subport_id == port->n_subports_per_port)
2909 subport_id = 0;
2910
2911 port->subport_id = subport_id;
2912 break;
2913 }
2914
2915 if (rte_sched_port_exceptions(subport, i >= RTE_SCHED_PORT_N_GRINDERS)) {
2916 i = 0;
2917 subport_id++;
2918 n_subports++;
2919 }
2920
2921 if (subport_id == port->n_subports_per_port)
2922 subport_id = 0;
2923
2924 if (n_subports == port->n_subports_per_port) {
2925 port->subport_id = subport_id;
2926 break;
2927 }
2928 }
2929
2930 return count;
2931 }
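/*
 * Added usage sketch (illustrative only; helper names and burst size are
 * assumptions, not part of this file): an application typically pairs the
 * scheduler with RX/TX bursts on the datapath core, e.g.
 *
 *	nb_rx = rte_eth_rx_burst(port_id, rx_queue, mbufs, BURST_SIZE);
 *	rte_sched_port_enqueue(sched, mbufs, nb_rx);
 *	nb_sched = rte_sched_port_dequeue(sched, mbufs_tx, BURST_SIZE);
 *	rte_eth_tx_burst(port_id, tx_queue, mbufs_tx, nb_sched);
 *
 * where each mbuf has previously been classified (e.g. via
 * rte_sched_port_pkt_write()) so that rte_mbuf_sched_queue_get() resolves to
 * the intended queue.
 */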
2932