1 /* $OpenBSD: pfctl_altq.c,v 1.93 2007/10/15 02:16:35 deraadt Exp $ */
2
3 /*
4 * Copyright (c) 2002
5 * Sony Computer Science Laboratories Inc.
6 * Copyright (c) 2002, 2003 Henning Brauer <[email protected]>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/cdefs.h>
22 __FBSDID("$FreeBSD$");
23
24 #define PFIOC_USE_LATEST
25 #define _WANT_FREEBSD_BITSET
26
27 #include <sys/types.h>
28 #include <sys/bitset.h>
29 #include <sys/ioctl.h>
30 #include <sys/socket.h>
31
32 #include <net/if.h>
33 #include <netinet/in.h>
34 #include <net/pfvar.h>
35
36 #include <err.h>
37 #include <errno.h>
38 #include <inttypes.h>
39 #include <limits.h>
40 #include <math.h>
41 #include <search.h>
42 #include <stdio.h>
43 #include <stdlib.h>
44 #include <string.h>
45 #include <unistd.h>
46
47 #include <net/altq/altq.h>
48 #include <net/altq/altq_cbq.h>
49 #include <net/altq/altq_codel.h>
50 #include <net/altq/altq_priq.h>
51 #include <net/altq/altq_hfsc.h>
52 #include <net/altq/altq_fairq.h>
53
54 #include "pfctl_parser.h"
55 #include "pfctl.h"
56
57 #define is_sc_null(sc) (((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0))
58
59 static STAILQ_HEAD(interfaces, pfctl_altq) interfaces = STAILQ_HEAD_INITIALIZER(interfaces);
60 static struct hsearch_data queue_map;
61 static struct hsearch_data if_map;
62 static struct hsearch_data qid_map;
63
64 static struct pfctl_altq *pfaltq_lookup(char *ifname);
65 static struct pfctl_altq *qname_to_pfaltq(const char *, const char *);
66 static u_int32_t qname_to_qid(char *);
67
68 static int eval_pfqueue_cbq(struct pfctl *, struct pf_altq *,
69 struct pfctl_altq *);
70 static int cbq_compute_idletime(struct pfctl *, struct pf_altq *);
71 static int check_commit_cbq(int, int, struct pfctl_altq *);
72 static int print_cbq_opts(const struct pf_altq *);
73
74 static int print_codel_opts(const struct pf_altq *,
75 const struct node_queue_opt *);
76
77 static int eval_pfqueue_priq(struct pfctl *, struct pf_altq *,
78 struct pfctl_altq *);
79 static int check_commit_priq(int, int, struct pfctl_altq *);
80 static int print_priq_opts(const struct pf_altq *);
81
82 static int eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *,
83 struct pfctl_altq *, struct pfctl_altq *);
84 static int check_commit_hfsc(int, int, struct pfctl_altq *);
85 static int print_hfsc_opts(const struct pf_altq *,
86 const struct node_queue_opt *);
87
88 static int eval_pfqueue_fairq(struct pfctl *, struct pf_altq *,
89 struct pfctl_altq *, struct pfctl_altq *);
90 static int print_fairq_opts(const struct pf_altq *,
91 const struct node_queue_opt *);
92 static int check_commit_fairq(int, int, struct pfctl_altq *);
93
94 static void gsc_add_sc(struct gen_sc *, struct service_curve *);
95 static int is_gsc_under_sc(struct gen_sc *,
96 struct service_curve *);
97 static struct segment *gsc_getentry(struct gen_sc *, double);
98 static int gsc_add_seg(struct gen_sc *, double, double, double,
99 double);
100 static double sc_x2y(struct service_curve *, double);
101
102 #ifdef __FreeBSD__
103 u_int64_t getifspeed(int, char *);
104 #else
105 u_int32_t getifspeed(char *);
106 #endif
107 u_long getifmtu(char *);
108 int eval_queue_opts(struct pf_altq *, struct node_queue_opt *,
109 u_int64_t);
110 u_int64_t eval_bwspec(struct node_queue_bw *, u_int64_t);
111 void print_hfsc_sc(const char *, u_int, u_int, u_int,
112 const struct node_hfsc_sc *);
113 void print_fairq_sc(const char *, u_int, u_int, u_int,
114 const struct node_fairq_sc *);
115
116 static __attribute__((constructor)) void
pfctl_altq_init(void)117 pfctl_altq_init(void)
118 {
119 /*
120 * As hdestroy() will never be called on these tables, it will be
121 * safe to use references into the stored data as keys.
122 */
123 if (hcreate_r(0, &queue_map) == 0)
124 err(1, "Failed to create altq queue map");
125 if (hcreate_r(0, &if_map) == 0)
126 err(1, "Failed to create altq interface map");
127 if (hcreate_r(0, &qid_map) == 0)
128 err(1, "Failed to create altq queue id map");
129 }
130
/*
 * Store a private copy of an altq/queue definition and index it for later
 * lookup.  Interface-level definitions (empty qname) go into if_map and
 * the interfaces list; queue definitions go into queue_map (keyed by
 * "ifname:qname") and qid_map (keyed by the bare queue name).
 */
void
pfaltq_store(struct pf_altq *a)
{
	struct pfctl_altq	*altq;
	ENTRY			 item;
	ENTRY			*ret_item;
	size_t			 key_size;

	if ((altq = malloc(sizeof(*altq))) == NULL)
		err(1, "queue malloc");
	memcpy(&altq->pa, a, sizeof(struct pf_altq));
	memset(&altq->meta, 0, sizeof(altq->meta));

	if (a->qname[0] == 0) {
		/* Interface definition: the key points into the record. */
		item.key = altq->pa.ifname;
		item.data = altq;
		if (hsearch_r(item, ENTER, &ret_item, &if_map) == 0)
			err(1, "interface map insert");
		STAILQ_INSERT_TAIL(&interfaces, altq, meta.link);
	} else {
		/*
		 * Queue definition: build a heap-allocated "ifname:qname"
		 * key.  The key is intentionally never freed — the table
		 * lives for the lifetime of the process (see the
		 * constructor above).  key_size has room for the ':'
		 * because both source buffers include their NUL bytes.
		 */
		key_size = sizeof(a->ifname) + sizeof(a->qname);
		if ((item.key = malloc(key_size)) == NULL)
			err(1, "queue map key malloc");
		snprintf(item.key, key_size, "%s:%s", a->ifname, a->qname);
		item.data = altq;
		if (hsearch_r(item, ENTER, &ret_item, &queue_map) == 0)
			err(1, "queue map insert");

		/* Also map the bare queue name to the stored qid. */
		item.key = altq->pa.qname;
		item.data = &altq->pa.qid;
		if (hsearch_r(item, ENTER, &ret_item, &qid_map) == 0)
			err(1, "qid map insert");
	}
}
165
166 static struct pfctl_altq *
pfaltq_lookup(char * ifname)167 pfaltq_lookup(char *ifname)
168 {
169 ENTRY item;
170 ENTRY *ret_item;
171
172 item.key = ifname;
173 if (hsearch_r(item, FIND, &ret_item, &if_map) == 0)
174 return (NULL);
175
176 return (ret_item->data);
177 }
178
179 static struct pfctl_altq *
qname_to_pfaltq(const char * qname,const char * ifname)180 qname_to_pfaltq(const char *qname, const char *ifname)
181 {
182 ENTRY item;
183 ENTRY *ret_item;
184 char key[IFNAMSIZ + PF_QNAME_SIZE];
185
186 item.key = key;
187 snprintf(item.key, sizeof(key), "%s:%s", ifname, qname);
188 if (hsearch_r(item, FIND, &ret_item, &queue_map) == 0)
189 return (NULL);
190
191 return (ret_item->data);
192 }
193
194 static u_int32_t
qname_to_qid(char * qname)195 qname_to_qid(char *qname)
196 {
197 ENTRY item;
198 ENTRY *ret_item;
199 uint32_t qid;
200
201 /*
202 * We guarantee that same named queues on different interfaces
203 * have the same qid.
204 */
205 item.key = qname;
206 if (hsearch_r(item, FIND, &ret_item, &qid_map) == 0)
207 return (0);
208
209 qid = *(uint32_t *)ret_item->data;
210 return (qid);
211 }
212
213 void
print_altq(const struct pf_altq * a,unsigned int level,struct node_queue_bw * bw,struct node_queue_opt * qopts)214 print_altq(const struct pf_altq *a, unsigned int level,
215 struct node_queue_bw *bw, struct node_queue_opt *qopts)
216 {
217 if (a->qname[0] != 0) {
218 print_queue(a, level, bw, 1, qopts);
219 return;
220 }
221
222 #ifdef __FreeBSD__
223 if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
224 printf("INACTIVE ");
225 #endif
226
227 printf("altq on %s ", a->ifname);
228
229 switch (a->scheduler) {
230 case ALTQT_CBQ:
231 if (!print_cbq_opts(a))
232 printf("cbq ");
233 break;
234 case ALTQT_PRIQ:
235 if (!print_priq_opts(a))
236 printf("priq ");
237 break;
238 case ALTQT_HFSC:
239 if (!print_hfsc_opts(a, qopts))
240 printf("hfsc ");
241 break;
242 case ALTQT_FAIRQ:
243 if (!print_fairq_opts(a, qopts))
244 printf("fairq ");
245 break;
246 case ALTQT_CODEL:
247 if (!print_codel_opts(a, qopts))
248 printf("codel ");
249 break;
250 }
251
252 if (bw != NULL && bw->bw_percent > 0) {
253 if (bw->bw_percent < 100)
254 printf("bandwidth %u%% ", bw->bw_percent);
255 } else
256 printf("bandwidth %s ", rate2str((double)a->ifbandwidth));
257
258 if (a->qlimit != DEFAULT_QLIMIT)
259 printf("qlimit %u ", a->qlimit);
260 printf("tbrsize %u ", a->tbrsize);
261 }
262
263 void
print_queue(const struct pf_altq * a,unsigned int level,struct node_queue_bw * bw,int print_interface,struct node_queue_opt * qopts)264 print_queue(const struct pf_altq *a, unsigned int level,
265 struct node_queue_bw *bw, int print_interface,
266 struct node_queue_opt *qopts)
267 {
268 unsigned int i;
269
270 #ifdef __FreeBSD__
271 if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
272 printf("INACTIVE ");
273 #endif
274 printf("queue ");
275 for (i = 0; i < level; ++i)
276 printf(" ");
277 printf("%s ", a->qname);
278 if (print_interface)
279 printf("on %s ", a->ifname);
280 if (a->scheduler == ALTQT_CBQ || a->scheduler == ALTQT_HFSC ||
281 a->scheduler == ALTQT_FAIRQ) {
282 if (bw != NULL && bw->bw_percent > 0) {
283 if (bw->bw_percent < 100)
284 printf("bandwidth %u%% ", bw->bw_percent);
285 } else
286 printf("bandwidth %s ", rate2str((double)a->bandwidth));
287 }
288 if (a->priority != DEFAULT_PRIORITY)
289 printf("priority %u ", a->priority);
290 if (a->qlimit != DEFAULT_QLIMIT)
291 printf("qlimit %u ", a->qlimit);
292 switch (a->scheduler) {
293 case ALTQT_CBQ:
294 print_cbq_opts(a);
295 break;
296 case ALTQT_PRIQ:
297 print_priq_opts(a);
298 break;
299 case ALTQT_HFSC:
300 print_hfsc_opts(a, qopts);
301 break;
302 case ALTQT_FAIRQ:
303 print_fairq_opts(a, qopts);
304 break;
305 }
306 }
307
/*
 * eval_pfaltq computes the discipline parameters.
 *
 * Fills in pa->ifbandwidth (from an absolute spec, a percentage of the
 * detected link speed, or the raw link speed) and, when unset, a
 * heuristic token-bucket regulator size.  Returns the number of errors
 * encountered (0 on success).
 */
int
eval_pfaltq(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
    struct node_queue_opt *opts)
{
	u_int64_t	rate;
	u_int		size, errors = 0;

	/* An absolute bandwidth spec wins; otherwise ask the interface. */
	if (bw->bw_absolute > 0)
		pa->ifbandwidth = bw->bw_absolute;
	else
#ifdef __FreeBSD__
		if ((rate = getifspeed(pf->dev, pa->ifname)) == 0) {
#else
		if ((rate = getifspeed(pa->ifname)) == 0) {
#endif
			fprintf(stderr, "interface %s does not know its bandwidth, "
			    "please specify an absolute bandwidth\n",
			    pa->ifname);
			errors++;
		} else if ((pa->ifbandwidth = eval_bwspec(bw, rate)) == 0)
			pa->ifbandwidth = rate;

	/*
	 * Limit bandwidth to UINT_MAX for schedulers that aren't 64-bit ready.
	 */
	if ((pa->scheduler != ALTQT_HFSC) && (pa->ifbandwidth > UINT_MAX)) {
		pa->ifbandwidth = UINT_MAX;
		warnx("interface %s bandwidth limited to %" PRIu64 " bps "
		    "because selected scheduler is 32-bit limited\n", pa->ifname,
		    pa->ifbandwidth);
	}
	errors += eval_queue_opts(pa, opts, pa->ifbandwidth);

	/* if tbrsize is not specified, use heuristics */
	if (pa->tbrsize == 0) {
		/* size the bucket in MTUs, scaled by the link speed */
		rate = pa->ifbandwidth;
		if (rate <= 1 * 1000 * 1000)
			size = 1;
		else if (rate <= 10 * 1000 * 1000)
			size = 4;
		else if (rate <= 200 * 1000 * 1000)
			size = 8;
		else if (rate <= 2500 * 1000 * 1000ULL)
			size = 24;
		else
			size = 128;
		size = size * getifmtu(pa->ifname);
		pa->tbrsize = size;
	}
	return (errors);
}
362
363 /*
364 * check_commit_altq does consistency check for each interface
365 */
366 int
367 check_commit_altq(int dev, int opts)
368 {
369 struct pfctl_altq *if_ppa;
370 int error = 0;
371
372 /* call the discipline check for each interface. */
373 STAILQ_FOREACH(if_ppa, &interfaces, meta.link) {
374 switch (if_ppa->pa.scheduler) {
375 case ALTQT_CBQ:
376 error = check_commit_cbq(dev, opts, if_ppa);
377 break;
378 case ALTQT_PRIQ:
379 error = check_commit_priq(dev, opts, if_ppa);
380 break;
381 case ALTQT_HFSC:
382 error = check_commit_hfsc(dev, opts, if_ppa);
383 break;
384 case ALTQT_FAIRQ:
385 error = check_commit_fairq(dev, opts, if_ppa);
386 break;
387 default:
388 break;
389 }
390 }
391 return (error);
392 }
393
/*
 * eval_pfqueue computes the queue parameters.
 *
 * Resolves the owning interface and (optional) parent queue, assigns a
 * qid, performs generic bandwidth admission checks, and then dispatches
 * to the scheduler-specific evaluator.  Returns 0 on success, nonzero
 * (or the evaluator's negative error) on failure.
 */
int
eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
    struct node_queue_opt *opts)
{
	/* should be merged with expand_queue */
	struct pfctl_altq	*if_ppa, *parent;
	int			 error = 0;

	/* find the corresponding interface and copy fields used by queues */
	if ((if_ppa = pfaltq_lookup(pa->ifname)) == NULL) {
		fprintf(stderr, "altq not defined on %s\n", pa->ifname);
		return (1);
	}
	pa->scheduler = if_ppa->pa.scheduler;
	pa->ifbandwidth = if_ppa->pa.ifbandwidth;

	/* duplicate queue names on one interface are rejected */
	if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) {
		fprintf(stderr, "queue %s already exists on interface %s\n",
		    pa->qname, pa->ifname);
		return (1);
	}
	/* reuse an existing qid so same-named queues share one id */
	pa->qid = qname_to_qid(pa->qname);

	parent = NULL;
	if (pa->parent[0] != 0) {
		parent = qname_to_pfaltq(pa->parent, pa->ifname);
		if (parent == NULL) {
			fprintf(stderr, "parent %s not found for %s\n",
			    pa->parent, pa->qname);
			return (1);
		}
		pa->parent_qid = parent->pa.qid;
	}
	if (pa->qlimit == 0)
		pa->qlimit = DEFAULT_QLIMIT;

	if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC ||
	    pa->scheduler == ALTQT_FAIRQ) {
		/* percentage specs are relative to the parent's bandwidth */
		pa->bandwidth = eval_bwspec(bw,
		    parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth);

		if (pa->bandwidth > pa->ifbandwidth) {
			fprintf(stderr, "bandwidth for %s higher than "
			    "interface\n", pa->qname);
			return (1);
		}
		/*
		 * If not HFSC, then check that the sum of the child
		 * bandwidths is less than the parent's bandwidth.  For
		 * HFSC, the equivalent concept is to check that the sum of
		 * the child linkshare service curves are under the parent's
		 * linkshare service curve, and that check is performed by
		 * eval_pfqueue_hfsc().
		 */
		if ((parent != NULL) && (pa->scheduler != ALTQT_HFSC)) {
			if (pa->bandwidth > parent->pa.bandwidth) {
				warnx("bandwidth for %s higher than parent",
				    pa->qname);
				return (1);
			}
			parent->meta.bwsum += pa->bandwidth;
			/* over-subscription only warns; it is not an error */
			if (parent->meta.bwsum > parent->pa.bandwidth) {
				warnx("the sum of the child bandwidth (%" PRIu64
				    ") higher than parent \"%s\" (%" PRIu64 ")",
				    parent->meta.bwsum, parent->pa.qname,
				    parent->pa.bandwidth);
			}
		}
	}

	if (eval_queue_opts(pa, opts,
	    parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth))
		return (1);

	/* the child count tells the HFSC/FAIRQ evaluators to init curves */
	if (parent != NULL)
		parent->meta.children++;

	switch (pa->scheduler) {
	case ALTQT_CBQ:
		error = eval_pfqueue_cbq(pf, pa, if_ppa);
		break;
	case ALTQT_PRIQ:
		error = eval_pfqueue_priq(pf, pa, if_ppa);
		break;
	case ALTQT_HFSC:
		error = eval_pfqueue_hfsc(pf, pa, if_ppa, parent);
		break;
	case ALTQT_FAIRQ:
		error = eval_pfqueue_fairq(pf, pa, if_ppa, parent);
		break;
	default:
		break;
	}
	return (error);
}
492
493 /*
494 * CBQ support functions
495 */
496 #define RM_FILTER_GAIN 5 /* log2 of gain, e.g., 5 => 31/32 */
497 #define RM_NS_PER_SEC (1000000000)
498
499 static int
500 eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
501 {
502 struct cbq_opts *opts;
503 u_int ifmtu;
504
505 if (pa->priority >= CBQ_MAXPRI) {
506 warnx("priority out of range: max %d", CBQ_MAXPRI - 1);
507 return (-1);
508 }
509
510 ifmtu = getifmtu(pa->ifname);
511 opts = &pa->pq_u.cbq_opts;
512
513 if (opts->pktsize == 0) { /* use default */
514 opts->pktsize = ifmtu;
515 if (opts->pktsize > MCLBYTES) /* do what TCP does */
516 opts->pktsize &= ~MCLBYTES;
517 } else if (opts->pktsize > ifmtu)
518 opts->pktsize = ifmtu;
519 if (opts->maxpktsize == 0) /* use default */
520 opts->maxpktsize = ifmtu;
521 else if (opts->maxpktsize > ifmtu)
522 opts->pktsize = ifmtu;
523
524 if (opts->pktsize > opts->maxpktsize)
525 opts->pktsize = opts->maxpktsize;
526
527 if (pa->parent[0] == 0)
528 opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR);
529
530 if (pa->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS)
531 if_ppa->meta.root_classes++;
532 if (pa->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS)
533 if_ppa->meta.default_classes++;
534
535 cbq_compute_idletime(pf, pa);
536 return (0);
537 }
538
/*
 * compute ns_per_byte, maxidle, minidle, and offtime
 *
 * Derives the CBQ resource-management estimator parameters from the
 * queue's share of the interface bandwidth and stores them back into
 * pa->pq_u.cbq_opts.  Always returns 0.
 */
static int
cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa)
{
	struct cbq_opts	*opts;
	double		 maxidle_s, maxidle, minidle;
	double		 offtime, nsPerByte, ifnsPerByte, ptime, cptime;
	double		 z, g, f, gton, gtom;
	u_int		 minburst, maxburst;

	opts = &pa->pq_u.cbq_opts;
	/* wire time per byte on the interface, in nanoseconds */
	ifnsPerByte = (1.0 / (double)pa->ifbandwidth) * RM_NS_PER_SEC * 8;
	minburst = opts->minburst;
	maxburst = opts->maxburst;

	/* f = this queue's fraction of the interface bandwidth */
	if (pa->bandwidth == 0)
		f = 0.0001;	/* small enough? */
	else
		f = ((double) pa->bandwidth / (double) pa->ifbandwidth);

	nsPerByte = ifnsPerByte / f;
	ptime = (double)opts->pktsize * ifnsPerByte;
	cptime = ptime * (1.0 - f) / f;

	if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) {
		/*
		 * this causes integer overflow in kernel!
		 * (bandwidth < 6Kbps when max_pkt_size=1500)
		 */
		if (pa->bandwidth != 0 && (pf->opts & PF_OPT_QUIET) == 0) {
			warnx("queue bandwidth must be larger than %s",
			    rate2str(ifnsPerByte * (double)opts->maxpktsize /
			    (double)INT_MAX * (double)pa->ifbandwidth));
			fprintf(stderr, "cbq: queue %s is too slow!\n",
			    pa->qname);
		}
		/* clamp so the kernel's fixed-point math cannot overflow */
		nsPerByte = (double)(INT_MAX / opts->maxpktsize);
	}

	if (maxburst == 0) {  /* use default */
		if (cptime > 10.0 * 1000000)
			maxburst = 4;
		else
			maxburst = 16;
	}
	if (minburst == 0)  /* use default */
		minburst = 2;
	if (minburst > maxburst)
		minburst = maxburst;

	/* g is the decay factor of the kernel's EWMA idle estimator */
	z = (double)(1 << RM_FILTER_GAIN);
	g = (1.0 - 1.0 / z);
	gton = pow(g, (double)maxburst);
	gtom = pow(g, (double)(minburst-1));
	maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton));
	maxidle_s = (1.0 - g);
	if (maxidle > maxidle_s)
		maxidle = ptime * maxidle;
	else
		maxidle = ptime * maxidle_s;
	offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom);
	minidle = -((double)opts->maxpktsize * (double)nsPerByte);

	/* scale parameters */
	maxidle = ((maxidle * 8.0) / nsPerByte) *
	    pow(2.0, (double)RM_FILTER_GAIN);
	offtime = (offtime * 8.0) / nsPerByte *
	    pow(2.0, (double)RM_FILTER_GAIN);
	minidle = ((minidle * 8.0) / nsPerByte) *
	    pow(2.0, (double)RM_FILTER_GAIN);

	/* divide by 1000 — presumably ns -> us for the kernel; TODO confirm */
	maxidle = maxidle / 1000.0;
	offtime = offtime / 1000.0;
	minidle = minidle / 1000.0;

	opts->minburst = minburst;
	opts->maxburst = maxburst;
	opts->ns_per_byte = (u_int)nsPerByte;
	opts->maxidle = (u_int)fabs(maxidle);
	opts->minidle = (int)minidle;
	opts->offtime = (u_int)fabs(offtime);

	return (0);
}
625
626 static int
627 check_commit_cbq(int dev, int opts, struct pfctl_altq *if_ppa)
628 {
629 int error = 0;
630
631 /*
632 * check if cbq has one root queue and one default queue
633 * for this interface
634 */
635 if (if_ppa->meta.root_classes != 1) {
636 warnx("should have one root queue on %s", if_ppa->pa.ifname);
637 error++;
638 }
639 if (if_ppa->meta.default_classes != 1) {
640 warnx("should have one default queue on %s", if_ppa->pa.ifname);
641 error++;
642 }
643 return (error);
644 }
645
646 static int
647 print_cbq_opts(const struct pf_altq *a)
648 {
649 const struct cbq_opts *opts;
650
651 opts = &a->pq_u.cbq_opts;
652 if (opts->flags) {
653 printf("cbq(");
654 if (opts->flags & CBQCLF_RED)
655 printf(" red");
656 if (opts->flags & CBQCLF_ECN)
657 printf(" ecn");
658 if (opts->flags & CBQCLF_RIO)
659 printf(" rio");
660 if (opts->flags & CBQCLF_CODEL)
661 printf(" codel");
662 if (opts->flags & CBQCLF_CLEARDSCP)
663 printf(" cleardscp");
664 if (opts->flags & CBQCLF_FLOWVALVE)
665 printf(" flowvalve");
666 if (opts->flags & CBQCLF_BORROW)
667 printf(" borrow");
668 if (opts->flags & CBQCLF_WRR)
669 printf(" wrr");
670 if (opts->flags & CBQCLF_EFFICIENT)
671 printf(" efficient");
672 if (opts->flags & CBQCLF_ROOTCLASS)
673 printf(" root");
674 if (opts->flags & CBQCLF_DEFCLASS)
675 printf(" default");
676 printf(" ) ");
677
678 return (1);
679 } else
680 return (0);
681 }
682
683 /*
684 * PRIQ support functions
685 */
686 static int
687 eval_pfqueue_priq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
688 {
689
690 if (pa->priority >= PRIQ_MAXPRI) {
691 warnx("priority out of range: max %d", PRIQ_MAXPRI - 1);
692 return (-1);
693 }
694 if (BIT_ISSET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris)) {
695 warnx("%s does not have a unique priority on interface %s",
696 pa->qname, pa->ifname);
697 return (-1);
698 } else
699 BIT_SET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris);
700
701 if (pa->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS)
702 if_ppa->meta.default_classes++;
703 return (0);
704 }
705
706 static int
707 check_commit_priq(int dev, int opts, struct pfctl_altq *if_ppa)
708 {
709
710 /*
711 * check if priq has one default class for this interface
712 */
713 if (if_ppa->meta.default_classes != 1) {
714 warnx("should have one default queue on %s", if_ppa->pa.ifname);
715 return (1);
716 }
717 return (0);
718 }
719
720 static int
721 print_priq_opts(const struct pf_altq *a)
722 {
723 const struct priq_opts *opts;
724
725 opts = &a->pq_u.priq_opts;
726
727 if (opts->flags) {
728 printf("priq(");
729 if (opts->flags & PRCF_RED)
730 printf(" red");
731 if (opts->flags & PRCF_ECN)
732 printf(" ecn");
733 if (opts->flags & PRCF_RIO)
734 printf(" rio");
735 if (opts->flags & PRCF_CODEL)
736 printf(" codel");
737 if (opts->flags & PRCF_CLEARDSCP)
738 printf(" cleardscp");
739 if (opts->flags & PRCF_DEFAULTCLASS)
740 printf(" default");
741 printf(" ) ");
742
743 return (1);
744 } else
745 return (0);
746 }
747
748 /*
749 * HFSC support functions
750 */
/*
 * Evaluate HFSC-specific parameters for one queue: fill in the root
 * queue's linkshare curve, validate the realtime/linkshare/upperlimit
 * service curves, and perform admission control against the parent.
 * Returns 0 on success, -1 on error.
 */
static int
eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa,
    struct pfctl_altq *parent)
{
	struct hfsc_opts_v1	*opts;
	struct service_curve	 sc;

	opts = &pa->pq_u.hfsc_opts;

	if (parent == NULL) {
		/* root queue */
		opts->lssc_m1 = pa->ifbandwidth;
		opts->lssc_m2 = pa->ifbandwidth;
		opts->lssc_d = 0;
		return (0);
	}

	/* First child initializes the parent's service curve accumulators. */
	if (parent->meta.children == 1) {
		LIST_INIT(&parent->meta.rtsc);
		LIST_INIT(&parent->meta.lssc);
	}

	/* the default class must remain a leaf */
	if (parent->pa.pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) {
		warnx("adding %s would make default queue %s not a leaf",
		    pa->qname, pa->parent);
		return (-1);
	}

	/* counted per interface; validated later in check_commit_hfsc() */
	if (pa->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS)
		if_ppa->meta.default_classes++;

	/* if link_share is not specified, use bandwidth */
	if (opts->lssc_m2 == 0)
		opts->lssc_m2 = pa->bandwidth;

	/* a nonzero m1 makes no sense when the long-term slope m2 is zero */
	if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) ||
	    (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) ||
	    (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) {
		warnx("m2 is zero for %s", pa->qname);
		return (-1);
	}

	/* m1 < m2 would describe a convex curve, which HFSC rejects here */
	if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
	    (opts->lssc_m1 < opts->lssc_m2 && opts->lssc_m1 != 0) ||
	    (opts->ulsc_m1 < opts->ulsc_m2 && opts->ulsc_m1 != 0)) {
		warnx("m1 must be zero for convex curve: %s", pa->qname);
		return (-1);
	}

	/*
	 * admission control:
	 * for the real-time service curve, the sum of the service curves
	 * should not exceed 80% of the interface bandwidth.  20% is reserved
	 * not to over-commit the actual interface bandwidth.
	 * for the linkshare service curve, the sum of the child service
	 * curve should not exceed the parent service curve.
	 * for the upper-limit service curve, the assigned bandwidth should
	 * be smaller than the interface bandwidth, and the upper-limit should
	 * be larger than the real-time service curve when both are defined.
	 */

	/* check the real-time service curve.  reserve 20% of interface bw */
	if (opts->rtsc_m2 != 0) {
		/* add this queue to the sum */
		sc.m1 = opts->rtsc_m1;
		sc.d = opts->rtsc_d;
		sc.m2 = opts->rtsc_m2;
		gsc_add_sc(&parent->meta.rtsc, &sc);
		/* compare the sum with 80% of the interface */
		sc.m1 = 0;
		sc.d = 0;
		sc.m2 = pa->ifbandwidth / 100 * 80;
		if (!is_gsc_under_sc(&parent->meta.rtsc, &sc)) {
			warnx("real-time sc exceeds 80%% of the interface "
			    "bandwidth (%s)", rate2str((double)sc.m2));
			return (-1);
		}
	}

	/* check the linkshare service curve. */
	if (opts->lssc_m2 != 0) {
		/* add this queue to the child sum */
		sc.m1 = opts->lssc_m1;
		sc.d = opts->lssc_d;
		sc.m2 = opts->lssc_m2;
		gsc_add_sc(&parent->meta.lssc, &sc);
		/* compare the sum of the children with parent's sc */
		sc.m1 = parent->pa.pq_u.hfsc_opts.lssc_m1;
		sc.d = parent->pa.pq_u.hfsc_opts.lssc_d;
		sc.m2 = parent->pa.pq_u.hfsc_opts.lssc_m2;
		if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
			warnx("linkshare sc exceeds parent's sc");
			return (-1);
		}
	}

	/* check the upper-limit service curve. */
	if (opts->ulsc_m2 != 0) {
		if (opts->ulsc_m1 > pa->ifbandwidth ||
		    opts->ulsc_m2 > pa->ifbandwidth) {
			warnx("upper-limit larger than interface bandwidth");
			return (-1);
		}
		if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) {
			warnx("upper-limit sc smaller than real-time sc");
			return (-1);
		}
	}

	return (0);
}
863
864 /*
865 * FAIRQ support functions
866 */
/*
 * Evaluate FAIRQ-specific parameters for one queue: fill in the root
 * queue's linkshare curve and check the child linkshare curves against
 * the parent's.  Returns 0 on success, -1 on error.
 */
static int
eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa,
    struct pfctl_altq *if_ppa, struct pfctl_altq *parent)
{
	struct fairq_opts	*opts;
	struct service_curve	 sc;

	opts = &pa->pq_u.fairq_opts;

	if (parent == NULL) {
		/* root queue */
		opts->lssc_m1 = pa->ifbandwidth;
		opts->lssc_m2 = pa->ifbandwidth;
		opts->lssc_d = 0;
		return (0);
	}

	/* First child initializes the parent's service curve accumulator. */
	if (parent->meta.children == 1)
		LIST_INIT(&parent->meta.lssc);

	/* the default class must remain a leaf */
	if (parent->pa.pq_u.fairq_opts.flags & FARF_DEFAULTCLASS) {
		warnx("adding %s would make default queue %s not a leaf",
		    pa->qname, pa->parent);
		return (-1);
	}

	/* counted per interface; validated later in check_commit_fairq() */
	if (pa->pq_u.fairq_opts.flags & FARF_DEFAULTCLASS)
		if_ppa->meta.default_classes++;

	/* if link_share is not specified, use bandwidth */
	if (opts->lssc_m2 == 0)
		opts->lssc_m2 = pa->bandwidth;

	/*
	 * admission control:
	 * for the real-time service curve, the sum of the service curves
	 * should not exceed 80% of the interface bandwidth.  20% is reserved
	 * not to over-commit the actual interface bandwidth.
	 * for the link-sharing service curve, the sum of the child service
	 * curve should not exceed the parent service curve.
	 * for the upper-limit service curve, the assigned bandwidth should
	 * be smaller than the interface bandwidth, and the upper-limit should
	 * be larger than the real-time service curve when both are defined.
	 */

	/* check the linkshare service curve. */
	if (opts->lssc_m2 != 0) {
		/* add this queue to the child sum */
		sc.m1 = opts->lssc_m1;
		sc.d = opts->lssc_d;
		sc.m2 = opts->lssc_m2;
		gsc_add_sc(&parent->meta.lssc, &sc);
		/* compare the sum of the children with parent's sc */
		sc.m1 = parent->pa.pq_u.fairq_opts.lssc_m1;
		sc.d = parent->pa.pq_u.fairq_opts.lssc_d;
		sc.m2 = parent->pa.pq_u.fairq_opts.lssc_m2;
		if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
			warnx("link-sharing sc exceeds parent's sc");
			return (-1);
		}
	}

	return (0);
}
932
933 static int
934 check_commit_hfsc(int dev, int opts, struct pfctl_altq *if_ppa)
935 {
936
937 /* check if hfsc has one default queue for this interface */
938 if (if_ppa->meta.default_classes != 1) {
939 warnx("should have one default queue on %s", if_ppa->pa.ifname);
940 return (1);
941 }
942 return (0);
943 }
944
945 static int
946 check_commit_fairq(int dev __unused, int opts __unused, struct pfctl_altq *if_ppa)
947 {
948
949 /* check if fairq has one default queue for this interface */
950 if (if_ppa->meta.default_classes != 1) {
951 warnx("should have one default queue on %s", if_ppa->pa.ifname);
952 return (1);
953 }
954 return (0);
955 }
956
/*
 * Print "hfsc( ... ) " with flags and the realtime/linkshare/upperlimit
 * service curves.  A linkshare curve that merely mirrors the queue
 * bandwidth (m2 == bandwidth, d == 0) is suppressed because it is the
 * implicit default.  Returns 1 if anything was printed, 0 otherwise.
 */
static int
print_hfsc_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct hfsc_opts_v1	*opts;
	const struct node_hfsc_sc	*rtsc, *lssc, *ulsc;

	opts = &a->pq_u.hfsc_opts;
	/* qopts carries the user's original spec (e.g. percentages) */
	if (qopts == NULL)
		rtsc = lssc = ulsc = NULL;
	else {
		rtsc = &qopts->data.hfsc_opts.realtime;
		lssc = &qopts->data.hfsc_opts.linkshare;
		ulsc = &qopts->data.hfsc_opts.upperlimit;
	}

	if (opts->flags || opts->rtsc_m2 != 0 || opts->ulsc_m2 != 0 ||
	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
	    opts->lssc_d != 0))) {
		printf("hfsc(");
		if (opts->flags & HFCF_RED)
			printf(" red");
		if (opts->flags & HFCF_ECN)
			printf(" ecn");
		if (opts->flags & HFCF_RIO)
			printf(" rio");
		if (opts->flags & HFCF_CODEL)
			printf(" codel");
		if (opts->flags & HFCF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & HFCF_DEFAULTCLASS)
			printf(" default");
		if (opts->rtsc_m2 != 0)
			print_hfsc_sc("realtime", opts->rtsc_m1, opts->rtsc_d,
			    opts->rtsc_m2, rtsc);
		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
		    opts->lssc_d != 0))
			print_hfsc_sc("linkshare", opts->lssc_m1, opts->lssc_d,
			    opts->lssc_m2, lssc);
		if (opts->ulsc_m2 != 0)
			print_hfsc_sc("upperlimit", opts->ulsc_m1, opts->ulsc_d,
			    opts->ulsc_m2, ulsc);
		printf(" ) ");

		return (1);
	} else
		return (0);
}
1004
1005 static int
1006 print_codel_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
1007 {
1008 const struct codel_opts *opts;
1009
1010 opts = &a->pq_u.codel_opts;
1011 if (opts->target || opts->interval || opts->ecn) {
1012 printf("codel(");
1013 if (opts->target)
1014 printf(" target %d", opts->target);
1015 if (opts->interval)
1016 printf(" interval %d", opts->interval);
1017 if (opts->ecn)
1018 printf("ecn");
1019 printf(" ) ");
1020
1021 return (1);
1022 }
1023
1024 return (0);
1025 }
1026
/*
 * Print "fairq( ... ) " with flags and a non-default linkshare curve.
 * As with HFSC, a linkshare curve equal to the queue bandwidth with
 * d == 0 is the implicit default and is suppressed.  Returns 1 if
 * anything was printed, 0 otherwise.
 */
static int
print_fairq_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct fairq_opts		*opts;
	const struct node_fairq_sc	*loc_lssc;

	opts = &a->pq_u.fairq_opts;
	/* qopts carries the user's original spec (e.g. percentages) */
	if (qopts == NULL)
		loc_lssc = NULL;
	else
		loc_lssc = &qopts->data.fairq_opts.linkshare;

	if (opts->flags ||
	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
	    opts->lssc_d != 0))) {
		printf("fairq(");
		if (opts->flags & FARF_RED)
			printf(" red");
		if (opts->flags & FARF_ECN)
			printf(" ecn");
		if (opts->flags & FARF_RIO)
			printf(" rio");
		if (opts->flags & FARF_CODEL)
			printf(" codel");
		if (opts->flags & FARF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & FARF_DEFAULTCLASS)
			printf(" default");
		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
		    opts->lssc_d != 0))
			print_fairq_sc("linkshare", opts->lssc_m1, opts->lssc_d,
			    opts->lssc_m2, loc_lssc);
		printf(" ) ");

		return (1);
	} else
		return (0);
}
1065
1066 /*
1067 * admission control using generalized service curve
1068 */
1069
1070 /* add a new service curve to a generalized service curve */
1071 static void
1072 gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc)
1073 {
1074 if (is_sc_null(sc))
1075 return;
1076 if (sc->d != 0)
1077 gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1);
1078 gsc_add_seg(gsc, (double)sc->d, 0.0, INFINITY, (double)sc->m2);
1079 }
1080
/*
 * check whether all points of a generalized service curve have
 * their y-coordinates no larger than a given two-piece linear
 * service curve.
 *
 * Returns 1 when gsc lies entirely under sc, 0 otherwise.
 */
static int
is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc)
{
	struct segment	*s, *last, *end;
	double		 y;

	if (is_sc_null(sc)) {
		/* a null sc is the zero curve: gsc must be flat at zero */
		if (LIST_EMPTY(gsc))
			return (1);
		LIST_FOREACH(s, gsc, _next) {
			if (s->m != 0)
				return (0);
		}
		return (1);
	}
	/*
	 * gsc has a dummy entry at the end with x = INFINITY.
	 * loop through up to this dummy entry.
	 */
	end = gsc_getentry(gsc, INFINITY);
	if (end == NULL)
		return (1);
	last = NULL;
	/* every segment start point must lie under sc */
	for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) {
		if (s->y > sc_x2y(sc, s->x))
			return (0);
		last = s;
	}
	/* last now holds the real last segment */
	if (last == NULL)
		return (1);
	/* beyond the last start point, compare slopes and the knee at d */
	if (last->m > sc->m2)
		return (0);
	if (last->x < sc->d && last->m > sc->m1) {
		y = last->y + (sc->d - last->x) * last->m;
		if (y > sc_x2y(sc, sc->d))
			return (0);
	}
	return (1);
}
1126
1127 /*
1128 * return a segment entry starting at x.
1129 * if gsc has no entry starting at x, a new entry is created at x.
1130 */
1131 static struct segment *
1132 gsc_getentry(struct gen_sc *gsc, double x)
1133 {
1134 struct segment *new, *prev, *s;
1135
1136 prev = NULL;
1137 LIST_FOREACH(s, gsc, _next) {
1138 if (s->x == x)
1139 return (s); /* matching entry found */
1140 else if (s->x < x)
1141 prev = s;
1142 else
1143 break;
1144 }
1145
1146 /* we have to create a new entry */
1147 if ((new = calloc(1, sizeof(struct segment))) == NULL)
1148 return (NULL);
1149
1150 new->x = x;
1151 if (x == INFINITY || s == NULL)
1152 new->d = 0;
1153 else if (s->x == INFINITY)
1154 new->d = INFINITY;
1155 else
1156 new->d = s->x - x;
1157 if (prev == NULL) {
1158 /* insert the new entry at the head of the list */
1159 new->y = 0;
1160 new->m = 0;
1161 LIST_INSERT_HEAD(gsc, new, _next);
1162 } else {
1163 /*
1164 * the start point intersects with the segment pointed by
1165 * prev. divide prev into 2 segments
1166 */
1167 if (x == INFINITY) {
1168 prev->d = INFINITY;
1169 if (prev->m == 0)
1170 new->y = prev->y;
1171 else
1172 new->y = INFINITY;
1173 } else {
1174 prev->d = x - prev->x;
1175 new->y = prev->d * prev->m + prev->y;
1176 }
1177 new->m = prev->m;
1178 LIST_INSERT_AFTER(prev, new, _next);
1179 }
1180 return (new);
1181 }
1182
/*
 * add a segment to a generalized service curve.
 * the segment starts at (x, y), has slope m, and lasts for duration d.
 * Returns 0 on success, -1 on allocation failure.
 */
static int
gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m)
{
	struct segment *start, *end, *s;
	double x2;

	if (d == INFINITY)
		x2 = INFINITY;
	else
		x2 = x + d;
	/* make sure list entries exist at both boundaries of [x, x2) */
	start = gsc_getentry(gsc, x);
	end = gsc_getentry(gsc, x2);
	if (start == NULL || end == NULL)
		return (-1);

	/* within [x, x2): add the slope and the accumulated y offset */
	for (s = start; s != end; s = LIST_NEXT(s, _next)) {
		s->m += m;
		s->y += y + (s->x - x) * m;
	}

	/* beyond x2: the finished segment contributes a constant m * d */
	end = gsc_getentry(gsc, INFINITY);
	for (; s != end; s = LIST_NEXT(s, _next)) {
		s->y += m * d;
	}

	return (0);
}
1211
1212 /* get y-projection of a service curve */
1213 static double
1214 sc_x2y(struct service_curve *sc, double x)
1215 {
1216 double y;
1217
1218 if (x <= (double)sc->d)
1219 /* y belongs to the 1st segment */
1220 y = x * (double)sc->m1;
1221 else
1222 /* y belongs to the 2nd segment */
1223 y = (double)sc->d * (double)sc->m1
1224 + (x - (double)sc->d) * (double)sc->m2;
1225 return (y);
1226 }
1227
1228 /*
1229 * misc utilities
1230 */
1231 #define R2S_BUFS 8
1232 #define RATESTR_MAX 16
1233
1234 char *
1235 rate2str(double rate)
1236 {
1237 char *buf;
1238 static char r2sbuf[R2S_BUFS][RATESTR_MAX]; /* ring bufer */
1239 static int idx = 0;
1240 int i;
1241 static const char unit[] = " KMG";
1242
1243 buf = r2sbuf[idx++];
1244 if (idx == R2S_BUFS)
1245 idx = 0;
1246
1247 for (i = 0; rate >= 1000 && i <= 3; i++)
1248 rate /= 1000;
1249
1250 if ((int)(rate * 100) % 100)
1251 snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]);
1252 else
1253 snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]);
1254
1255 return (buf);
1256 }
1257
#ifdef __FreeBSD__
/*
 * XXX
 * FreeBSD does not have SIOCGIFDATA.
 * To emulate this, DIOCGIFSPEED ioctl added to pf.
 */
/*
 * Return the link speed (baudrate) of ifname, queried through the
 * pf device fd 'pfdev'.  Exits on any error.
 */
u_int64_t
getifspeed(int pfdev, char *ifname)
{
	struct pf_ifspeed io;

	bzero(&io, sizeof io);
	/* reject interface names that would be silently truncated */
	if (strlcpy(io.ifname, ifname, IFNAMSIZ) >=
	    sizeof(io.ifname))
		errx(1, "getifspeed: strlcpy");
	if (ioctl(pfdev, DIOCGIFSPEED, &io) == -1)
		err(1, "DIOCGIFSPEED");
	return (io.baudrate);
}
#else
/*
 * Return the link speed (baudrate) of ifname via the SIOCGIFDATA
 * interface statistics ioctl.  Exits on any error.
 */
u_int32_t
getifspeed(char *ifname)
{
	int s;
	struct ifreq ifr;
	struct if_data ifrdat;

	s = get_query_socket();
	bzero(&ifr, sizeof(ifr));
	/* reject interface names that would be silently truncated */
	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
	    sizeof(ifr.ifr_name))
		errx(1, "getifspeed: strlcpy");
	ifr.ifr_data = (caddr_t)&ifrdat;
	if (ioctl(s, SIOCGIFDATA, (caddr_t)&ifr) == -1)
		err(1, "SIOCGIFDATA");
	return ((u_int32_t)ifrdat.ifi_baudrate);
}
#endif
1296
1297 u_long
1298 getifmtu(char *ifname)
1299 {
1300 int s;
1301 struct ifreq ifr;
1302
1303 s = get_query_socket();
1304 bzero(&ifr, sizeof(ifr));
1305 if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1306 sizeof(ifr.ifr_name))
1307 errx(1, "getifmtu: strlcpy");
1308 if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1)
1309 #ifdef __FreeBSD__
1310 ifr.ifr_mtu = 1500;
1311 #else
1312 err(1, "SIOCGIFMTU");
1313 #endif
1314 if (ifr.ifr_mtu > 0)
1315 return (ifr.ifr_mtu);
1316 else {
1317 warnx("could not get mtu for %s, assuming 1500", ifname);
1318 return (1500);
1319 }
1320 }
1321
/*
 * Copy the parsed per-scheduler queue options from 'opts' into the
 * kernel request 'pa', resolving relative (percentage) bandwidth
 * specs against 'ref_bw' via eval_bwspec.  Returns the number of
 * errors encountered (0 on success).
 */
int
eval_queue_opts(struct pf_altq *pa, struct node_queue_opt *opts,
    u_int64_t ref_bw)
{
	int	errors = 0;

	switch (pa->scheduler) {
	case ALTQT_CBQ:
		pa->pq_u.cbq_opts = opts->data.cbq_opts;
		break;
	case ALTQT_PRIQ:
		pa->pq_u.priq_opts = opts->data.priq_opts;
		break;
	case ALTQT_HFSC:
		/* HFSC: up to three service curves, each copied only if set */
		pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags;
		if (opts->data.hfsc_opts.linkshare.used) {
			pa->pq_u.hfsc_opts.lssc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.lssc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.lssc_d =
			    opts->data.hfsc_opts.linkshare.d;
		}
		if (opts->data.hfsc_opts.realtime.used) {
			pa->pq_u.hfsc_opts.rtsc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.realtime.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.rtsc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.realtime.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.rtsc_d =
			    opts->data.hfsc_opts.realtime.d;
		}
		if (opts->data.hfsc_opts.upperlimit.used) {
			pa->pq_u.hfsc_opts.ulsc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.ulsc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.ulsc_d =
			    opts->data.hfsc_opts.upperlimit.d;
		}
		break;
	case ALTQT_FAIRQ:
		/* FAIRQ: flags, bucket count, hogs threshold, linkshare */
		pa->pq_u.fairq_opts.flags = opts->data.fairq_opts.flags;
		pa->pq_u.fairq_opts.nbuckets = opts->data.fairq_opts.nbuckets;
		pa->pq_u.fairq_opts.hogs_m1 =
		    eval_bwspec(&opts->data.fairq_opts.hogs_bw, ref_bw);

		if (opts->data.fairq_opts.linkshare.used) {
			pa->pq_u.fairq_opts.lssc_m1 =
			    eval_bwspec(&opts->data.fairq_opts.linkshare.m1,
			    ref_bw);
			pa->pq_u.fairq_opts.lssc_m2 =
			    eval_bwspec(&opts->data.fairq_opts.linkshare.m2,
			    ref_bw);
			pa->pq_u.fairq_opts.lssc_d =
			    opts->data.fairq_opts.linkshare.d;
		}
		break;
	case ALTQT_CODEL:
		pa->pq_u.codel_opts.target = opts->data.codel_opts.target;
		pa->pq_u.codel_opts.interval = opts->data.codel_opts.interval;
		pa->pq_u.codel_opts.ecn = opts->data.codel_opts.ecn;
		break;
	default:
		/*
		 * NOTE(review): the switch is on pa->scheduler but the
		 * message reports opts->qtype — presumably these always
		 * match by the time this is called; verify in the parser.
		 */
		warnx("eval_queue_opts: unknown scheduler type %u",
		    opts->qtype);
		errors++;
		break;
	}

	return (errors);
}
1399
1400 /*
1401 * If absolute bandwidth if set, return the lesser of that value and the
1402 * reference bandwidth. Limiting to the reference bandwidth allows simple
1403 * limiting of configured bandwidth parameters for schedulers that are
1404 * 32-bit limited, as the root/interface bandwidth (top-level reference
1405 * bandwidth) will be properly limited in that case.
1406 *
1407 * Otherwise, if the absolute bandwidth is not set, return given percentage
1408 * of reference bandwidth.
1409 */
1410 u_int64_t
1411 eval_bwspec(struct node_queue_bw *bw, u_int64_t ref_bw)
1412 {
1413 if (bw->bw_absolute > 0)
1414 return (MIN(bw->bw_absolute, ref_bw));
1415
1416 if (bw->bw_percent > 0)
1417 return (ref_bw / 100 * bw->bw_percent);
1418
1419 return (0);
1420 }
1421
1422 void
1423 print_hfsc_sc(const char *scname, u_int m1, u_int d, u_int m2,
1424 const struct node_hfsc_sc *sc)
1425 {
1426 printf(" %s", scname);
1427
1428 if (d != 0) {
1429 printf("(");
1430 if (sc != NULL && sc->m1.bw_percent > 0)
1431 printf("%u%%", sc->m1.bw_percent);
1432 else
1433 printf("%s", rate2str((double)m1));
1434 printf(" %u", d);
1435 }
1436
1437 if (sc != NULL && sc->m2.bw_percent > 0)
1438 printf(" %u%%", sc->m2.bw_percent);
1439 else
1440 printf(" %s", rate2str((double)m2));
1441
1442 if (d != 0)
1443 printf(")");
1444 }
1445
1446 void
1447 print_fairq_sc(const char *scname, u_int m1, u_int d, u_int m2,
1448 const struct node_fairq_sc *sc)
1449 {
1450 printf(" %s", scname);
1451
1452 if (d != 0) {
1453 printf("(");
1454 if (sc != NULL && sc->m1.bw_percent > 0)
1455 printf("%u%%", sc->m1.bw_percent);
1456 else
1457 printf("%s", rate2str((double)m1));
1458 printf(" %u", d);
1459 }
1460
1461 if (sc != NULL && sc->m2.bw_percent > 0)
1462 printf(" %u%%", sc->m2.bw_percent);
1463 else
1464 printf(" %s", rate2str((double)m2));
1465
1466 if (d != 0)
1467 printf(")");
1468 }
1469