1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2017 6WIND S.A.
3 * Copyright 2017 Mellanox Technologies, Ltd
4 */
5
6 #include <errno.h>
7 #include <string.h>
8 #include <unistd.h>
9 #include <sys/queue.h>
10 #include <sys/resource.h>
11
12 #include <rte_byteorder.h>
13 #include <rte_jhash.h>
14 #include <rte_malloc.h>
15 #include <rte_eth_tap.h>
16 #include <tap_flow.h>
17 #include <tap_autoconf.h>
18 #include <tap_tcmsgs.h>
19 #include <tap_rss.h>
20
21 #ifndef HAVE_TC_FLOWER
22 /*
23 * For kernels < 4.2, this enum is not defined. Runtime checks will be made to
24 * avoid sending TC messages the kernel cannot understand.
25 */
26 enum {
27 TCA_FLOWER_UNSPEC,
28 TCA_FLOWER_CLASSID,
29 TCA_FLOWER_INDEV,
30 TCA_FLOWER_ACT,
31 TCA_FLOWER_KEY_ETH_DST, /* ETH_ALEN */
32 TCA_FLOWER_KEY_ETH_DST_MASK, /* ETH_ALEN */
33 TCA_FLOWER_KEY_ETH_SRC, /* ETH_ALEN */
34 TCA_FLOWER_KEY_ETH_SRC_MASK, /* ETH_ALEN */
35 TCA_FLOWER_KEY_ETH_TYPE, /* be16 */
36 TCA_FLOWER_KEY_IP_PROTO, /* u8 */
37 TCA_FLOWER_KEY_IPV4_SRC, /* be32 */
38 TCA_FLOWER_KEY_IPV4_SRC_MASK, /* be32 */
39 TCA_FLOWER_KEY_IPV4_DST, /* be32 */
40 TCA_FLOWER_KEY_IPV4_DST_MASK, /* be32 */
41 TCA_FLOWER_KEY_IPV6_SRC, /* struct in6_addr */
42 TCA_FLOWER_KEY_IPV6_SRC_MASK, /* struct in6_addr */
43 TCA_FLOWER_KEY_IPV6_DST, /* struct in6_addr */
44 TCA_FLOWER_KEY_IPV6_DST_MASK, /* struct in6_addr */
45 TCA_FLOWER_KEY_TCP_SRC, /* be16 */
46 TCA_FLOWER_KEY_TCP_DST, /* be16 */
47 TCA_FLOWER_KEY_UDP_SRC, /* be16 */
48 TCA_FLOWER_KEY_UDP_DST, /* be16 */
49 };
50 #endif
51 #ifndef HAVE_TC_VLAN_ID
52 enum {
53 /* TCA_FLOWER_FLAGS, */
54 TCA_FLOWER_KEY_VLAN_ID = TCA_FLOWER_KEY_UDP_DST + 2, /* be16 */
55 TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */
56 TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
57 };
58 #endif
59 /*
60 * For kernels < 4.2 BPF related enums may not be defined.
61 * Runtime checks will be carried out to gracefully report on TC messages that
62  * are rejected by the kernel. Rejection may be due to:
63 * 1. enum is not defined
64 * 2. enum is defined but kernel is not configured to support BPF system calls,
65 * BPF classifications or BPF actions.
66 */
67 #ifndef HAVE_TC_BPF
68 enum {
69 TCA_BPF_UNSPEC,
70 TCA_BPF_ACT,
71 TCA_BPF_POLICE,
72 TCA_BPF_CLASSID,
73 TCA_BPF_OPS_LEN,
74 TCA_BPF_OPS,
75 };
76 #endif
77 #ifndef HAVE_TC_BPF_FD
78 enum {
79 TCA_BPF_FD = TCA_BPF_OPS + 1,
80 TCA_BPF_NAME,
81 };
82 #endif
83 #ifndef HAVE_TC_ACT_BPF
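/* Local copy of the kernel's common TC action header fields (tc_gen in linux/pkt_cls.h). */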
84 #define tc_gen \
85 __u32 index; \
86 __u32 capab; \
87 int action; \
88 int refcnt; \
89 int bindcnt
90
91 struct tc_act_bpf {
92 tc_gen;
93 };
94
95 enum {
96 TCA_ACT_BPF_UNSPEC,
97 TCA_ACT_BPF_TM,
98 TCA_ACT_BPF_PARMS,
99 TCA_ACT_BPF_OPS_LEN,
100 TCA_ACT_BPF_OPS,
101 };
102
103 #endif
104 #ifndef HAVE_TC_ACT_BPF_FD
105 enum {
106 TCA_ACT_BPF_FD = TCA_ACT_BPF_OPS + 1,
107 TCA_ACT_BPF_NAME,
108 };
109 #endif
110
111 /* RSS key management */
112 enum bpf_rss_key_e {
113 KEY_CMD_GET = 1,
114 KEY_CMD_RELEASE,
115 KEY_CMD_INIT,
116 KEY_CMD_DEINIT,
117 };
118
119 enum key_status_e {
120 KEY_STAT_UNSPEC,
121 KEY_STAT_USED,
122 KEY_STAT_AVAILABLE,
123 };
124
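/*
 * Fixed TC handles for implicit rules that must never be installed twice:
 * the ISOLATE rule (whose action toggles between DROP and PASSTHRU) and the
 * single REMOTE_PROMISCUOUS rule. See tap_flow_implicit_create().
 */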
125 #define ISOLATE_HANDLE 1
126 #define REMOTE_PROMISCUOUS_HANDLE 2
127
128 struct rte_flow {
129 LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
130 struct rte_flow *remote_flow; /* associated remote flow */
131 	int bpf_fd[SEC_MAX]; /* list of bpf fds per ELF section */
132 uint32_t key_idx; /* RSS rule key index into BPF map */
133 struct nlmsg msg;
134 };
135
136 struct convert_data {
137 uint16_t eth_type;
138 uint16_t ip_proto;
139 uint8_t vlan;
140 struct rte_flow *flow;
141 };
142
143 struct remote_rule {
144 struct rte_flow_attr attr;
145 struct rte_flow_item items[2];
146 struct rte_flow_action actions[2];
147 int mirred;
148 };
149
150 struct action_data {
151 char id[16];
152
153 union {
154 struct tc_gact gact;
155 struct tc_mirred mirred;
156 struct skbedit {
157 struct tc_skbedit skbedit;
158 uint16_t queue;
159 } skbedit;
160 struct bpf {
161 struct tc_act_bpf bpf;
162 int bpf_fd;
163 const char *annotation;
164 } bpf;
165 };
166 };
167
168 static int tap_flow_create_eth(const struct rte_flow_item *item, void *data);
169 static int tap_flow_create_vlan(const struct rte_flow_item *item, void *data);
170 static int tap_flow_create_ipv4(const struct rte_flow_item *item, void *data);
171 static int tap_flow_create_ipv6(const struct rte_flow_item *item, void *data);
172 static int tap_flow_create_udp(const struct rte_flow_item *item, void *data);
173 static int tap_flow_create_tcp(const struct rte_flow_item *item, void *data);
174 static int
175 tap_flow_validate(struct rte_eth_dev *dev,
176 const struct rte_flow_attr *attr,
177 const struct rte_flow_item items[],
178 const struct rte_flow_action actions[],
179 struct rte_flow_error *error);
180
181 static struct rte_flow *
182 tap_flow_create(struct rte_eth_dev *dev,
183 const struct rte_flow_attr *attr,
184 const struct rte_flow_item items[],
185 const struct rte_flow_action actions[],
186 struct rte_flow_error *error);
187
188 static void
189 tap_flow_free(struct pmd_internals *pmd,
190 struct rte_flow *flow);
191
192 static int
193 tap_flow_destroy(struct rte_eth_dev *dev,
194 struct rte_flow *flow,
195 struct rte_flow_error *error);
196
197 static int
198 tap_flow_isolate(struct rte_eth_dev *dev,
199 int set,
200 struct rte_flow_error *error);
201
202 static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx);
203 static int rss_enable(struct pmd_internals *pmd,
204 const struct rte_flow_attr *attr,
205 struct rte_flow_error *error);
206 static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
207 const struct rte_flow_action_rss *rss,
208 struct rte_flow_error *error);
209
210 static const struct rte_flow_ops tap_flow_ops = {
211 .validate = tap_flow_validate,
212 .create = tap_flow_create,
213 .destroy = tap_flow_destroy,
214 .flush = tap_flow_flush,
215 .isolate = tap_flow_isolate,
216 };
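/*
 * Example of a rule these callbacks handle (testpmd syntax, for illustration
 * only; port and addresses are made up):
 *   flow create 0 ingress pattern eth / ipv4 dst is 10.0.0.1 / end
 *        actions queue index 1 / end
 */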
217
218 /* Static initializer for items. */
219 #define ITEMS(...) \
220 (const enum rte_flow_item_type []){ \
221 __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
222 }
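/* e.g. ITEMS(RTE_FLOW_ITEM_TYPE_ETH) yields { RTE_FLOW_ITEM_TYPE_ETH, RTE_FLOW_ITEM_TYPE_END }. */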
223
224 /* Structure to generate a simple graph of layers supported by the NIC. */
225 struct tap_flow_items {
226 /* Bit-mask corresponding to what is supported for this item. */
227 const void *mask;
228 const unsigned int mask_sz; /* Bit-mask size in bytes. */
229 /*
230 * Bit-mask corresponding to the default mask, if none is provided
231 * along with the item.
232 */
233 const void *default_mask;
234 /**
235 * Conversion function from rte_flow to netlink attributes.
236 *
237 * @param item
238 * rte_flow item to convert.
239 * @param data
240 * Internal structure to store the conversion.
241 *
242 * @return
243 * 0 on success, negative value otherwise.
244 */
245 int (*convert)(const struct rte_flow_item *item, void *data);
246 /** List of possible following items. */
247 const enum rte_flow_item_type *const items;
248 };
249
250 /* Graph of supported items and associated actions. */
251 static const struct tap_flow_items tap_flow_items[] = {
252 [RTE_FLOW_ITEM_TYPE_END] = {
253 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
254 },
255 [RTE_FLOW_ITEM_TYPE_ETH] = {
256 .items = ITEMS(
257 RTE_FLOW_ITEM_TYPE_VLAN,
258 RTE_FLOW_ITEM_TYPE_IPV4,
259 RTE_FLOW_ITEM_TYPE_IPV6),
260 .mask = &(const struct rte_flow_item_eth){
261 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
262 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
263 .type = -1,
264 },
265 .mask_sz = sizeof(struct rte_flow_item_eth),
266 .default_mask = &rte_flow_item_eth_mask,
267 .convert = tap_flow_create_eth,
268 },
269 [RTE_FLOW_ITEM_TYPE_VLAN] = {
270 .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
271 RTE_FLOW_ITEM_TYPE_IPV6),
272 .mask = &(const struct rte_flow_item_vlan){
273 /* DEI matching is not supported */
274 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
275 .tci = 0xffef,
276 #else
277 .tci = 0xefff,
278 #endif
279 .inner_type = -1,
280 },
281 .mask_sz = sizeof(struct rte_flow_item_vlan),
282 .default_mask = &rte_flow_item_vlan_mask,
283 .convert = tap_flow_create_vlan,
284 },
285 [RTE_FLOW_ITEM_TYPE_IPV4] = {
286 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
287 RTE_FLOW_ITEM_TYPE_TCP),
288 .mask = &(const struct rte_flow_item_ipv4){
289 .hdr = {
290 .src_addr = -1,
291 .dst_addr = -1,
292 .next_proto_id = -1,
293 },
294 },
295 .mask_sz = sizeof(struct rte_flow_item_ipv4),
296 .default_mask = &rte_flow_item_ipv4_mask,
297 .convert = tap_flow_create_ipv4,
298 },
299 [RTE_FLOW_ITEM_TYPE_IPV6] = {
300 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
301 RTE_FLOW_ITEM_TYPE_TCP),
302 .mask = &(const struct rte_flow_item_ipv6){
303 .hdr = {
304 .src_addr = {
305 "\xff\xff\xff\xff\xff\xff\xff\xff"
306 "\xff\xff\xff\xff\xff\xff\xff\xff",
307 },
308 .dst_addr = {
309 "\xff\xff\xff\xff\xff\xff\xff\xff"
310 "\xff\xff\xff\xff\xff\xff\xff\xff",
311 },
312 .proto = -1,
313 },
314 },
315 .mask_sz = sizeof(struct rte_flow_item_ipv6),
316 .default_mask = &rte_flow_item_ipv6_mask,
317 .convert = tap_flow_create_ipv6,
318 },
319 [RTE_FLOW_ITEM_TYPE_UDP] = {
320 .mask = &(const struct rte_flow_item_udp){
321 .hdr = {
322 .src_port = -1,
323 .dst_port = -1,
324 },
325 },
326 .mask_sz = sizeof(struct rte_flow_item_udp),
327 .default_mask = &rte_flow_item_udp_mask,
328 .convert = tap_flow_create_udp,
329 },
330 [RTE_FLOW_ITEM_TYPE_TCP] = {
331 .mask = &(const struct rte_flow_item_tcp){
332 .hdr = {
333 .src_port = -1,
334 .dst_port = -1,
335 },
336 },
337 .mask_sz = sizeof(struct rte_flow_item_tcp),
338 .default_mask = &rte_flow_item_tcp_mask,
339 .convert = tap_flow_create_tcp,
340 },
341 };
342
343 /*
344 * TC rules, by growing priority
345 *
346 * Remote netdevice Tap netdevice
347 * +-------------+-------------+ +-------------+-------------+
348 * | Ingress | Egress | | Ingress | Egress |
349 * |-------------|-------------| |-------------|-------------|
350 * | | \ / | | | REMOTE TX | prio 1
351 * | | \ / | | | \ / | prio 2
352 * | EXPLICIT | \ / | | EXPLICIT | \ / | .
353 * | | \ / | | | \ / | .
354 * | RULES | X | | RULES | X | .
355 * | . | / \ | | . | / \ | .
356 * | . | / \ | | . | / \ | .
357 * | . | / \ | | . | / \ | .
358 * | . | / \ | | . | / \ | .
359 *
360 * .... .... .... ....
361 *
362 * | . | \ / | | . | \ / | .
363 * | . | \ / | | . | \ / | .
364 * | | \ / | | | \ / |
365 * | LOCAL_MAC | \ / | | \ / | \ / | last prio - 5
366 * | PROMISC | X | | \ / | X | last prio - 4
367 * | ALLMULTI | / \ | | X | / \ | last prio - 3
368 * | BROADCAST | / \ | | / \ | / \ | last prio - 2
369 * | BROADCASTV6 | / \ | | / \ | / \ | last prio - 1
370 * | xx | / \ | | ISOLATE | / \ | last prio
371 * +-------------+-------------+ +-------------+-------------+
372 *
373  * The implicit flow rules are stored in a list, the last two mandatorily being
374  * the ISOLATE and REMOTE_TX rules, e.g.:
375 *
376 * LOCAL_MAC -> BROADCAST -> BROADCASTV6 -> REMOTE_TX -> ISOLATE -> NULL
377 *
378 * That enables tap_flow_isolate() to remove implicit rules by popping the list
379  * head and removing it as long as it applies to the remote netdevice. The
380 * implicit rule for TX redirection is not removed, as isolate concerns only
381 * incoming traffic.
382 */
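/*
 * For illustration only (interface name and address are made up), an explicit
 * QUEUE rule such as the example near tap_flow_ops roughly translates to:
 *   tc filter add dev dtap0 parent <multiq handle> protocol ip prio <prio> \
 *       flower dst_ip 10.0.0.1 action skbedit queue_mapping 1
 */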
383
384 static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
385 [TAP_REMOTE_LOCAL_MAC] = {
386 .attr = {
387 .group = MAX_GROUP,
388 .priority = PRIORITY_MASK - TAP_REMOTE_LOCAL_MAC,
389 .ingress = 1,
390 },
391 .items[0] = {
392 .type = RTE_FLOW_ITEM_TYPE_ETH,
393 .mask = &(const struct rte_flow_item_eth){
394 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
395 },
396 },
397 .items[1] = {
398 .type = RTE_FLOW_ITEM_TYPE_END,
399 },
400 .mirred = TCA_EGRESS_REDIR,
401 },
402 [TAP_REMOTE_BROADCAST] = {
403 .attr = {
404 .group = MAX_GROUP,
405 .priority = PRIORITY_MASK - TAP_REMOTE_BROADCAST,
406 .ingress = 1,
407 },
408 .items[0] = {
409 .type = RTE_FLOW_ITEM_TYPE_ETH,
410 .mask = &(const struct rte_flow_item_eth){
411 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
412 },
413 .spec = &(const struct rte_flow_item_eth){
414 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
415 },
416 },
417 .items[1] = {
418 .type = RTE_FLOW_ITEM_TYPE_END,
419 },
420 .mirred = TCA_EGRESS_MIRROR,
421 },
422 [TAP_REMOTE_BROADCASTV6] = {
423 .attr = {
424 .group = MAX_GROUP,
425 .priority = PRIORITY_MASK - TAP_REMOTE_BROADCASTV6,
426 .ingress = 1,
427 },
428 .items[0] = {
429 .type = RTE_FLOW_ITEM_TYPE_ETH,
430 .mask = &(const struct rte_flow_item_eth){
431 .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
432 },
433 .spec = &(const struct rte_flow_item_eth){
434 .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
435 },
436 },
437 .items[1] = {
438 .type = RTE_FLOW_ITEM_TYPE_END,
439 },
440 .mirred = TCA_EGRESS_MIRROR,
441 },
442 [TAP_REMOTE_PROMISC] = {
443 .attr = {
444 .group = MAX_GROUP,
445 .priority = PRIORITY_MASK - TAP_REMOTE_PROMISC,
446 .ingress = 1,
447 },
448 .items[0] = {
449 .type = RTE_FLOW_ITEM_TYPE_VOID,
450 },
451 .items[1] = {
452 .type = RTE_FLOW_ITEM_TYPE_END,
453 },
454 .mirred = TCA_EGRESS_MIRROR,
455 },
456 [TAP_REMOTE_ALLMULTI] = {
457 .attr = {
458 .group = MAX_GROUP,
459 .priority = PRIORITY_MASK - TAP_REMOTE_ALLMULTI,
460 .ingress = 1,
461 },
462 .items[0] = {
463 .type = RTE_FLOW_ITEM_TYPE_ETH,
464 .mask = &(const struct rte_flow_item_eth){
465 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
466 },
467 .spec = &(const struct rte_flow_item_eth){
468 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
469 },
470 },
471 .items[1] = {
472 .type = RTE_FLOW_ITEM_TYPE_END,
473 },
474 .mirred = TCA_EGRESS_MIRROR,
475 },
476 [TAP_REMOTE_TX] = {
477 .attr = {
478 .group = 0,
479 .priority = TAP_REMOTE_TX,
480 .egress = 1,
481 },
482 .items[0] = {
483 .type = RTE_FLOW_ITEM_TYPE_VOID,
484 },
485 .items[1] = {
486 .type = RTE_FLOW_ITEM_TYPE_END,
487 },
488 .mirred = TCA_EGRESS_MIRROR,
489 },
490 [TAP_ISOLATE] = {
491 .attr = {
492 .group = MAX_GROUP,
493 .priority = PRIORITY_MASK - TAP_ISOLATE,
494 .ingress = 1,
495 },
496 .items[0] = {
497 .type = RTE_FLOW_ITEM_TYPE_VOID,
498 },
499 .items[1] = {
500 .type = RTE_FLOW_ITEM_TYPE_END,
501 },
502 },
503 };
504
505 /**
506  * Make as many checks as possible on an Ethernet item, and if a flow is
507 * provided, fill it appropriately with Ethernet info.
508 *
509 * @param[in] item
510 * Item specification.
511 * @param[in, out] data
512 * Additional data structure to tell next layers we've been here.
513 *
514 * @return
515 * 0 if checks are alright, -1 otherwise.
516 */
517 static int
518 tap_flow_create_eth(const struct rte_flow_item *item, void *data)
519 {
520 struct convert_data *info = (struct convert_data *)data;
521 const struct rte_flow_item_eth *spec = item->spec;
522 const struct rte_flow_item_eth *mask = item->mask;
523 struct rte_flow *flow = info->flow;
524 struct nlmsg *msg;
525
526 /* use default mask if none provided */
527 if (!mask)
528 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_ETH].default_mask;
529 /* TC does not support eth_type masking. Only accept if exact match. */
530 if (mask->type && mask->type != 0xffff)
531 return -1;
532 if (!spec)
533 return 0;
534 /* store eth_type for consistency if ipv4/6 pattern item comes next */
535 if (spec->type & mask->type)
536 info->eth_type = spec->type;
537 if (!flow)
538 return 0;
539 msg = &flow->msg;
540 if (!rte_is_zero_ether_addr(&mask->dst)) {
541 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST,
542 RTE_ETHER_ADDR_LEN,
543 &spec->dst.addr_bytes);
544 tap_nlattr_add(&msg->nh,
545 TCA_FLOWER_KEY_ETH_DST_MASK, RTE_ETHER_ADDR_LEN,
546 &mask->dst.addr_bytes);
547 }
548 if (!rte_is_zero_ether_addr(&mask->src)) {
549 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC,
550 RTE_ETHER_ADDR_LEN,
551 &spec->src.addr_bytes);
552 tap_nlattr_add(&msg->nh,
553 TCA_FLOWER_KEY_ETH_SRC_MASK, RTE_ETHER_ADDR_LEN,
554 &mask->src.addr_bytes);
555 }
556 return 0;
557 }
558
559 /**
560  * Make as many checks as possible on a VLAN item, and if a flow is provided,
561 * fill it appropriately with VLAN info.
562 *
563 * @param[in] item
564 * Item specification.
565 * @param[in, out] data
566 * Additional data structure to tell next layers we've been here.
567 *
568 * @return
569 * 0 if checks are alright, -1 otherwise.
570 */
571 static int
572 tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
573 {
574 struct convert_data *info = (struct convert_data *)data;
575 const struct rte_flow_item_vlan *spec = item->spec;
576 const struct rte_flow_item_vlan *mask = item->mask;
577 struct rte_flow *flow = info->flow;
578 struct nlmsg *msg;
579
580 /* use default mask if none provided */
581 if (!mask)
582 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_VLAN].default_mask;
583 /* Outer TPID cannot be matched. */
584 if (info->eth_type)
585 return -1;
586 /* Double-tagging not supported. */
587 if (info->vlan)
588 return -1;
589 info->vlan = 1;
590 if (mask->inner_type) {
591 /* TC does not support partial eth_type masking */
592 if (mask->inner_type != RTE_BE16(0xffff))
593 return -1;
594 info->eth_type = spec->inner_type;
595 }
596 if (!flow)
597 return 0;
598 msg = &flow->msg;
599 msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_8021Q));
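/* 802.1Q TCI layout: PCP (3 bits) | DEI (1 bit) | VID (12 bits). */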
600 #define VLAN_PRIO(tci) ((tci) >> 13)
601 #define VLAN_ID(tci) ((tci) & 0xfff)
602 if (!spec)
603 return 0;
604 if (spec->tci) {
605 uint16_t tci = ntohs(spec->tci) & mask->tci;
606 uint16_t prio = VLAN_PRIO(tci);
607 		uint16_t vid = VLAN_ID(tci);
608
609 if (prio)
610 tap_nlattr_add8(&msg->nh,
611 TCA_FLOWER_KEY_VLAN_PRIO, prio);
612 if (vid)
613 tap_nlattr_add16(&msg->nh,
614 TCA_FLOWER_KEY_VLAN_ID, vid);
615 }
616 return 0;
617 }
618
619 /**
620  * Make as many checks as possible on an IPv4 item, and if a flow is provided,
621 * fill it appropriately with IPv4 info.
622 *
623 * @param[in] item
624 * Item specification.
625 * @param[in, out] data
626 * Additional data structure to tell next layers we've been here.
627 *
628 * @return
629 * 0 if checks are alright, -1 otherwise.
630 */
631 static int
632 tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
633 {
634 struct convert_data *info = (struct convert_data *)data;
635 const struct rte_flow_item_ipv4 *spec = item->spec;
636 const struct rte_flow_item_ipv4 *mask = item->mask;
637 struct rte_flow *flow = info->flow;
638 struct nlmsg *msg;
639
640 /* use default mask if none provided */
641 if (!mask)
642 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV4].default_mask;
643 /* check that previous eth type is compatible with ipv4 */
644 if (info->eth_type && info->eth_type != htons(ETH_P_IP))
645 return -1;
646 /* store ip_proto for consistency if udp/tcp pattern item comes next */
647 if (spec)
648 info->ip_proto = spec->hdr.next_proto_id;
649 if (!flow)
650 return 0;
651 msg = &flow->msg;
652 if (!info->eth_type)
653 info->eth_type = htons(ETH_P_IP);
654 if (!spec)
655 return 0;
656 if (mask->hdr.dst_addr) {
657 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
658 spec->hdr.dst_addr);
659 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
660 mask->hdr.dst_addr);
661 }
662 if (mask->hdr.src_addr) {
663 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
664 spec->hdr.src_addr);
665 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
666 mask->hdr.src_addr);
667 }
668 if (spec->hdr.next_proto_id)
669 tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO,
670 spec->hdr.next_proto_id);
671 return 0;
672 }
673
674 /**
675  * Make as many checks as possible on an IPv6 item, and if a flow is provided,
676 * fill it appropriately with IPv6 info.
677 *
678 * @param[in] item
679 * Item specification.
680 * @param[in, out] data
681 * Additional data structure to tell next layers we've been here.
682 *
683 * @return
684 * 0 if checks are alright, -1 otherwise.
685 */
686 static int
687 tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
688 {
689 struct convert_data *info = (struct convert_data *)data;
690 const struct rte_flow_item_ipv6 *spec = item->spec;
691 const struct rte_flow_item_ipv6 *mask = item->mask;
692 struct rte_flow *flow = info->flow;
693 uint8_t empty_addr[16] = { 0 };
694 struct nlmsg *msg;
695
696 /* use default mask if none provided */
697 if (!mask)
698 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV6].default_mask;
699 /* check that previous eth type is compatible with ipv6 */
700 if (info->eth_type && info->eth_type != htons(ETH_P_IPV6))
701 return -1;
702 /* store ip_proto for consistency if udp/tcp pattern item comes next */
703 if (spec)
704 info->ip_proto = spec->hdr.proto;
705 if (!flow)
706 return 0;
707 msg = &flow->msg;
708 if (!info->eth_type)
709 info->eth_type = htons(ETH_P_IPV6);
710 if (!spec)
711 return 0;
712 if (memcmp(mask->hdr.dst_addr, empty_addr, 16)) {
713 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
714 sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
715 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
716 sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
717 }
718 if (memcmp(mask->hdr.src_addr, empty_addr, 16)) {
719 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
720 sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
721 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
722 sizeof(mask->hdr.src_addr), &mask->hdr.src_addr);
723 }
724 if (spec->hdr.proto)
725 tap_nlattr_add8(&msg->nh,
726 TCA_FLOWER_KEY_IP_PROTO, spec->hdr.proto);
727 return 0;
728 }
729
730 /**
731  * Make as many checks as possible on a UDP item, and if a flow is provided,
732 * fill it appropriately with UDP info.
733 *
734 * @param[in] item
735 * Item specification.
736 * @param[in, out] data
737 * Additional data structure to tell next layers we've been here.
738 *
739 * @return
740 * 0 if checks are alright, -1 otherwise.
741 */
742 static int
743 tap_flow_create_udp(const struct rte_flow_item *item, void *data)
744 {
745 struct convert_data *info = (struct convert_data *)data;
746 const struct rte_flow_item_udp *spec = item->spec;
747 const struct rte_flow_item_udp *mask = item->mask;
748 struct rte_flow *flow = info->flow;
749 struct nlmsg *msg;
750
751 /* use default mask if none provided */
752 if (!mask)
753 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_UDP].default_mask;
754 /* check that previous ip_proto is compatible with udp */
755 if (info->ip_proto && info->ip_proto != IPPROTO_UDP)
756 return -1;
757 /* TC does not support UDP port masking. Only accept if exact match. */
758 if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
759 (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
760 return -1;
761 if (!flow)
762 return 0;
763 msg = &flow->msg;
764 tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
765 if (!spec)
766 return 0;
767 if (mask->hdr.dst_port)
768 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
769 spec->hdr.dst_port);
770 if (mask->hdr.src_port)
771 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
772 spec->hdr.src_port);
773 return 0;
774 }
775
776 /**
777  * Make as many checks as possible on a TCP item, and if a flow is provided,
778 * fill it appropriately with TCP info.
779 *
780 * @param[in] item
781 * Item specification.
782 * @param[in, out] data
783 * Additional data structure to tell next layers we've been here.
784 *
785 * @return
786 * 0 if checks are alright, -1 otherwise.
787 */
788 static int
789 tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
790 {
791 struct convert_data *info = (struct convert_data *)data;
792 const struct rte_flow_item_tcp *spec = item->spec;
793 const struct rte_flow_item_tcp *mask = item->mask;
794 struct rte_flow *flow = info->flow;
795 struct nlmsg *msg;
796
797 /* use default mask if none provided */
798 if (!mask)
799 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_TCP].default_mask;
800 /* check that previous ip_proto is compatible with tcp */
801 if (info->ip_proto && info->ip_proto != IPPROTO_TCP)
802 return -1;
803 /* TC does not support TCP port masking. Only accept if exact match. */
804 if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
805 (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
806 return -1;
807 if (!flow)
808 return 0;
809 msg = &flow->msg;
810 tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
811 if (!spec)
812 return 0;
813 if (mask->hdr.dst_port)
814 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
815 spec->hdr.dst_port);
816 if (mask->hdr.src_port)
817 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
818 spec->hdr.src_port);
819 return 0;
820 }
821
822 /**
823 * Check support for a given item.
824 *
825 * @param[in] item
826 * Item specification.
827 * @param size
828 * Bit-Mask size in bytes.
829 * @param[in] supported_mask
830 * Bit-mask covering supported fields to compare with spec, last and mask in
831 * \item.
832 * @param[in] default_mask
833 * Bit-mask default mask if none is provided in \item.
834 *
835 * @return
836 * 0 on success.
837 */
838 static int
839 tap_flow_item_validate(const struct rte_flow_item *item,
840 unsigned int size,
841 const uint8_t *supported_mask,
842 const uint8_t *default_mask)
843 {
844 int ret = 0;
845
846 /* An empty layer is allowed, as long as all fields are NULL */
847 if (!item->spec && (item->mask || item->last))
848 return -1;
849 /* Is the item spec compatible with what the NIC supports? */
850 if (item->spec && !item->mask) {
851 unsigned int i;
852 const uint8_t *spec = item->spec;
853
854 for (i = 0; i < size; ++i)
855 if ((spec[i] | supported_mask[i]) != supported_mask[i])
856 return -1;
857 /* Is the default mask compatible with what the NIC supports? */
858 for (i = 0; i < size; i++)
859 if ((default_mask[i] | supported_mask[i]) !=
860 supported_mask[i])
861 return -1;
862 }
863 /* Is the item last compatible with what the NIC supports? */
864 if (item->last && !item->mask) {
865 unsigned int i;
866 const uint8_t *spec = item->last;
867
868 for (i = 0; i < size; ++i)
869 if ((spec[i] | supported_mask[i]) != supported_mask[i])
870 return -1;
871 }
872 /* Is the item mask compatible with what the NIC supports? */
873 if (item->mask) {
874 unsigned int i;
875 const uint8_t *spec = item->mask;
876
877 for (i = 0; i < size; ++i)
878 if ((spec[i] | supported_mask[i]) != supported_mask[i])
879 return -1;
880 }
881 /**
882 	 * Once masked, are item spec and item last equal?
883 	 * TC does not support ranges, so anything else is invalid.
884 */
885 if (item->spec && item->last) {
886 uint8_t spec[size];
887 uint8_t last[size];
888 const uint8_t *apply = default_mask;
889 unsigned int i;
890
891 if (item->mask)
892 apply = item->mask;
893 for (i = 0; i < size; ++i) {
894 spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
895 last[i] = ((const uint8_t *)item->last)[i] & apply[i];
896 }
897 ret = memcmp(spec, last, size);
898 }
899 return ret;
900 }
901
902 /**
903  * Configure the kernel with a TC action and its parameters
904 * Handled actions: "gact", "mirred", "skbedit", "bpf"
905 *
906 * @param[in] flow
907 * Pointer to rte flow containing the netlink message
908 *
909 * @param[in, out] act_index
910 * Pointer to action sequence number in the TC command
911 *
912 * @param[in] adata
913 * Pointer to struct holding the action parameters
914 *
915 * @return
916 * -1 on failure, 0 on success
917 */
918 static int
919 add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata)
920 {
921 struct nlmsg *msg = &flow->msg;
922
923 if (tap_nlattr_nested_start(msg, (*act_index)++) < 0)
924 return -1;
925
926 tap_nlattr_add(&msg->nh, TCA_ACT_KIND,
927 strlen(adata->id) + 1, adata->id);
928 if (tap_nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
929 return -1;
930 if (strcmp("gact", adata->id) == 0) {
931 tap_nlattr_add(&msg->nh, TCA_GACT_PARMS, sizeof(adata->gact),
932 &adata->gact);
933 } else if (strcmp("mirred", adata->id) == 0) {
934 if (adata->mirred.eaction == TCA_EGRESS_MIRROR)
935 adata->mirred.action = TC_ACT_PIPE;
936 else /* REDIRECT */
937 adata->mirred.action = TC_ACT_STOLEN;
938 tap_nlattr_add(&msg->nh, TCA_MIRRED_PARMS,
939 sizeof(adata->mirred),
940 &adata->mirred);
941 } else if (strcmp("skbedit", adata->id) == 0) {
942 tap_nlattr_add(&msg->nh, TCA_SKBEDIT_PARMS,
943 sizeof(adata->skbedit.skbedit),
944 &adata->skbedit.skbedit);
945 tap_nlattr_add16(&msg->nh, TCA_SKBEDIT_QUEUE_MAPPING,
946 adata->skbedit.queue);
947 } else if (strcmp("bpf", adata->id) == 0) {
948 tap_nlattr_add32(&msg->nh, TCA_ACT_BPF_FD, adata->bpf.bpf_fd);
949 tap_nlattr_add(&msg->nh, TCA_ACT_BPF_NAME,
950 strlen(adata->bpf.annotation) + 1,
951 adata->bpf.annotation);
952 tap_nlattr_add(&msg->nh, TCA_ACT_BPF_PARMS,
953 sizeof(adata->bpf.bpf),
954 &adata->bpf.bpf);
955 } else {
956 return -1;
957 }
958 tap_nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
959 tap_nlattr_nested_finish(msg); /* nested act_index */
960 return 0;
961 }
962
963 /**
964 * Helper function to send a series of TC actions to the kernel
965 *
966 * @param[in] flow
967 * Pointer to rte flow containing the netlink message
968 *
969 * @param[in] nb_actions
970 * Number of actions in an array of action structs
971 *
972 * @param[in] data
973 * Pointer to an array of action structs
974 *
975  * @param[in] classifier_action
976  * The classifier on behalf of which the actions are configured
977 *
978 * @return
979 * -1 on failure, 0 on success
980 */
981 static int
982 add_actions(struct rte_flow *flow, int nb_actions, struct action_data *data,
983 int classifier_action)
984 {
985 struct nlmsg *msg = &flow->msg;
986 size_t act_index = 1;
987 int i;
988
989 if (tap_nlattr_nested_start(msg, classifier_action) < 0)
990 return -1;
991 for (i = 0; i < nb_actions; i++)
992 if (add_action(flow, &act_index, data + i) < 0)
993 return -1;
994 tap_nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
995 return 0;
996 }
997
998 /**
999 * Validate a flow supported by TC.
1000 * If flow param is not NULL, then also fill the netlink message inside.
1001 *
1002 * @param pmd
1003 * Pointer to private structure.
1004 * @param[in] attr
1005 * Flow rule attributes.
1006 * @param[in] pattern
1007 * Pattern specification (list terminated by the END pattern item).
1008 * @param[in] actions
1009 * Associated actions (list terminated by the END action).
1010 * @param[out] error
1011 * Perform verbose error reporting if not NULL.
1012 * @param[in, out] flow
1013 * Flow structure to update.
1014 * @param[in] mirred
1015 * If set to TCA_EGRESS_REDIR, provided actions will be replaced with a
1016 * redirection to the tap netdevice, and the TC rule will be configured
1017 * on the remote netdevice in pmd.
1018 * If set to TCA_EGRESS_MIRROR, provided actions will be replaced with a
1019 * mirroring to the tap netdevice, and the TC rule will be configured
1020 * on the remote netdevice in pmd. Matching packets will thus be duplicated.
1021 * If set to 0, the standard behavior is to be used: set correct actions for
1022 * the TC rule, and apply it on the tap netdevice.
1023 *
1024 * @return
1025 * 0 on success, a negative errno value otherwise and rte_errno is set.
1026 */
1027 static int
1028 priv_flow_process(struct pmd_internals *pmd,
1029 const struct rte_flow_attr *attr,
1030 const struct rte_flow_item items[],
1031 const struct rte_flow_action actions[],
1032 struct rte_flow_error *error,
1033 struct rte_flow *flow,
1034 int mirred)
1035 {
1036 const struct tap_flow_items *cur_item = tap_flow_items;
1037 struct convert_data data = {
1038 .eth_type = 0,
1039 .ip_proto = 0,
1040 .flow = flow,
1041 };
1042 int action = 0; /* Only one action authorized for now */
1043
1044 if (attr->transfer) {
1045 rte_flow_error_set(
1046 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1047 NULL, "transfer is not supported");
1048 return -rte_errno;
1049 }
1050 if (attr->group > MAX_GROUP) {
1051 rte_flow_error_set(
1052 error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1053 NULL, "group value too big: cannot exceed 15");
1054 return -rte_errno;
1055 }
1056 if (attr->priority > MAX_PRIORITY) {
1057 rte_flow_error_set(
1058 error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1059 NULL, "priority value too big");
1060 return -rte_errno;
1061 } else if (flow) {
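/*
 * tcm_info carries the filter priority in its upper 16 bits and the
 * protocol (already set to ETH_P_ALL by the caller) in its lower 16 bits.
 */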
1062 uint16_t group = attr->group << GROUP_SHIFT;
1063 uint16_t prio = group | (attr->priority +
1064 RSS_PRIORITY_OFFSET + PRIORITY_OFFSET);
1065 flow->msg.t.tcm_info = TC_H_MAKE(prio << 16,
1066 flow->msg.t.tcm_info);
1067 }
1068 if (flow) {
1069 if (mirred) {
1070 /*
1071 * If attr->ingress, the rule applies on remote ingress
1072 * to match incoming packets
1073 * If attr->egress, the rule applies on tap ingress (as
1074 * seen from the kernel) to deal with packets going out
1075 * from the DPDK app.
1076 */
1077 flow->msg.t.tcm_parent = TC_H_MAKE(TC_H_INGRESS, 0);
1078 } else {
1079 /* Standard rule on tap egress (kernel standpoint). */
1080 flow->msg.t.tcm_parent =
1081 TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
1082 }
1083 /* use flower filter type */
1084 tap_nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower");
1085 if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0)
1086 goto exit_item_not_supported;
1087 }
1088 for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
1089 const struct tap_flow_items *token = NULL;
1090 unsigned int i;
1091 int err = 0;
1092
1093 if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
1094 continue;
1095 for (i = 0;
1096 cur_item->items &&
1097 cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
1098 ++i) {
1099 if (cur_item->items[i] == items->type) {
1100 token = &tap_flow_items[items->type];
1101 break;
1102 }
1103 }
1104 if (!token)
1105 goto exit_item_not_supported;
1106 cur_item = token;
1107 err = tap_flow_item_validate(
1108 items, cur_item->mask_sz,
1109 (const uint8_t *)cur_item->mask,
1110 (const uint8_t *)cur_item->default_mask);
1111 if (err)
1112 goto exit_item_not_supported;
1113 if (flow && cur_item->convert) {
1114 err = cur_item->convert(items, &data);
1115 if (err)
1116 goto exit_item_not_supported;
1117 }
1118 }
1119 if (flow) {
1120 if (data.vlan) {
1121 tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
1122 htons(ETH_P_8021Q));
1123 tap_nlattr_add16(&flow->msg.nh,
1124 TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1125 data.eth_type ?
1126 data.eth_type : htons(ETH_P_ALL));
1127 } else if (data.eth_type) {
1128 tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
1129 data.eth_type);
1130 }
1131 }
1132 if (mirred && flow) {
1133 struct action_data adata = {
1134 .id = "mirred",
1135 .mirred = {
1136 .eaction = mirred,
1137 },
1138 };
1139
1140 /*
1141 * If attr->egress && mirred, then this is a special
1142 * case where the rule must be applied on the tap, to
1143 * redirect packets coming from the DPDK App, out
1144 * through the remote netdevice.
1145 */
1146 adata.mirred.ifindex = attr->ingress ? pmd->if_index :
1147 pmd->remote_if_index;
1148 if (mirred == TCA_EGRESS_MIRROR)
1149 adata.mirred.action = TC_ACT_PIPE;
1150 else
1151 adata.mirred.action = TC_ACT_STOLEN;
1152 if (add_actions(flow, 1, &adata, TCA_FLOWER_ACT) < 0)
1153 goto exit_action_not_supported;
1154 else
1155 goto end;
1156 }
1157 actions:
1158 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
1159 int err = 0;
1160
1161 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
1162 continue;
1163 } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
1164 if (action)
1165 goto exit_action_not_supported;
1166 action = 1;
1167 if (flow) {
1168 struct action_data adata = {
1169 .id = "gact",
1170 .gact = {
1171 .action = TC_ACT_SHOT,
1172 },
1173 };
1174
1175 err = add_actions(flow, 1, &adata,
1176 TCA_FLOWER_ACT);
1177 }
1178 } else if (actions->type == RTE_FLOW_ACTION_TYPE_PASSTHRU) {
1179 if (action)
1180 goto exit_action_not_supported;
1181 action = 1;
1182 if (flow) {
1183 struct action_data adata = {
1184 .id = "gact",
1185 .gact = {
1186 /* continue */
1187 .action = TC_ACT_UNSPEC,
1188 },
1189 };
1190
1191 err = add_actions(flow, 1, &adata,
1192 TCA_FLOWER_ACT);
1193 }
1194 } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1195 const struct rte_flow_action_queue *queue =
1196 (const struct rte_flow_action_queue *)
1197 actions->conf;
1198
1199 if (action)
1200 goto exit_action_not_supported;
1201 action = 1;
1202 if (!queue ||
1203 (queue->index > pmd->dev->data->nb_rx_queues - 1))
1204 goto exit_action_not_supported;
1205 if (flow) {
1206 struct action_data adata = {
1207 .id = "skbedit",
1208 .skbedit = {
1209 .skbedit = {
1210 .action = TC_ACT_PIPE,
1211 },
1212 .queue = queue->index,
1213 },
1214 };
1215
1216 err = add_actions(flow, 1, &adata,
1217 TCA_FLOWER_ACT);
1218 }
1219 } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
1220 const struct rte_flow_action_rss *rss =
1221 (const struct rte_flow_action_rss *)
1222 actions->conf;
1223
1224 if (action++)
1225 goto exit_action_not_supported;
1226
1227 if (!pmd->rss_enabled) {
1228 err = rss_enable(pmd, attr, error);
1229 if (err)
1230 goto exit_action_not_supported;
1231 }
1232 if (flow)
1233 err = rss_add_actions(flow, pmd, rss, error);
1234 } else {
1235 goto exit_action_not_supported;
1236 }
1237 if (err)
1238 goto exit_action_not_supported;
1239 }
1240 /* When fate is unknown, drop traffic. */
1241 if (!action) {
1242 static const struct rte_flow_action drop[] = {
1243 { .type = RTE_FLOW_ACTION_TYPE_DROP, },
1244 { .type = RTE_FLOW_ACTION_TYPE_END, },
1245 };
1246
1247 actions = drop;
1248 goto actions;
1249 }
1250 end:
1251 if (flow)
1252 tap_nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
1253 return 0;
1254 exit_item_not_supported:
1255 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1256 items, "item not supported");
1257 return -rte_errno;
1258 exit_action_not_supported:
1259 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
1260 actions, "action not supported");
1261 return -rte_errno;
1262 }
1263
1264
1265
1266 /**
1267 * Validate a flow.
1268 *
1269 * @see rte_flow_validate()
1270 * @see rte_flow_ops
1271 */
1272 static int
1273 tap_flow_validate(struct rte_eth_dev *dev,
1274 const struct rte_flow_attr *attr,
1275 const struct rte_flow_item items[],
1276 const struct rte_flow_action actions[],
1277 struct rte_flow_error *error)
1278 {
1279 struct pmd_internals *pmd = dev->data->dev_private;
1280
1281 return priv_flow_process(pmd, attr, items, actions, error, NULL, 0);
1282 }
1283
1284 /**
1285 * Set a unique handle in a flow.
1286 *
1287 * The kernel supports TC rules with equal priority, as long as they use the
1288 * same matching fields (e.g.: dst mac and ipv4) with different values (and
1289 * full mask to ensure no collision is possible).
1290 * In those rules, the handle (uint32_t) is the part that would identify
1291 * specifically each rule.
1292 *
1293 * On 32-bit architectures, the handle can simply be the flow's pointer address.
1294 * On 64-bit architectures, we rely on jhash(flow) to find a (sufficiently)
1295 * unique handle.
1296 *
1297 * @param[in, out] flow
1298 * The flow that needs its handle set.
1299 */
1300 static void
1301 tap_flow_set_handle(struct rte_flow *flow)
1302 {
1303 union {
1304 struct rte_flow *flow;
1305 const void *key;
1306 } tmp;
1307 uint32_t handle = 0;
1308
1309 tmp.flow = flow;
1310
1311 if (sizeof(flow) > 4)
1312 handle = rte_jhash(tmp.key, sizeof(flow), 1);
1313 else
1314 handle = (uintptr_t)flow;
1315 /* must be at least 1 to avoid letting the kernel choose one for us */
1316 if (!handle)
1317 handle = 1;
1318 flow->msg.t.tcm_handle = handle;
1319 }
1320
1321 /**
1322 * Free the flow opened file descriptors and allocated memory
1323 *
1324 * @param[in] flow
1325 * Pointer to the flow to free
1326 *
1327 */
1328 static void
1329 tap_flow_free(struct pmd_internals *pmd, struct rte_flow *flow)
1330 {
1331 int i;
1332
1333 if (!flow)
1334 return;
1335
1336 if (pmd->rss_enabled) {
1337 /* Close flow BPF file descriptors */
1338 for (i = 0; i < SEC_MAX; i++)
1339 if (flow->bpf_fd[i] != 0) {
1340 close(flow->bpf_fd[i]);
1341 flow->bpf_fd[i] = 0;
1342 }
1343
1344 /* Release the map key for this RSS rule */
1345 bpf_rss_key(KEY_CMD_RELEASE, &flow->key_idx);
1346 flow->key_idx = 0;
1347 }
1348
1349 /* Free flow allocated memory */
1350 rte_free(flow);
1351 }
1352
1353 /**
1354 * Create a flow.
1355 *
1356 * @see rte_flow_create()
1357 * @see rte_flow_ops
1358 */
1359 static struct rte_flow *
1360 tap_flow_create(struct rte_eth_dev *dev,
1361 const struct rte_flow_attr *attr,
1362 const struct rte_flow_item items[],
1363 const struct rte_flow_action actions[],
1364 struct rte_flow_error *error)
1365 {
1366 struct pmd_internals *pmd = dev->data->dev_private;
1367 struct rte_flow *remote_flow = NULL;
1368 struct rte_flow *flow = NULL;
1369 struct nlmsg *msg = NULL;
1370 int err;
1371
1372 if (!pmd->if_index) {
1373 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1374 NULL,
1375 "can't create rule, ifindex not found");
1376 goto fail;
1377 }
1378 /*
1379 * No rules configured through standard rte_flow should be set on the
1380 * priorities used by implicit rules.
1381 */
1382 if ((attr->group == MAX_GROUP) &&
1383 attr->priority > (MAX_PRIORITY - TAP_REMOTE_MAX_IDX)) {
1384 rte_flow_error_set(
1385 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1386 NULL, "priority value too big");
1387 goto fail;
1388 }
1389 flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
1390 if (!flow) {
1391 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1392 NULL, "cannot allocate memory for rte_flow");
1393 goto fail;
1394 }
1395 msg = &flow->msg;
1396 tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER,
1397 NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1398 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1399 tap_flow_set_handle(flow);
1400 if (priv_flow_process(pmd, attr, items, actions, error, flow, 0))
1401 goto fail;
1402 err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1403 if (err < 0) {
1404 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1405 NULL, "couldn't send request to kernel");
1406 goto fail;
1407 }
1408 err = tap_nl_recv_ack(pmd->nlsk_fd);
1409 if (err < 0) {
1410 TAP_LOG(ERR,
1411 "Kernel refused TC filter rule creation (%d): %s",
1412 errno, strerror(errno));
1413 rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
1414 NULL,
1415 "overlapping rules or Kernel too old for flower support");
1416 goto fail;
1417 }
1418 LIST_INSERT_HEAD(&pmd->flows, flow, next);
1419 /**
1420 * If a remote device is configured, a TC rule with identical items for
1421 * matching must be set on that device, with a single action: redirect
1422 * to the local pmd->if_index.
1423 */
1424 if (pmd->remote_if_index) {
1425 remote_flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
1426 if (!remote_flow) {
1427 rte_flow_error_set(
1428 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1429 "cannot allocate memory for rte_flow");
1430 goto fail;
1431 }
1432 msg = &remote_flow->msg;
1433 /* set the rule if_index for the remote netdevice */
1434 tc_init_msg(
1435 msg, pmd->remote_if_index, RTM_NEWTFILTER,
1436 NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1437 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1438 tap_flow_set_handle(remote_flow);
1439 if (priv_flow_process(pmd, attr, items, NULL,
1440 error, remote_flow, TCA_EGRESS_REDIR)) {
1441 rte_flow_error_set(
1442 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1443 NULL, "rte flow rule validation failed");
1444 goto fail;
1445 }
1446 err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1447 if (err < 0) {
1448 rte_flow_error_set(
1449 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1450 NULL, "Failure sending nl request");
1451 goto fail;
1452 }
1453 err = tap_nl_recv_ack(pmd->nlsk_fd);
1454 if (err < 0) {
1455 TAP_LOG(ERR,
1456 "Kernel refused TC filter rule creation (%d): %s",
1457 errno, strerror(errno));
1458 rte_flow_error_set(
1459 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1460 NULL,
1461 "overlapping rules or Kernel too old for flower support");
1462 goto fail;
1463 }
1464 flow->remote_flow = remote_flow;
1465 }
1466 return flow;
1467 fail:
1468 rte_free(remote_flow);
1469 if (flow)
1470 tap_flow_free(pmd, flow);
1471 return NULL;
1472 }
1473
1474 /**
1475 * Destroy a flow using pointer to pmd_internal.
1476 *
1477 * @param[in, out] pmd
1478 * Pointer to private structure.
1479 * @param[in] flow
1480 * Pointer to the flow to destroy.
1481 * @param[in, out] error
1482 * Pointer to the flow error handler
1483 *
1484 * @return 0 if the flow could be destroyed, -1 otherwise.
1485 */
1486 static int
1487 tap_flow_destroy_pmd(struct pmd_internals *pmd,
1488 struct rte_flow *flow,
1489 struct rte_flow_error *error)
1490 {
1491 struct rte_flow *remote_flow = flow->remote_flow;
1492 int ret = 0;
1493
1494 LIST_REMOVE(flow, next);
1495 flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1496 flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
1497
1498 ret = tap_nl_send(pmd->nlsk_fd, &flow->msg.nh);
1499 if (ret < 0) {
1500 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1501 NULL, "couldn't send request to kernel");
1502 goto end;
1503 }
1504 ret = tap_nl_recv_ack(pmd->nlsk_fd);
1505 /* If errno is ENOENT, the rule is already no longer in the kernel. */
1506 if (ret < 0 && errno == ENOENT)
1507 ret = 0;
1508 if (ret < 0) {
1509 TAP_LOG(ERR,
1510 "Kernel refused TC filter rule deletion (%d): %s",
1511 errno, strerror(errno));
1512 rte_flow_error_set(
1513 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1514 "couldn't receive kernel ack to our request");
1515 goto end;
1516 }
1517
1518 if (remote_flow) {
1519 remote_flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1520 remote_flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
1521
1522 ret = tap_nl_send(pmd->nlsk_fd, &remote_flow->msg.nh);
1523 if (ret < 0) {
1524 rte_flow_error_set(
1525 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1526 NULL, "Failure sending nl request");
1527 goto end;
1528 }
1529 ret = tap_nl_recv_ack(pmd->nlsk_fd);
1530 if (ret < 0 && errno == ENOENT)
1531 ret = 0;
1532 if (ret < 0) {
1533 TAP_LOG(ERR,
1534 "Kernel refused TC filter rule deletion (%d): %s",
1535 errno, strerror(errno));
1536 rte_flow_error_set(
1537 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1538 NULL, "Failure trying to receive nl ack");
1539 goto end;
1540 }
1541 }
1542 end:
1543 rte_free(remote_flow);
1544 tap_flow_free(pmd, flow);
1545 return ret;
1546 }
1547
1548 /**
1549 * Destroy a flow.
1550 *
1551 * @see rte_flow_destroy()
1552 * @see rte_flow_ops
1553 */
1554 static int
1555 tap_flow_destroy(struct rte_eth_dev *dev,
1556 struct rte_flow *flow,
1557 struct rte_flow_error *error)
1558 {
1559 struct pmd_internals *pmd = dev->data->dev_private;
1560
1561 return tap_flow_destroy_pmd(pmd, flow, error);
1562 }
1563
1564 /**
1565 * Enable/disable flow isolation.
1566 *
1567 * @see rte_flow_isolate()
1568 * @see rte_flow_ops
1569 */
1570 static int
1571 tap_flow_isolate(struct rte_eth_dev *dev,
1572 int set,
1573 struct rte_flow_error *error __rte_unused)
1574 {
1575 struct pmd_internals *pmd = dev->data->dev_private;
1576 struct pmd_process_private *process_private = dev->process_private;
1577
1578 /* normalize 'set' variable to contain 0 or 1 values */
1579 if (set)
1580 set = 1;
1581 /* if already in the right isolation mode - nothing to do */
1582 if ((set ^ pmd->flow_isolate) == 0)
1583 return 0;
1584 /* mark the isolation mode for tap_flow_implicit_create() */
1585 pmd->flow_isolate = set;
1586 /*
1587 * If netdevice is there, setup appropriate flow rules immediately.
1588 * Otherwise it will be set when bringing up the netdevice (tun_alloc).
1589 */
1590 if (!process_private->rxq_fds[0])
1591 return 0;
1592 if (set) {
1593 struct rte_flow *remote_flow;
1594
1595 while (1) {
1596 remote_flow = LIST_FIRST(&pmd->implicit_flows);
1597 if (!remote_flow)
1598 break;
1599 /*
1600 * Remove all implicit rules on the remote.
1601 * Keep the local rule to redirect packets on TX.
1602 * Keep also the last implicit local rule: ISOLATE.
1603 */
1604 if (remote_flow->msg.t.tcm_ifindex == pmd->if_index)
1605 break;
1606 if (tap_flow_destroy_pmd(pmd, remote_flow, NULL) < 0)
1607 goto error;
1608 }
1609 /* Switch the TC rule according to pmd->flow_isolate */
1610 if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
1611 goto error;
1612 } else {
1613 /* Switch the TC rule according to pmd->flow_isolate */
1614 if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
1615 goto error;
1616 if (!pmd->remote_if_index)
1617 return 0;
1618 if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0)
1619 goto error;
1620 if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
1621 goto error;
1622 if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0)
1623 goto error;
1624 if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0)
1625 goto error;
1626 if (dev->data->promiscuous &&
1627 tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC) < 0)
1628 goto error;
1629 if (dev->data->all_multicast &&
1630 tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI) < 0)
1631 goto error;
1632 }
1633 return 0;
1634 error:
1635 pmd->flow_isolate = 0;
1636 return rte_flow_error_set(
1637 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1638 "TC rule creation failed");
1639 }
1640
1641 /**
1642 * Destroy all flows.
1643 *
1644 * @see rte_flow_flush()
1645 * @see rte_flow_ops
1646 */
1647 int
1648 tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1649 {
1650 struct pmd_internals *pmd = dev->data->dev_private;
1651 struct rte_flow *flow;
1652
1653 while (!LIST_EMPTY(&pmd->flows)) {
1654 flow = LIST_FIRST(&pmd->flows);
1655 if (tap_flow_destroy(dev, flow, error) < 0)
1656 return -1;
1657 }
1658 return 0;
1659 }
1660
1661 /**
1662 * Add an implicit flow rule on the remote device to make sure traffic gets to
1663 * the tap netdevice from there.
1664 *
1665 * @param pmd
1666 * Pointer to private structure.
1667 * @param[in] idx
1668 * The idx in the implicit_rte_flows array specifying which rule to apply.
1669 *
1670 * @return -1 if the rule couldn't be applied, 0 otherwise.
1671 */
1672 int tap_flow_implicit_create(struct pmd_internals *pmd,
1673 enum implicit_rule_index idx)
1674 {
1675 uint16_t flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE;
1676 struct rte_flow_action *actions = implicit_rte_flows[idx].actions;
1677 struct rte_flow_action isolate_actions[2] = {
1678 [1] = {
1679 .type = RTE_FLOW_ACTION_TYPE_END,
1680 },
1681 };
1682 struct rte_flow_item *items = implicit_rte_flows[idx].items;
1683 struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr;
1684 struct rte_flow_item_eth eth_local = { .type = 0 };
1685 uint16_t if_index = pmd->remote_if_index;
1686 struct rte_flow *remote_flow = NULL;
1687 struct nlmsg *msg = NULL;
1688 int err = 0;
1689 struct rte_flow_item items_local[2] = {
1690 [0] = {
1691 .type = items[0].type,
1692 			.spec = &eth_local,
1693 .mask = items[0].mask,
1694 },
1695 [1] = {
1696 .type = items[1].type,
1697 }
1698 };
1699
1700 remote_flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
1701 if (!remote_flow) {
1702 TAP_LOG(ERR, "Cannot allocate memory for rte_flow");
1703 goto fail;
1704 }
1705 msg = &remote_flow->msg;
1706 if (idx == TAP_REMOTE_TX) {
1707 if_index = pmd->if_index;
1708 } else if (idx == TAP_ISOLATE) {
1709 if_index = pmd->if_index;
1710 /* Don't be exclusive for this rule, it can be changed later. */
1711 flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
1712 isolate_actions[0].type = pmd->flow_isolate ?
1713 RTE_FLOW_ACTION_TYPE_DROP :
1714 RTE_FLOW_ACTION_TYPE_PASSTHRU;
1715 actions = isolate_actions;
1716 } else if (idx == TAP_REMOTE_LOCAL_MAC) {
1717 /*
1718 * eth addr couldn't be set in implicit_rte_flows[] as it is not
1719 * known at compile time.
1720 */
1721 memcpy(ð_local.dst, &pmd->eth_addr, sizeof(pmd->eth_addr));
1722 items = items_local;
1723 }
1724 tc_init_msg(msg, if_index, RTM_NEWTFILTER, flags);
1725 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1726 /*
1727 * The ISOLATE rule is always present and must have a static handle, as
1728 * the action is changed whether the feature is enabled (DROP) or
1729 * disabled (PASSTHRU).
1730 * There is just one REMOTE_PROMISCUOUS rule in all cases. It should
1731 * have a static handle such that adding it twice will fail with EEXIST
1732 * with any kernel version. Remark: old kernels may falsely accept the
1733 * same REMOTE_PROMISCUOUS rules if they had different handles.
1734 */
1735 if (idx == TAP_ISOLATE)
1736 remote_flow->msg.t.tcm_handle = ISOLATE_HANDLE;
1737 else if (idx == TAP_REMOTE_PROMISC)
1738 remote_flow->msg.t.tcm_handle = REMOTE_PROMISCUOUS_HANDLE;
1739 else
1740 tap_flow_set_handle(remote_flow);
1741 if (priv_flow_process(pmd, attr, items, actions, NULL,
1742 remote_flow, implicit_rte_flows[idx].mirred)) {
1743 TAP_LOG(ERR, "rte flow rule validation failed");
1744 goto fail;
1745 }
1746 err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1747 if (err < 0) {
1748 TAP_LOG(ERR, "Failure sending nl request");
1749 goto fail;
1750 }
1751 err = tap_nl_recv_ack(pmd->nlsk_fd);
1752 if (err < 0) {
1753 /* Silently ignore re-entering existing rule */
1754 if (errno == EEXIST)
1755 goto success;
1756 TAP_LOG(ERR,
1757 "Kernel refused TC filter rule creation (%d): %s",
1758 errno, strerror(errno));
1759 goto fail;
1760 }
1761 LIST_INSERT_HEAD(&pmd->implicit_flows, remote_flow, next);
1762 success:
1763 return 0;
1764 fail:
1765 rte_free(remote_flow);
1766 return -1;
1767 }
1768
1769 /**
1770 * Remove specific implicit flow rule on the remote device.
1771 *
1772 * @param[in, out] pmd
1773 * Pointer to private structure.
1774 * @param[in] idx
1775 * The idx in the implicit_rte_flows array specifying which rule to remove.
1776 *
1777  * @return -1 if the implicit rule couldn't be removed, 0 otherwise.
1778 */
1779 int tap_flow_implicit_destroy(struct pmd_internals *pmd,
1780 enum implicit_rule_index idx)
1781 {
1782 struct rte_flow *remote_flow;
1783 int cur_prio = -1;
1784 int idx_prio = implicit_rte_flows[idx].attr.priority + PRIORITY_OFFSET;
1785
1786 for (remote_flow = LIST_FIRST(&pmd->implicit_flows);
1787 remote_flow;
1788 remote_flow = LIST_NEXT(remote_flow, next)) {
1789 cur_prio = (remote_flow->msg.t.tcm_info >> 16) & PRIORITY_MASK;
1790 if (cur_prio != idx_prio)
1791 continue;
1792 return tap_flow_destroy_pmd(pmd, remote_flow, NULL);
1793 }
1794 return 0;
1795 }
1796
1797 /**
1798 * Destroy all implicit flows.
1799 *
1800 * @see rte_flow_flush()
1801 */
1802 int
1803 tap_flow_implicit_flush(struct pmd_internals *pmd, struct rte_flow_error *error)
1804 {
1805 struct rte_flow *remote_flow;
1806
1807 while (!LIST_EMPTY(&pmd->implicit_flows)) {
1808 remote_flow = LIST_FIRST(&pmd->implicit_flows);
1809 if (tap_flow_destroy_pmd(pmd, remote_flow, error) < 0)
1810 return -1;
1811 }
1812 return 0;
1813 }
1814
1815 #define MAX_RSS_KEYS 256
1816 #define KEY_IDX_OFFSET (3 * MAX_RSS_KEYS)
1817 #define SEC_NAME_CLS_Q "cls_q"
1818
1819 static const char *sec_name[SEC_MAX] = {
1820 [SEC_L3_L4] = "l3_l4",
1821 };
1822
1823 /**
1824 * Enable RSS on tap: create TC rules for queuing.
1825 *
1826 * @param[in, out] pmd
1827 * Pointer to private structure.
1828 *
1829 * @param[in] attr
1830  *   Pointer to the flow attributes, used to retrieve the flow group.
1831 *
1832 * @param[out] error
1833 * Pointer to error reporting if not NULL.
1834 *
1835 * @return 0 on success, negative value on failure.
1836 */
1837 static int rss_enable(struct pmd_internals *pmd,
1838 const struct rte_flow_attr *attr,
1839 struct rte_flow_error *error)
1840 {
1841 struct rte_flow *rss_flow = NULL;
1842 struct nlmsg *msg = NULL;
1843 /* 4096 is the maximum number of instructions for a BPF program */
1844 char annotation[64];
1845 int i;
1846 int err = 0;
1847
1848 /* unlimit locked memory */
1849 struct rlimit memlock_limit = {
1850 .rlim_cur = RLIM_INFINITY,
1851 .rlim_max = RLIM_INFINITY,
1852 };
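	/*
	 * BPF programs and maps are accounted against RLIMIT_MEMLOCK on older
	 * kernels, so lift the limit before the bpf() syscalls below.
	 */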
1853 setrlimit(RLIMIT_MEMLOCK, &memlock_limit);
1854
1855 /* Get a new map key for a new RSS rule */
1856 err = bpf_rss_key(KEY_CMD_INIT, NULL);
1857 if (err < 0) {
1858 rte_flow_error_set(
1859 error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1860 "Failed to initialize BPF RSS keys");
1861
1862 return -1;
1863 }
1864
1865 /*
1866 * Create BPF RSS MAP
1867 */
1868 pmd->map_fd = tap_flow_bpf_rss_map_create(sizeof(__u32), /* key size */
1869 sizeof(struct rss_key),
1870 MAX_RSS_KEYS);
1871 if (pmd->map_fd < 0) {
1872 TAP_LOG(ERR,
1873 "Failed to create BPF map (%d): %s",
1874 errno, strerror(errno));
1875 rte_flow_error_set(
1876 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1877 "Kernel too old or not configured "
1878 "to support BPF maps");
1879
1880 return -ENOTSUP;
1881 }
1882
1883 /*
1884 * Add a rule per queue to match reclassified packets and direct them to
1885 * the correct queue.
1886 */
1887 for (i = 0; i < pmd->dev->data->nb_rx_queues; i++) {
1888 pmd->bpf_fd[i] = tap_flow_bpf_cls_q(i);
1889 if (pmd->bpf_fd[i] < 0) {
1890 TAP_LOG(ERR,
1891 "Failed to load BPF section %s for queue %d",
1892 SEC_NAME_CLS_Q, i);
1893 rte_flow_error_set(
1894 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1895 NULL,
1896 "Kernel too old or not configured "
1897 "to support BPF programs loading");
1898
1899 return -ENOTSUP;
1900 }
1901
1902 rss_flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
1903 if (!rss_flow) {
1904 TAP_LOG(ERR,
1905 "Cannot allocate memory for rte_flow");
1906 return -1;
1907 }
1908 msg = &rss_flow->msg;
1909 tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER, NLM_F_REQUEST |
1910 NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1911 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1912 tap_flow_set_handle(rss_flow);
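		/*
		 * Build the TC filter priority: the rte_flow group sits in the
		 * upper bits (GROUP_SHIFT) and the per-queue priority
		 * (queue index + PRIORITY_OFFSET) below it; the result is
		 * stored in the major part of tcm_info.
		 */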
1913 uint16_t group = attr->group << GROUP_SHIFT;
1914 uint16_t prio = group | (i + PRIORITY_OFFSET);
1915 msg->t.tcm_info = TC_H_MAKE(prio << 16, msg->t.tcm_info);
1916 msg->t.tcm_parent = TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
1917
1918 tap_nlattr_add(&msg->nh, TCA_KIND, sizeof("bpf"), "bpf");
1919 if (tap_nlattr_nested_start(msg, TCA_OPTIONS) < 0)
1920 return -1;
1921 tap_nlattr_add32(&msg->nh, TCA_BPF_FD, pmd->bpf_fd[i]);
1922 snprintf(annotation, sizeof(annotation), "[%s%d]",
1923 SEC_NAME_CLS_Q, i);
1924 tap_nlattr_add(&msg->nh, TCA_BPF_NAME, strlen(annotation) + 1,
1925 annotation);
1926 /* Actions */
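		/*
		 * A single skbedit action rewrites the skb queue mapping to
		 * queue i and lets the packet continue (TC_ACT_PIPE), so
		 * traffic matched by this cls_q program lands on RX queue i.
		 */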
1927 {
1928 struct action_data adata = {
1929 .id = "skbedit",
1930 .skbedit = {
1931 .skbedit = {
1932 .action = TC_ACT_PIPE,
1933 },
1934 .queue = i,
1935 },
1936 };
1937 if (add_actions(rss_flow, 1, &adata, TCA_BPF_ACT) < 0)
1938 return -1;
1939 }
1940 tap_nlattr_nested_finish(msg); /* nested TCA_OPTIONS */
1941
1942 /* Netlink message is now ready to be sent */
1943 if (tap_nl_send(pmd->nlsk_fd, &msg->nh) < 0)
1944 return -1;
1945 err = tap_nl_recv_ack(pmd->nlsk_fd);
1946 if (err < 0) {
1947 TAP_LOG(ERR,
1948 "Kernel refused TC filter rule creation (%d): %s",
1949 errno, strerror(errno));
1950 return err;
1951 }
1952 LIST_INSERT_HEAD(&pmd->rss_flows, rss_flow, next);
1953 }
1954
1955 pmd->rss_enabled = 1;
1956 return err;
1957 }
1958
1959 /**
1960  * Manage the BPF RSS key repository with operations: init, get, release, deinit
1961  *
1962  * @param[in] cmd
1963  *   Command on RSS keys: init, get, release, deinit
1964 *
1965 * @param[in, out] key_idx
1966 * Pointer to RSS Key index (out for get command, in for release command)
1967 *
1968  * @return -1 if the RSS keys could not be initialized, obtained or released, 0 otherwise.
1969 */
1970 static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx)
1971 {
1972 __u32 i;
1973 int err = 0;
1974 static __u32 num_used_keys;
1975 static __u32 rss_keys[MAX_RSS_KEYS] = {KEY_STAT_UNSPEC};
1976 static __u32 rss_keys_initialized;
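	/* The repository is static: a single key pool shared by every tap port in the process. */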
1977 __u32 key;
1978
1979 switch (cmd) {
1980 case KEY_CMD_GET:
1981 if (!rss_keys_initialized) {
1982 err = -1;
1983 break;
1984 }
1985
1986 if (num_used_keys == RTE_DIM(rss_keys)) {
1987 err = -1;
1988 break;
1989 }
1990
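		/*
		 * Probe linearly from num_used_keys until a free slot is
		 * found; the full-repository case was rejected above, so the
		 * loop always terminates.
		 */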
1991 *key_idx = num_used_keys % RTE_DIM(rss_keys);
1992 while (rss_keys[*key_idx] == KEY_STAT_USED)
1993 *key_idx = (*key_idx + 1) % RTE_DIM(rss_keys);
1994
1995 rss_keys[*key_idx] = KEY_STAT_USED;
1996
1997 /*
1998 * Add an offset to key_idx in order to handle a case of
1999 * RSS and non RSS flows mixture.
2000 * If a non RSS flow is destroyed it has an eBPF map
2001 * index 0 (initialized on flow creation) and might
2002 * unintentionally remove RSS entry 0 from eBPF map.
2003 * To avoid this issue, add an offset to the real index
2004 * during a KEY_CMD_GET operation and subtract this offset
2005 * during a KEY_CMD_RELEASE operation in order to restore
2006 * the real index.
2007 */
2008 *key_idx += KEY_IDX_OFFSET;
2009 num_used_keys++;
2010 break;
2011
2012 case KEY_CMD_RELEASE:
2013 if (!rss_keys_initialized)
2014 break;
2015
2016 /*
2017 * Subtract offset to restore real key index
2018 * If a non RSS flow is falsely trying to release map
2019 * entry 0 - the offset subtraction will calculate the real
2020 * map index as an out-of-range value and the release operation
2021 * will be silently ignored.
2022 */
2023 key = *key_idx - KEY_IDX_OFFSET;
2024 if (key >= RTE_DIM(rss_keys))
2025 break;
2026
2027 if (rss_keys[key] == KEY_STAT_USED) {
2028 rss_keys[key] = KEY_STAT_AVAILABLE;
2029 num_used_keys--;
2030 }
2031 break;
2032
2033 case KEY_CMD_INIT:
2034 for (i = 0; i < RTE_DIM(rss_keys); i++)
2035 rss_keys[i] = KEY_STAT_AVAILABLE;
2036
2037 rss_keys_initialized = 1;
2038 num_used_keys = 0;
2039 break;
2040
2041 case KEY_CMD_DEINIT:
2042 for (i = 0; i < RTE_DIM(rss_keys); i++)
2043 rss_keys[i] = KEY_STAT_UNSPEC;
2044
2045 rss_keys_initialized = 0;
2046 num_used_keys = 0;
2047 break;
2048
2049 default:
2050 break;
2051 }
2052
2053 return err;
2054 }
2055
2056 /**
2057 * Add RSS hash calculations and queue selection
2058 *
2059 * @param[in, out] pmd
2060 * Pointer to internal structure. Used to set/get RSS map fd
2061 *
2062 * @param[in] rss
2063 * Pointer to RSS flow actions
2064 *
2065 * @param[out] error
2066 * Pointer to error reporting if not NULL.
2067 *
2068 * @return 0 on success, negative value on failure
2069 */
2070 static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
2071 const struct rte_flow_action_rss *rss,
2072 struct rte_flow_error *error)
2073 {
2074 /* 4096 is the maximum number of instructions for a BPF program */
2075 unsigned int i;
2076 int err;
2077 struct rss_key rss_entry = { .hash_fields = 0,
2078 .key_size = 0 };
2079
2080 /* Check supported RSS features */
2081 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2082 return rte_flow_error_set
2083 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2084 "non-default RSS hash functions are not supported");
2085 if (rss->level)
2086 return rte_flow_error_set
2087 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2088 "a nonzero RSS encapsulation level is not supported");
2089
2090 /* Get a new map key for a new RSS rule */
2091 err = bpf_rss_key(KEY_CMD_GET, &flow->key_idx);
2092 if (err < 0) {
2093 rte_flow_error_set(
2094 error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2095 "Failed to get BPF RSS key");
2096
2097 return -1;
2098 }
2099
2100 /* Update RSS map entry with queues */
2101 rss_entry.nb_queues = rss->queue_num;
2102 for (i = 0; i < rss->queue_num; i++)
2103 rss_entry.queues[i] = rss->queue[i];
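	/*
	 * Hash on both IPv4 and IPv6 L3/L4 header fields; the rss->types
	 * requested by the application are not consulted here.
	 */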
2104 rss_entry.hash_fields =
2105 (1 << HASH_FIELD_IPV4_L3_L4) | (1 << HASH_FIELD_IPV6_L3_L4);
2106
2107 /* Add this RSS entry to map */
2108 err = tap_flow_bpf_update_rss_elem(pmd->map_fd,
2109 &flow->key_idx, &rss_entry);
2110
2111 if (err) {
2112 TAP_LOG(ERR,
2113 "Failed to update BPF map entry #%u (%d): %s",
2114 flow->key_idx, errno, strerror(errno));
2115 rte_flow_error_set(
2116 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2117 "Kernel too old or not configured "
2118 "to support BPF maps updates");
2119
2120 return -ENOTSUP;
2121 }
2122
2123
2124 	/*
2125 	 * Load the BPF program that computes the L3/L4 hash for this key_idx
2126 	 */
2127
2128 flow->bpf_fd[SEC_L3_L4] =
2129 tap_flow_bpf_calc_l3_l4_hash(flow->key_idx, pmd->map_fd);
2130 if (flow->bpf_fd[SEC_L3_L4] < 0) {
2131 TAP_LOG(ERR,
2132 "Failed to load BPF section %s (%d): %s",
2133 sec_name[SEC_L3_L4], errno, strerror(errno));
2134 rte_flow_error_set(
2135 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2136 "Kernel too old or not configured "
2137 "to support BPF program loading");
2138
2139 return -ENOTSUP;
2140 }
2141
2142 /* Actions */
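	/*
	 * Attach the l3_l4 hashing program as a BPF action on the flower
	 * filter: it computes the RSS hash for matched packets, selects one
	 * of the queues recorded in the map entry at key_idx and continues
	 * with TC_ACT_PIPE, leaving final steering to the per-queue cls_q
	 * filters installed by rss_enable().
	 */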
2143 {
2144 struct action_data adata[] = {
2145 {
2146 .id = "bpf",
2147 .bpf = {
2148 .bpf_fd = flow->bpf_fd[SEC_L3_L4],
2149 .annotation = sec_name[SEC_L3_L4],
2150 .bpf = {
2151 .action = TC_ACT_PIPE,
2152 },
2153 },
2154 },
2155 };
2156
2157 if (add_actions(flow, RTE_DIM(adata), adata,
2158 TCA_FLOWER_ACT) < 0)
2159 return -1;
2160 }
2161
2162 return 0;
2163 }
2164
2165 /**
2166 * Get rte_flow operations.
2167 *
2168 * @param dev
2169 * Pointer to Ethernet device structure.
2170 * @param ops
2171 * Pointer to operation-specific structure.
2172 *
2173 * @return
2174 * 0 on success, negative errno value on failure.
2175 */
2176 int
2177 tap_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
2178 const struct rte_flow_ops **ops)
2179 {
2180 *ops = &tap_flow_ops;
2181 return 0;
2182 }
2183