1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
3 */
4 #include <stdint.h>
5 #include <stdlib.h>
6 #include <string.h>
7
8 #include <rte_common.h>
9 #include <rte_byteorder.h>
10 #include <rte_malloc.h>
11 #include <rte_string_fns.h>
12 #include <rte_flow.h>
13 #include <rte_flow_driver.h>
14 #include <rte_tailq.h>
15
16 #include "rte_eth_softnic_internals.h"
17 #include "rte_eth_softnic.h"
18
19 #define rte_htons rte_cpu_to_be_16
20 #define rte_htonl rte_cpu_to_be_32
21
22 #define rte_ntohs rte_be_to_cpu_16
23 #define rte_ntohl rte_be_to_cpu_32
24
/* Linear lookup of a flow in the table's flow list by exact byte-wise
 * comparison of the rule match structure.
 *
 * Returns the matching flow, or NULL if no flow has an identical match.
 */
static struct rte_flow *
softnic_flow_find(struct softnic_table *table,
	struct softnic_table_rule_match *rule_match)
{
	struct rte_flow *flow;

	TAILQ_FOREACH(flow, &table->flows, node)
		if (memcmp(&flow->match, rule_match, sizeof(*rule_match)) == 0)
			return flow;

	return NULL;
}
37
38 int
flow_attr_map_set(struct pmd_internals * softnic,uint32_t group_id,int ingress,const char * pipeline_name,uint32_t table_id)39 flow_attr_map_set(struct pmd_internals *softnic,
40 uint32_t group_id,
41 int ingress,
42 const char *pipeline_name,
43 uint32_t table_id)
44 {
45 struct pipeline *pipeline;
46 struct flow_attr_map *map;
47
48 if (group_id >= SOFTNIC_FLOW_MAX_GROUPS ||
49 pipeline_name == NULL)
50 return -1;
51
52 pipeline = softnic_pipeline_find(softnic, pipeline_name);
53 if (pipeline == NULL ||
54 table_id >= pipeline->n_tables)
55 return -1;
56
57 map = (ingress) ? &softnic->flow.ingress_map[group_id] :
58 &softnic->flow.egress_map[group_id];
59 strlcpy(map->pipeline_name, pipeline_name, sizeof(map->pipeline_name));
60 map->table_id = table_id;
61 map->valid = 1;
62
63 return 0;
64 }
65
66 struct flow_attr_map *
flow_attr_map_get(struct pmd_internals * softnic,uint32_t group_id,int ingress)67 flow_attr_map_get(struct pmd_internals *softnic,
68 uint32_t group_id,
69 int ingress)
70 {
71 if (group_id >= SOFTNIC_FLOW_MAX_GROUPS)
72 return NULL;
73
74 return (ingress) ? &softnic->flow.ingress_map[group_id] :
75 &softnic->flow.egress_map[group_id];
76 }
77
/* Resolve flow attributes (group ID + direction) to the target pipeline
 * name and table ID via the attribute map.
 *
 * Exactly one of attr->ingress / attr->egress must be set. On success,
 * *pipeline_name and *table_id are filled in (each output is optional).
 *
 * Returns 0 on success, or a negative errno set via rte_flow_error_set().
 */
static int
flow_pipeline_table_get(struct pmd_internals *softnic,
	const struct rte_flow_attr *attr,
	const char **pipeline_name,
	uint32_t *table_id,
	struct rte_flow_error *error)
{
	struct flow_attr_map *map;

	if (attr == NULL)
		return rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL,
			"Null attr");

	if (!attr->ingress && !attr->egress)
		return rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr,
			"Ingress/egress not specified");

	if (attr->ingress && attr->egress)
		return rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr,
			"Setting both ingress and egress is not allowed");

	/* The map entry must have been registered via flow_attr_map_set(). */
	map = flow_attr_map_get(softnic,
		attr->group,
		attr->ingress);
	if (map == NULL ||
	    map->valid == 0)
		return rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr,
			"Invalid group ID");

	if (pipeline_name)
		*pipeline_name = map->pipeline_name;

	if (table_id)
		*table_id = map->table_id;

	return 0;
}
127
/* Scratch buffer big enough to hold the spec/mask/last of any supported
 * flow item type. The raw[] member fixes the size at the maximum rule
 * match key size.
 */
union flow_item {
	uint8_t raw[TABLE_RULE_MATCH_SIZE_MAX];
	struct rte_flow_item_eth eth;
	struct rte_flow_item_vlan vlan;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_icmp icmp;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_sctp sctp;
	struct rte_flow_item_vxlan vxlan;
	struct rte_flow_item_e_tag e_tag;
	struct rte_flow_item_nvgre nvgre;
	struct rte_flow_item_mpls mpls;
	struct rte_flow_item_gre gre;
	struct rte_flow_item_gtp gtp;
	struct rte_flow_item_esp esp;
	struct rte_flow_item_geneve geneve;
	struct rte_flow_item_vxlan_gpe vxlan_gpe;
	struct rte_flow_item_arp_eth_ipv4 arp_eth_ipv4;
	struct rte_flow_item_ipv6_ext ipv6_ext;
	struct rte_flow_item_icmp6 icmp6;
	struct rte_flow_item_icmp6_nd_ns icmp6_nd_ns;
	struct rte_flow_item_icmp6_nd_na icmp6_nd_na;
	struct rte_flow_item_icmp6_nd_opt icmp6_nd_opt;
	struct rte_flow_item_icmp6_nd_opt_sla_eth icmp6_nd_opt_sla_eth;
	struct rte_flow_item_icmp6_nd_opt_tla_eth icmp6_nd_opt_tla_eth;
};

/* All-zeroes default mask for RAW items (no byte is relevant by default). */
static const union flow_item flow_item_raw_mask;
158
/* Map a flow item type to its default (all-relevant) mask and the number
 * of bytes it contributes to the match key.
 *
 * Returns 1 (TRUE) for supported protocol items, 0 (FALSE) otherwise.
 *
 * NOTE(review): for ETH/VLAN/IPV4/IPV6 the reported size is the wire
 * header size rather than sizeof the rte_flow item struct — presumably
 * because the match key stores packet header bytes; confirm against the
 * pipeline table key layout.
 */
static int
flow_item_is_proto(enum rte_flow_item_type type,
	const void **mask,
	size_t *size)
{
	switch (type) {
	case RTE_FLOW_ITEM_TYPE_RAW:
		*mask = &flow_item_raw_mask;
		*size = sizeof(flow_item_raw_mask);
		return 1; /* TRUE */

	case RTE_FLOW_ITEM_TYPE_ETH:
		*mask = &rte_flow_item_eth_mask;
		*size = sizeof(struct rte_ether_hdr);
		return 1; /* TRUE */

	case RTE_FLOW_ITEM_TYPE_VLAN:
		*mask = &rte_flow_item_vlan_mask;
		*size = sizeof(struct rte_vlan_hdr);
		return 1;

	case RTE_FLOW_ITEM_TYPE_IPV4:
		*mask = &rte_flow_item_ipv4_mask;
		*size = sizeof(struct rte_ipv4_hdr);
		return 1;

	case RTE_FLOW_ITEM_TYPE_IPV6:
		*mask = &rte_flow_item_ipv6_mask;
		*size = sizeof(struct rte_ipv6_hdr);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP:
		*mask = &rte_flow_item_icmp_mask;
		*size = sizeof(struct rte_flow_item_icmp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_UDP:
		*mask = &rte_flow_item_udp_mask;
		*size = sizeof(struct rte_flow_item_udp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_TCP:
		*mask = &rte_flow_item_tcp_mask;
		*size = sizeof(struct rte_flow_item_tcp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_SCTP:
		*mask = &rte_flow_item_sctp_mask;
		*size = sizeof(struct rte_flow_item_sctp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_VXLAN:
		*mask = &rte_flow_item_vxlan_mask;
		*size = sizeof(struct rte_flow_item_vxlan);
		return 1;

	case RTE_FLOW_ITEM_TYPE_E_TAG:
		*mask = &rte_flow_item_e_tag_mask;
		*size = sizeof(struct rte_flow_item_e_tag);
		return 1;

	case RTE_FLOW_ITEM_TYPE_NVGRE:
		*mask = &rte_flow_item_nvgre_mask;
		*size = sizeof(struct rte_flow_item_nvgre);
		return 1;

	case RTE_FLOW_ITEM_TYPE_MPLS:
		*mask = &rte_flow_item_mpls_mask;
		*size = sizeof(struct rte_flow_item_mpls);
		return 1;

	case RTE_FLOW_ITEM_TYPE_GRE:
		*mask = &rte_flow_item_gre_mask;
		*size = sizeof(struct rte_flow_item_gre);
		return 1;

	/* GTP-C and GTP-U share the generic GTP item layout. */
	case RTE_FLOW_ITEM_TYPE_GTP:
	case RTE_FLOW_ITEM_TYPE_GTPC:
	case RTE_FLOW_ITEM_TYPE_GTPU:
		*mask = &rte_flow_item_gtp_mask;
		*size = sizeof(struct rte_flow_item_gtp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ESP:
		*mask = &rte_flow_item_esp_mask;
		*size = sizeof(struct rte_flow_item_esp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_GENEVE:
		*mask = &rte_flow_item_geneve_mask;
		*size = sizeof(struct rte_flow_item_geneve);
		return 1;

	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		*mask = &rte_flow_item_vxlan_gpe_mask;
		*size = sizeof(struct rte_flow_item_vxlan_gpe);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
		*mask = &rte_flow_item_arp_eth_ipv4_mask;
		*size = sizeof(struct rte_flow_item_arp_eth_ipv4);
		return 1;

	case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
		*mask = &rte_flow_item_ipv6_ext_mask;
		*size = sizeof(struct rte_flow_item_ipv6_ext);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6:
		*mask = &rte_flow_item_icmp6_mask;
		*size = sizeof(struct rte_flow_item_icmp6);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS:
		*mask = &rte_flow_item_icmp6_nd_ns_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_ns);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA:
		*mask = &rte_flow_item_icmp6_nd_na_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_na);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT:
		*mask = &rte_flow_item_icmp6_nd_opt_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_opt);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH:
		*mask = &rte_flow_item_icmp6_nd_opt_sla_eth_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH:
		*mask = &rte_flow_item_icmp6_nd_opt_tla_eth_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth);
		return 1;

	default: return 0; /* FALSE */
	}
}
300
301 static int
flow_item_raw_preprocess(const struct rte_flow_item * item,union flow_item * item_spec,union flow_item * item_mask,size_t * item_size,int * item_disabled,struct rte_flow_error * error)302 flow_item_raw_preprocess(const struct rte_flow_item *item,
303 union flow_item *item_spec,
304 union flow_item *item_mask,
305 size_t *item_size,
306 int *item_disabled,
307 struct rte_flow_error *error)
308 {
309 const struct rte_flow_item_raw *item_raw_spec = item->spec;
310 const struct rte_flow_item_raw *item_raw_mask = item->mask;
311 const uint8_t *pattern;
312 const uint8_t *pattern_mask;
313 uint8_t *spec = (uint8_t *)item_spec;
314 uint8_t *mask = (uint8_t *)item_mask;
315 size_t pattern_length, pattern_offset, i;
316 int disabled;
317
318 if (!item->spec)
319 return rte_flow_error_set(error,
320 ENOTSUP,
321 RTE_FLOW_ERROR_TYPE_ITEM,
322 item,
323 "RAW: Null specification");
324
325 if (item->last)
326 return rte_flow_error_set(error,
327 ENOTSUP,
328 RTE_FLOW_ERROR_TYPE_ITEM,
329 item,
330 "RAW: Range not allowed (last must be NULL)");
331
332 if (item_raw_spec->relative == 0)
333 return rte_flow_error_set(error,
334 ENOTSUP,
335 RTE_FLOW_ERROR_TYPE_ITEM,
336 item,
337 "RAW: Absolute offset not supported");
338
339 if (item_raw_spec->search)
340 return rte_flow_error_set(error,
341 ENOTSUP,
342 RTE_FLOW_ERROR_TYPE_ITEM,
343 item,
344 "RAW: Search not supported");
345
346 if (item_raw_spec->offset < 0)
347 return rte_flow_error_set(error,
348 ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
349 item,
350 "RAW: Negative offset not supported");
351
352 if (item_raw_spec->length == 0)
353 return rte_flow_error_set(error,
354 ENOTSUP,
355 RTE_FLOW_ERROR_TYPE_ITEM,
356 item,
357 "RAW: Zero pattern length");
358
359 if (item_raw_spec->offset + item_raw_spec->length >
360 TABLE_RULE_MATCH_SIZE_MAX)
361 return rte_flow_error_set(error,
362 ENOTSUP,
363 RTE_FLOW_ERROR_TYPE_ITEM,
364 item,
365 "RAW: Item too big");
366
367 if (!item_raw_spec->pattern && item_raw_mask && item_raw_mask->pattern)
368 return rte_flow_error_set(error,
369 ENOTSUP,
370 RTE_FLOW_ERROR_TYPE_ITEM,
371 item,
372 "RAW: Non-NULL pattern mask not allowed with NULL pattern");
373
374 pattern = item_raw_spec->pattern;
375 pattern_mask = (item_raw_mask) ? item_raw_mask->pattern : NULL;
376 pattern_length = (size_t)item_raw_spec->length;
377 pattern_offset = (size_t)item_raw_spec->offset;
378
379 disabled = 0;
380 if (pattern_mask == NULL)
381 disabled = 1;
382 else
383 for (i = 0; i < pattern_length; i++)
384 if ((pattern)[i])
385 disabled = 1;
386
387 memset(spec, 0, TABLE_RULE_MATCH_SIZE_MAX);
388 if (pattern)
389 memcpy(&spec[pattern_offset], pattern, pattern_length);
390
391 memset(mask, 0, TABLE_RULE_MATCH_SIZE_MAX);
392 if (pattern_mask)
393 memcpy(&mask[pattern_offset], pattern_mask, pattern_length);
394
395 *item_size = pattern_offset + pattern_length;
396 *item_disabled = disabled;
397
398 return 0;
399 }
400
/* Pre-process a protocol flow item into spec/mask buffers.
 *
 * Rules enforced here:
 *  - NULL spec requires NULL last and NULL mask (item is then "disabled");
 *  - a missing mask falls back to the item type's default mask;
 *  - the mask is applied over the spec;
 *  - last (range matching) is only accepted when, after masking, it is
 *    identical to the spec (i.e. the range is degenerate).
 *
 * On success, *item_size is the item's match-key byte count and
 * *item_disabled is 1 (TRUE) iff the effective mask is all-zeroes.
 * Returns 0 on success, negative errno via rte_flow_error_set() otherwise.
 */
static int
flow_item_proto_preprocess(const struct rte_flow_item *item,
	union flow_item *item_spec,
	union flow_item *item_mask,
	size_t *item_size,
	int *item_disabled,
	struct rte_flow_error *error)
{
	const void *mask_default;
	uint8_t *spec = (uint8_t *)item_spec;
	uint8_t *mask = (uint8_t *)item_mask;
	size_t size, i;

	if (!flow_item_is_proto(item->type, &mask_default, &size))
		return rte_flow_error_set(error,
			ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item,
			"Item type not supported");

	/* RAW items have their own, different validation path. */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW)
		return flow_item_raw_preprocess(item,
			item_spec,
			item_mask,
			item_size,
			item_disabled,
			error);

	/* spec */
	if (!item->spec) {
		/* If spec is NULL, then last and mask also have to be NULL. */
		if (item->last || item->mask)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"Invalid item (NULL spec with non-NULL last or mask)");

		memset(item_spec, 0, size);
		memset(item_mask, 0, size);
		*item_size = size;
		*item_disabled = 1; /* TRUE */
		return 0;
	}

	memcpy(spec, item->spec, size);
	*item_size = size;

	/* mask */
	if (item->mask)
		memcpy(mask, item->mask, size);
	else
		memcpy(mask, mask_default, size);

	/* disabled: TRUE iff no mask byte is set. */
	for (i = 0; i < size; i++)
		if (mask[i])
			break;
	*item_disabled = (i == size) ? 1 : 0;

	/* Apply mask over spec. */
	for (i = 0; i < size; i++)
		spec[i] &= mask[i];

	/* last */
	if (item->last) {
		/* VLA: size is bounded by sizeof(union flow_item). */
		uint8_t last[size];

		/* init last */
		memcpy(last, item->last, size);
		for (i = 0; i < size; i++)
			last[i] &= mask[i];

		/* check for range: only a degenerate range (last == spec
		 * after masking) is accepted.
		 */
		for (i = 0; i < size; i++)
			if (last[i] != spec[i])
				return rte_flow_error_set(error,
					ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Range not supported");
	}

	return 0;
}
486
487 /***
488 * Skip disabled protocol items and VOID items
489 * until any of the mutually exclusive conditions
490 * from the list below takes place:
491 * (A) A protocol present in the proto_mask
492 * is met (either ENABLED or DISABLED);
493 * (B) A protocol NOT present in the proto_mask is met in ENABLED state;
494 * (C) The END item is met.
495 */
static int
flow_item_skip_disabled_protos(const struct rte_flow_item **item,
	uint64_t proto_mask,
	size_t *length,
	struct rte_flow_error *error)
{
	/* Accumulated match-key bytes of the skipped items. */
	size_t len = 0;

	for ( ; (*item)->type != RTE_FLOW_ITEM_TYPE_END; (*item)++) {
		union flow_item spec, mask;
		size_t size;
		int disabled = 0, status;

		if ((*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		status = flow_item_proto_preprocess(*item,
			&spec,
			&mask,
			&size,
			&disabled,
			error);
		if (status)
			return status;

		/* Stop at condition (A) or (B) from the header comment;
		 * condition (C) terminates the loop itself.
		 */
		if ((proto_mask & (1LLU << (*item)->type)) ||
		    !disabled)
			break;

		len += size;
	}

	if (length)
		*length = len;

	return 0;
}
533
534 #define FLOW_ITEM_PROTO_IP \
535 ((1LLU << RTE_FLOW_ITEM_TYPE_IPV4) | \
536 (1LLU << RTE_FLOW_ITEM_TYPE_IPV6))
537
538 static void
flow_item_skip_void(const struct rte_flow_item ** item)539 flow_item_skip_void(const struct rte_flow_item **item)
540 {
541 for ( ; ; (*item)++)
542 if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID)
543 return;
544 }
545
546 #define IP_PROTOCOL_TCP 0x06
547 #define IP_PROTOCOL_UDP 0x11
548 #define IP_PROTOCOL_SCTP 0x84
549
550 static int
mask_to_depth(uint64_t mask,uint32_t * depth)551 mask_to_depth(uint64_t mask,
552 uint32_t *depth)
553 {
554 uint64_t n;
555
556 if (mask == UINT64_MAX) {
557 if (depth)
558 *depth = 64;
559
560 return 0;
561 }
562
563 mask = ~mask;
564
565 if (mask & (mask + 1))
566 return -1;
567
568 n = __builtin_popcountll(mask);
569 if (depth)
570 *depth = (uint32_t)(64 - n);
571
572 return 0;
573 }
574
575 static int
ipv4_mask_to_depth(uint32_t mask,uint32_t * depth)576 ipv4_mask_to_depth(uint32_t mask,
577 uint32_t *depth)
578 {
579 uint32_t d;
580 int status;
581
582 status = mask_to_depth(mask | (UINT64_MAX << 32), &d);
583 if (status)
584 return status;
585
586 d -= 32;
587 if (depth)
588 *depth = d;
589
590 return 0;
591 }
592
/* Convert a 16-byte (big-endian) IPv6 address mask into its prefix depth
 * (0..128). The mask must be a contiguous prefix: the low 64-bit half may
 * only be non-zero when the high half is all-ones.
 *
 * Returns 0 on success, -1 for a non-contiguous mask. *depth is optional.
 */
static int
ipv6_mask_to_depth(uint8_t *mask,
	uint32_t *depth)
{
	uint64_t m0, m1;
	uint32_t d0, d1;
	int status;

	/* Bug fix: the previous code cast the uint8_t array to uint64_t *
	 * and dereferenced it, which violates strict aliasing and may be
	 * misaligned. memcpy is the well-defined way to extract the halves.
	 */
	memcpy(&m0, &mask[0], sizeof(m0));
	memcpy(&m1, &mask[8], sizeof(m1));
	m0 = rte_be_to_cpu_64(m0);
	m1 = rte_be_to_cpu_64(m1);

	status = mask_to_depth(m0, &d0);
	if (status)
		return status;

	status = mask_to_depth(m1, &d1);
	if (status)
		return status;

	/* The low half may only contribute when the high half is full. */
	if (d0 < 64 && d1)
		return -1;

	if (depth)
		*depth = d0 + d1;

	return 0;
}
619
620 static int
port_mask_to_range(uint16_t port,uint16_t port_mask,uint16_t * port0,uint16_t * port1)621 port_mask_to_range(uint16_t port,
622 uint16_t port_mask,
623 uint16_t *port0,
624 uint16_t *port1)
625 {
626 int status;
627 uint16_t p0, p1;
628
629 status = mask_to_depth(port_mask | (UINT64_MAX << 16), NULL);
630 if (status)
631 return -1;
632
633 p0 = port & port_mask;
634 p1 = p0 | ~port_mask;
635
636 if (port0)
637 *port0 = p0;
638
639 if (port1)
640 *port1 = p1;
641
642 return 0;
643 }
644
/* Build an ACL table rule match from a flow item list.
 *
 * Expected item sequence (VOID and disabled items are skipped between
 * stages): IPV4|IPV6, then TCP|UDP|SCTP, then END. The IP "next protocol"
 * field must be fully masked and must agree with the L4 item type; the
 * IP address masks must be valid prefixes and the L4 port masks valid
 * prefix masks (expanded to port ranges).
 *
 * Returns 0 on success, negative errno via rte_flow_error_set() otherwise.
 */
static int
flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,
	struct pipeline *pipeline __rte_unused,
	struct softnic_table *table __rte_unused,
	const struct rte_flow_attr *attr,
	const struct rte_flow_item *item,
	struct softnic_table_rule_match *rule_match,
	struct rte_flow_error *error)
{
	union flow_item spec, mask;
	size_t size, length = 0;
	int disabled = 0, status;
	uint8_t ip_proto, ip_proto_mask;

	memset(rule_match, 0, sizeof(*rule_match));
	rule_match->match_type = TABLE_ACL;
	rule_match->match.acl.priority = attr->priority;

	/* VOID or disabled protos only, if any. */
	status = flow_item_skip_disabled_protos(&item,
		FLOW_ITEM_PROTO_IP, &length, error);
	if (status)
		return status;

	/* IP only. */
	status = flow_item_proto_preprocess(item, &spec, &mask,
		&size, &disabled, error);
	if (status)
		return status;

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_IPV4:
	{
		uint32_t sa_depth, da_depth;

		/* Address masks must be contiguous prefixes. */
		status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.src_addr),
			&sa_depth);
		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal IPv4 header source address mask");

		status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.dst_addr),
			&da_depth);
		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal IPv4 header destination address mask");

		ip_proto = spec.ipv4.hdr.next_proto_id;
		ip_proto_mask = mask.ipv4.hdr.next_proto_id;

		/* ip_version: 1 = IPv4, 0 = IPv6. */
		rule_match->match.acl.ip_version = 1;
		rule_match->match.acl.ipv4.sa =
			rte_ntohl(spec.ipv4.hdr.src_addr);
		rule_match->match.acl.ipv4.da =
			rte_ntohl(spec.ipv4.hdr.dst_addr);
		rule_match->match.acl.sa_depth = sa_depth;
		rule_match->match.acl.da_depth = da_depth;
		rule_match->match.acl.proto = ip_proto;
		rule_match->match.acl.proto_mask = ip_proto_mask;
		break;
	} /* RTE_FLOW_ITEM_TYPE_IPV4 */

	case RTE_FLOW_ITEM_TYPE_IPV6:
	{
		uint32_t sa_depth, da_depth;

		status = ipv6_mask_to_depth(mask.ipv6.hdr.src_addr, &sa_depth);
		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal IPv6 header source address mask");

		status = ipv6_mask_to_depth(mask.ipv6.hdr.dst_addr, &da_depth);
		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal IPv6 header destination address mask");

		ip_proto = spec.ipv6.hdr.proto;
		ip_proto_mask = mask.ipv6.hdr.proto;

		rule_match->match.acl.ip_version = 0;
		memcpy(rule_match->match.acl.ipv6.sa,
			spec.ipv6.hdr.src_addr,
			sizeof(spec.ipv6.hdr.src_addr));
		memcpy(rule_match->match.acl.ipv6.da,
			spec.ipv6.hdr.dst_addr,
			sizeof(spec.ipv6.hdr.dst_addr));
		rule_match->match.acl.sa_depth = sa_depth;
		rule_match->match.acl.da_depth = da_depth;
		rule_match->match.acl.proto = ip_proto;
		rule_match->match.acl.proto_mask = ip_proto_mask;
		break;
	} /* RTE_FLOW_ITEM_TYPE_IPV6 */

	default:
		return rte_flow_error_set(error,
			ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item,
			"ACL: IP protocol required");
	} /* switch */

	/* The L4 protocol must be matched exactly, since it selects the
	 * interpretation of the next item.
	 */
	if (ip_proto_mask != UINT8_MAX)
		return rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item,
			"ACL: Illegal IP protocol mask");

	item++;

	/* VOID only, if any. */
	flow_item_skip_void(&item);

	/* TCP/UDP/SCTP only. */
	status = flow_item_proto_preprocess(item, &spec, &mask,
		&size, &disabled, error);
	if (status)
		return status;

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_TCP:
	{
		uint16_t sp0, sp1, dp0, dp1;

		if (ip_proto != IP_PROTOCOL_TCP)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Item type is TCP, but IP protocol is not");

		/* Port masks are expanded into inclusive port ranges. */
		status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.src_port),
			rte_ntohs(mask.tcp.hdr.src_port),
			&sp0,
			&sp1);

		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal TCP source port mask");

		status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.dst_port),
			rte_ntohs(mask.tcp.hdr.dst_port),
			&dp0,
			&dp1);

		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal TCP destination port mask");

		rule_match->match.acl.sp0 = sp0;
		rule_match->match.acl.sp1 = sp1;
		rule_match->match.acl.dp0 = dp0;
		rule_match->match.acl.dp1 = dp1;

		break;
	} /* RTE_FLOW_ITEM_TYPE_TCP */

	case RTE_FLOW_ITEM_TYPE_UDP:
	{
		uint16_t sp0, sp1, dp0, dp1;

		if (ip_proto != IP_PROTOCOL_UDP)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Item type is UDP, but IP protocol is not");

		status = port_mask_to_range(rte_ntohs(spec.udp.hdr.src_port),
			rte_ntohs(mask.udp.hdr.src_port),
			&sp0,
			&sp1);
		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal UDP source port mask");

		status = port_mask_to_range(rte_ntohs(spec.udp.hdr.dst_port),
			rte_ntohs(mask.udp.hdr.dst_port),
			&dp0,
			&dp1);
		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal UDP destination port mask");

		rule_match->match.acl.sp0 = sp0;
		rule_match->match.acl.sp1 = sp1;
		rule_match->match.acl.dp0 = dp0;
		rule_match->match.acl.dp1 = dp1;

		break;
	} /* RTE_FLOW_ITEM_TYPE_UDP */

	case RTE_FLOW_ITEM_TYPE_SCTP:
	{
		uint16_t sp0, sp1, dp0, dp1;

		if (ip_proto != IP_PROTOCOL_SCTP)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Item type is SCTP, but IP protocol is not");

		status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.src_port),
			rte_ntohs(mask.sctp.hdr.src_port),
			&sp0,
			&sp1);

		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal SCTP source port mask");

		status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.dst_port),
			rte_ntohs(mask.sctp.hdr.dst_port),
			&dp0,
			&dp1);
		if (status)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"ACL: Illegal SCTP destination port mask");

		rule_match->match.acl.sp0 = sp0;
		rule_match->match.acl.sp1 = sp1;
		rule_match->match.acl.dp0 = dp0;
		rule_match->match.acl.dp1 = dp1;

		break;
	} /* RTE_FLOW_ITEM_TYPE_SCTP */

	default:
		return rte_flow_error_set(error,
			ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item,
			"ACL: TCP/UDP/SCTP required");
	} /* switch */

	item++;

	/* VOID or disabled protos only, if any. */
	status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
	if (status)
		return status;

	/* END only. */
	if (item->type != RTE_FLOW_ITEM_TYPE_END)
		return rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item,
			"ACL: Expecting END item");

	return 0;
}
928
929 /***
930 * Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*
931 * respectively.
932 * They are located within a larger buffer at offsets *toffset* and *foffset*
933 * respectively. Both *tmask* and *fmask* represent bitmasks for the larger
934 * buffer.
935 * Question: are the two masks equivalent?
936 *
937 * Notes:
938 * 1. Offset basically indicates that the first offset bytes in the buffer
939 * are "don't care", so offset is equivalent to pre-pending an "all-zeros"
940 * array of *offset* bytes to the *mask*.
941 * 2. Each *mask* might contain a number of zero bytes at the beginning or
942 * at the end.
943 * 3. Bytes in the larger buffer after the end of the *mask* are also considered
944 * "don't care", so they are equivalent to appending an "all-zeros" array of
945 * bytes to the *mask*.
946 *
947 * Example:
948 * Buffer = [xx xx xx xx xx xx xx xx], buffer size = 8 bytes
949 * tmask = [00 22 00 33 00], toffset = 2, tsize = 5
950 * => buffer mask = [00 00 00 22 00 33 00 00]
951 * fmask = [22 00 33], foffset = 3, fsize = 3 =>
952 * => buffer mask = [00 00 00 22 00 33 00 00]
953 * Therefore, the tmask and fmask from this example are equivalent.
954 */
static int
hash_key_mask_is_same(uint8_t *tmask,
	size_t toffset,
	size_t tsize,
	uint8_t *fmask,
	size_t foffset,
	size_t fsize,
	size_t *toffset_plus,
	size_t *foffset_plus)
{
	size_t tpos; /* Position of first non-zero byte in the tmask buffer. */
	size_t fpos; /* Position of first non-zero byte in the fmask buffer. */
	size_t i;

	/* Compute tpos and fpos.
	 *
	 * Bug fix: the scans were unbounded, so an all-zeroes mask would
	 * read past the end of its array (undefined behavior). Bound them
	 * by the mask sizes.
	 */
	for (tpos = 0; tpos < tsize && tmask[tpos] == 0; tpos++)
		;
	for (fpos = 0; fpos < fsize && fmask[fpos] == 0; fpos++)
		;

	/* An all-zeroes mask (no relevant bytes) is equivalent only to
	 * another all-zeroes mask; no position adjustment applies.
	 */
	if (tpos == tsize || fpos == fsize) {
		if (tpos < tsize || fpos < fsize)
			return 0; /* FALSE */

		tpos = 0;
		fpos = 0;
		goto same;
	}

	/* The first relevant byte must land at the same buffer position. */
	if (toffset + tpos != foffset + fpos)
		return 0; /* FALSE */

	tsize -= tpos;
	fsize -= fpos;

	/* Compare the overlapping bytes; the longer tail must be all-zero. */
	if (tsize < fsize) {
		for (i = 0; i < tsize; i++)
			if (tmask[tpos + i] != fmask[fpos + i])
				return 0; /* FALSE */

		for ( ; i < fsize; i++)
			if (fmask[fpos + i])
				return 0; /* FALSE */
	} else {
		for (i = 0; i < fsize; i++)
			if (tmask[tpos + i] != fmask[fpos + i])
				return 0; /* FALSE */

		for ( ; i < tsize; i++)
			if (tmask[tpos + i])
				return 0; /* FALSE */
	}

same:
	if (toffset_plus)
		*toffset_plus = tpos;

	if (foffset_plus)
		*foffset_plus = fpos;

	return 1; /* TRUE */
}
1010
/* Build a HASH table rule match from a flow item list.
 *
 * Concatenates the spec/mask bytes of the items into a candidate key and
 * key mask, then checks that the flow key mask is equivalent to the
 * table's configured key mask (see hash_key_mask_is_same()) before
 * copying the key into *rule_match.
 *
 * Returns 0 on success, negative errno via rte_flow_error_set() otherwise.
 */
static int
flow_rule_match_hash_get(struct pmd_internals *softnic __rte_unused,
	struct pipeline *pipeline __rte_unused,
	struct softnic_table *table,
	const struct rte_flow_attr *attr __rte_unused,
	const struct rte_flow_item *item,
	struct softnic_table_rule_match *rule_match,
	struct rte_flow_error *error)
{
	struct softnic_table_rule_match_hash key, key_mask;
	struct softnic_table_hash_params *params = &table->params.match.hash;
	size_t offset = 0, length = 0, tpos, fpos;
	int status;

	memset(&key, 0, sizeof(key));
	memset(&key_mask, 0, sizeof(key_mask));

	/* VOID or disabled protos only, if any. Their total size becomes
	 * the key's starting offset within the packet.
	 */
	status = flow_item_skip_disabled_protos(&item, 0, &offset, error);
	if (status)
		return status;

	if (item->type == RTE_FLOW_ITEM_TYPE_END)
		return rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item,
			"HASH: END detected too early");

	/* VOID or any protocols (enabled or disabled): append each item's
	 * spec/mask bytes to the candidate key.
	 */
	for ( ; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		union flow_item spec, mask;
		size_t size;
		int disabled, status;

		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		status = flow_item_proto_preprocess(item,
			&spec,
			&mask,
			&size,
			&disabled,
			error);
		if (status)
			return status;

		/* Items overflowing the key are tolerated only when they
		 * are disabled (all-zero mask).
		 */
		if (length + size > sizeof(key)) {
			if (disabled)
				break;

			return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"HASH: Item too big");
		}

		memcpy(&key.key[length], &spec, size);
		memcpy(&key_mask.key[length], &mask, size);
		length += size;
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* VOID or disabled protos only, if any. */
		status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
		if (status)
			return status;

		/* END only. */
		if (item->type != RTE_FLOW_ITEM_TYPE_END)
			return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"HASH: Expecting END item");
	}

	/* Compare flow key mask against table key mask. The table key
	 * offset is relative to the mbuf start, while the flow offset is
	 * relative to the packet start — presumably accounting for the
	 * mbuf header and headroom; confirm against the table setup code.
	 */
	offset += sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;

	if (!hash_key_mask_is_same(params->key_mask,
		params->key_offset,
		params->key_size,
		key_mask.key,
		offset,
		length,
		&tpos,
		&fpos))
		return rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			NULL,
			"HASH: Item list is not observing the match format");

	/* Rule match: copy the key, aligned to the table key layout. */
	memset(rule_match, 0, sizeof(*rule_match));
	rule_match->match_type = TABLE_HASH;
	memcpy(&rule_match->match.hash.key[tpos],
		&key.key[fpos],
		RTE_MIN(sizeof(rule_match->match.hash.key) - tpos,
			length - fpos));

	return 0;
}
1116
1117 static int
flow_rule_match_get(struct pmd_internals * softnic,struct pipeline * pipeline,struct softnic_table * table,const struct rte_flow_attr * attr,const struct rte_flow_item * item,struct softnic_table_rule_match * rule_match,struct rte_flow_error * error)1118 flow_rule_match_get(struct pmd_internals *softnic,
1119 struct pipeline *pipeline,
1120 struct softnic_table *table,
1121 const struct rte_flow_attr *attr,
1122 const struct rte_flow_item *item,
1123 struct softnic_table_rule_match *rule_match,
1124 struct rte_flow_error *error)
1125 {
1126 switch (table->params.match_type) {
1127 case TABLE_ACL:
1128 return flow_rule_match_acl_get(softnic,
1129 pipeline,
1130 table,
1131 attr,
1132 item,
1133 rule_match,
1134 error);
1135
1136 /* FALLTHROUGH */
1137
1138 case TABLE_HASH:
1139 return flow_rule_match_hash_get(softnic,
1140 pipeline,
1141 table,
1142 attr,
1143 item,
1144 rule_match,
1145 error);
1146
1147 /* FALLTHROUGH */
1148
1149 default:
1150 return rte_flow_error_set(error,
1151 ENOTSUP,
1152 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1153 NULL,
1154 "Unsupported pipeline table match type");
1155 }
1156 }
1157
1158 static int
flow_rule_action_get(struct pmd_internals * softnic,struct pipeline * pipeline,struct softnic_table * table,const struct rte_flow_attr * attr,const struct rte_flow_action * action,struct softnic_table_rule_action * rule_action,struct rte_flow_error * error)1159 flow_rule_action_get(struct pmd_internals *softnic,
1160 struct pipeline *pipeline,
1161 struct softnic_table *table,
1162 const struct rte_flow_attr *attr,
1163 const struct rte_flow_action *action,
1164 struct softnic_table_rule_action *rule_action,
1165 struct rte_flow_error *error)
1166 {
1167 struct softnic_table_action_profile *profile;
1168 struct softnic_table_action_profile_params *params;
1169 struct softnic_mtr_meter_policy *policy;
1170 int n_jump_queue_rss_drop = 0;
1171 int n_count = 0;
1172 int n_mark = 0;
1173 int n_vxlan_decap = 0;
1174
1175 profile = softnic_table_action_profile_find(softnic,
1176 table->params.action_profile_name);
1177 if (profile == NULL)
1178 return rte_flow_error_set(error,
1179 EINVAL,
1180 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1181 action,
1182 "JUMP: Table action profile");
1183
1184 params = &profile->params;
1185
1186 for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
1187 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1188 continue;
1189
1190 switch (action->type) {
1191 case RTE_FLOW_ACTION_TYPE_JUMP:
1192 {
1193 const struct rte_flow_action_jump *conf = action->conf;
1194 struct flow_attr_map *map;
1195
1196 if (conf == NULL)
1197 return rte_flow_error_set(error,
1198 EINVAL,
1199 RTE_FLOW_ERROR_TYPE_ACTION,
1200 action,
1201 "JUMP: Null configuration");
1202
1203 if (n_jump_queue_rss_drop)
1204 return rte_flow_error_set(error,
1205 EINVAL,
1206 RTE_FLOW_ERROR_TYPE_ACTION,
1207 action,
1208 "Only one termination action is"
1209 " allowed per flow");
1210
1211 if ((params->action_mask &
1212 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1213 return rte_flow_error_set(error,
1214 EINVAL,
1215 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1216 NULL,
1217 "JUMP action not enabled for this table");
1218
1219 n_jump_queue_rss_drop = 1;
1220
1221 map = flow_attr_map_get(softnic,
1222 conf->group,
1223 attr->ingress);
1224 if (map == NULL || map->valid == 0)
1225 return rte_flow_error_set(error,
1226 EINVAL,
1227 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1228 NULL,
1229 "JUMP: Invalid group mapping");
1230
1231 if (strcmp(pipeline->name, map->pipeline_name) != 0)
1232 return rte_flow_error_set(error,
1233 ENOTSUP,
1234 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1235 NULL,
1236 "JUMP: Jump to table in different pipeline");
1237
1238 /* RTE_TABLE_ACTION_FWD */
1239 rule_action->fwd.action = RTE_PIPELINE_ACTION_TABLE;
1240 rule_action->fwd.id = map->table_id;
1241 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1242 break;
1243 } /* RTE_FLOW_ACTION_TYPE_JUMP */
1244
1245 case RTE_FLOW_ACTION_TYPE_QUEUE:
1246 {
1247 char name[NAME_SIZE];
1248 struct rte_eth_dev *dev;
1249 const struct rte_flow_action_queue *conf = action->conf;
1250 uint32_t port_id;
1251 int status;
1252
1253 if (conf == NULL)
1254 return rte_flow_error_set(error,
1255 EINVAL,
1256 RTE_FLOW_ERROR_TYPE_ACTION,
1257 action,
1258 "QUEUE: Null configuration");
1259
1260 if (n_jump_queue_rss_drop)
1261 return rte_flow_error_set(error,
1262 EINVAL,
1263 RTE_FLOW_ERROR_TYPE_ACTION,
1264 action,
1265 "Only one termination action is allowed"
1266 " per flow");
1267
1268 if ((params->action_mask &
1269 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1270 return rte_flow_error_set(error,
1271 EINVAL,
1272 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1273 NULL,
1274 "QUEUE action not enabled for this table");
1275
1276 n_jump_queue_rss_drop = 1;
1277
1278 dev = ETHDEV(softnic);
1279 if (dev == NULL ||
1280 conf->index >= dev->data->nb_rx_queues)
1281 return rte_flow_error_set(error,
1282 EINVAL,
1283 RTE_FLOW_ERROR_TYPE_ACTION,
1284 action,
1285 "QUEUE: Invalid RX queue ID");
1286
1287 snprintf(name, sizeof(name), "RXQ%u",
1288 (uint32_t)conf->index);
1289
1290 status = softnic_pipeline_port_out_find(softnic,
1291 pipeline->name,
1292 name,
1293 &port_id);
1294 if (status)
1295 return rte_flow_error_set(error,
1296 ENOTSUP,
1297 RTE_FLOW_ERROR_TYPE_ACTION,
1298 action,
1299 "QUEUE: RX queue not accessible from this pipeline");
1300
1301 /* RTE_TABLE_ACTION_FWD */
1302 rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT;
1303 rule_action->fwd.id = port_id;
1304 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1305 break;
1306 } /*RTE_FLOW_ACTION_TYPE_QUEUE */
1307
1308 case RTE_FLOW_ACTION_TYPE_RSS:
1309 {
1310 const struct rte_flow_action_rss *conf = action->conf;
1311 uint32_t i;
1312
1313 if (conf == NULL)
1314 return rte_flow_error_set(error,
1315 EINVAL,
1316 RTE_FLOW_ERROR_TYPE_ACTION,
1317 action,
1318 "RSS: Null configuration");
1319
1320 if (!rte_is_power_of_2(conf->queue_num))
1321 return rte_flow_error_set(error,
1322 EINVAL,
1323 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1324 conf,
1325 "RSS: Number of queues must be a power of 2");
1326
1327 if (conf->queue_num > RTE_DIM(rule_action->lb.out))
1328 return rte_flow_error_set(error,
1329 EINVAL,
1330 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1331 conf,
1332 "RSS: Number of queues too big");
1333
1334 if (n_jump_queue_rss_drop)
1335 return rte_flow_error_set(error,
1336 EINVAL,
1337 RTE_FLOW_ERROR_TYPE_ACTION,
1338 action,
1339 "Only one termination action is allowed per flow");
1340
1341 if (((params->action_mask &
1342 (1LLU << RTE_TABLE_ACTION_FWD)) == 0) ||
1343 ((params->action_mask &
1344 (1LLU << RTE_TABLE_ACTION_LB)) == 0))
1345 return rte_flow_error_set(error,
1346 ENOTSUP,
1347 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1348 NULL,
1349 "RSS action not supported by this table");
1350
1351 if (params->lb.out_offset !=
1352 pipeline->params.offset_port_id)
1353 return rte_flow_error_set(error,
1354 EINVAL,
1355 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1356 NULL,
1357 "RSS action not supported by this pipeline");
1358
1359 n_jump_queue_rss_drop = 1;
1360
1361 /* RTE_TABLE_ACTION_LB */
1362 for (i = 0; i < conf->queue_num; i++) {
1363 char name[NAME_SIZE];
1364 struct rte_eth_dev *dev;
1365 uint32_t port_id;
1366 int status;
1367
1368 dev = ETHDEV(softnic);
1369 if (dev == NULL ||
1370 conf->queue[i] >=
1371 dev->data->nb_rx_queues)
1372 return rte_flow_error_set(error,
1373 EINVAL,
1374 RTE_FLOW_ERROR_TYPE_ACTION,
1375 action,
1376 "RSS: Invalid RX queue ID");
1377
1378 snprintf(name, sizeof(name), "RXQ%u",
1379 (uint32_t)conf->queue[i]);
1380
1381 status = softnic_pipeline_port_out_find(softnic,
1382 pipeline->name,
1383 name,
1384 &port_id);
1385 if (status)
1386 return rte_flow_error_set(error,
1387 ENOTSUP,
1388 RTE_FLOW_ERROR_TYPE_ACTION,
1389 action,
1390 "RSS: RX queue not accessible from this pipeline");
1391
1392 rule_action->lb.out[i] = port_id;
1393 }
1394
1395 for ( ; i < RTE_DIM(rule_action->lb.out); i++)
1396 rule_action->lb.out[i] =
1397 rule_action->lb.out[i % conf->queue_num];
1398
1399 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_LB;
1400
1401 /* RTE_TABLE_ACTION_FWD */
1402 rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT_META;
1403 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1404 break;
1405 } /* RTE_FLOW_ACTION_TYPE_RSS */
1406
1407 case RTE_FLOW_ACTION_TYPE_DROP:
1408 {
1409 const void *conf = action->conf;
1410
1411 if (conf != NULL)
1412 return rte_flow_error_set(error,
1413 EINVAL,
1414 RTE_FLOW_ERROR_TYPE_ACTION,
1415 action,
1416 "DROP: No configuration required");
1417
1418 if (n_jump_queue_rss_drop)
1419 return rte_flow_error_set(error,
1420 EINVAL,
1421 RTE_FLOW_ERROR_TYPE_ACTION,
1422 action,
1423 "Only one termination action is allowed per flow");
1424 if ((params->action_mask &
1425 (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
1426 return rte_flow_error_set(error,
1427 ENOTSUP,
1428 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1429 NULL,
1430 "DROP action not supported by this table");
1431
1432 n_jump_queue_rss_drop = 1;
1433
1434 /* RTE_TABLE_ACTION_FWD */
1435 rule_action->fwd.action = RTE_PIPELINE_ACTION_DROP;
1436 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
1437 break;
1438 } /* RTE_FLOW_ACTION_TYPE_DROP */
1439
1440 case RTE_FLOW_ACTION_TYPE_COUNT:
1441 {
1442 const struct rte_flow_action_count *conf = action->conf;
1443
1444 if (conf == NULL)
1445 return rte_flow_error_set(error,
1446 EINVAL,
1447 RTE_FLOW_ERROR_TYPE_ACTION,
1448 action,
1449 "COUNT: Null configuration");
1450
1451 if (n_count)
1452 return rte_flow_error_set(error,
1453 ENOTSUP,
1454 RTE_FLOW_ERROR_TYPE_ACTION,
1455 action,
1456 "Only one COUNT action per flow");
1457
1458 if ((params->action_mask &
1459 (1LLU << RTE_TABLE_ACTION_STATS)) == 0)
1460 return rte_flow_error_set(error,
1461 ENOTSUP,
1462 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1463 NULL,
1464 "COUNT action not supported by this table");
1465
1466 n_count = 1;
1467
1468 /* RTE_TABLE_ACTION_STATS */
1469 rule_action->stats.n_packets = 0;
1470 rule_action->stats.n_bytes = 0;
1471 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_STATS;
1472 break;
1473 } /* RTE_FLOW_ACTION_TYPE_COUNT */
1474
1475 case RTE_FLOW_ACTION_TYPE_MARK:
1476 {
1477 const struct rte_flow_action_mark *conf = action->conf;
1478
1479 if (conf == NULL)
1480 return rte_flow_error_set(error,
1481 EINVAL,
1482 RTE_FLOW_ERROR_TYPE_ACTION,
1483 action,
1484 "MARK: Null configuration");
1485
1486 if (n_mark)
1487 return rte_flow_error_set(error,
1488 ENOTSUP,
1489 RTE_FLOW_ERROR_TYPE_ACTION,
1490 action,
1491 "Only one MARK action per flow");
1492
1493 if ((params->action_mask &
1494 (1LLU << RTE_TABLE_ACTION_TAG)) == 0)
1495 return rte_flow_error_set(error,
1496 ENOTSUP,
1497 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1498 NULL,
1499 "MARK action not supported by this table");
1500
1501 n_mark = 1;
1502
1503 /* RTE_TABLE_ACTION_TAG */
1504 rule_action->tag.tag = conf->id;
1505 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_TAG;
1506 break;
1507 } /* RTE_FLOW_ACTION_TYPE_MARK */
1508
1509 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
1510 {
1511 const struct rte_flow_action_mark *conf = action->conf;
1512
1513 if (conf)
1514 return rte_flow_error_set(error,
1515 EINVAL,
1516 RTE_FLOW_ERROR_TYPE_ACTION,
1517 action,
1518 "VXLAN DECAP: Non-null configuration");
1519
1520 if (n_vxlan_decap)
1521 return rte_flow_error_set(error,
1522 ENOTSUP,
1523 RTE_FLOW_ERROR_TYPE_ACTION,
1524 action,
1525 "Only one VXLAN DECAP action per flow");
1526
1527 if ((params->action_mask &
1528 (1LLU << RTE_TABLE_ACTION_DECAP)) == 0)
1529 return rte_flow_error_set(error,
1530 ENOTSUP,
1531 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1532 NULL,
1533 "VXLAN DECAP action not supported by this table");
1534
1535 n_vxlan_decap = 1;
1536
1537 /* RTE_TABLE_ACTION_DECAP */
1538 rule_action->decap.n = 50; /* Ether/IPv4/UDP/VXLAN */
1539 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_DECAP;
1540 break;
1541 } /* RTE_FLOW_ACTION_TYPE_VXLAN_DECAP */
1542
1543 case RTE_FLOW_ACTION_TYPE_METER:
1544 {
1545 const struct rte_flow_action_meter *conf = action->conf;
1546 struct softnic_mtr_meter_profile *mp;
1547 struct softnic_mtr *m;
1548 uint32_t table_id = table - pipeline->table;
1549 uint32_t meter_profile_id;
1550 int status;
1551
1552 if ((params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0)
1553 return rte_flow_error_set(error,
1554 EINVAL,
1555 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1556 NULL,
1557 "METER: Table action not supported");
1558
1559 if (params->mtr.n_tc != 1)
1560 return rte_flow_error_set(error,
1561 EINVAL,
1562 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1563 NULL,
1564 "METER: Multiple TCs not supported");
1565
1566 if (conf == NULL)
1567 return rte_flow_error_set(error,
1568 EINVAL,
1569 RTE_FLOW_ERROR_TYPE_ACTION,
1570 action,
1571 "METER: Null configuration");
1572
1573 m = softnic_mtr_find(softnic, conf->mtr_id);
1574
1575 if (m == NULL)
1576 return rte_flow_error_set(error,
1577 EINVAL,
1578 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1579 NULL,
1580 "METER: Invalid meter ID");
1581
1582 if (m->flow)
1583 return rte_flow_error_set(error,
1584 EINVAL,
1585 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1586 NULL,
1587 "METER: Meter already attached to a flow");
1588
1589 meter_profile_id = m->params.meter_profile_id;
1590 mp = softnic_mtr_meter_profile_find(softnic, meter_profile_id);
1591
1592 /* Add meter profile to pipeline table */
1593 if (!softnic_pipeline_table_meter_profile_find(table,
1594 meter_profile_id)) {
1595 struct rte_table_action_meter_profile profile;
1596
1597 memset(&profile, 0, sizeof(profile));
1598 profile.alg = RTE_TABLE_ACTION_METER_TRTCM;
1599 profile.trtcm.cir = mp->params.trtcm_rfc2698.cir;
1600 profile.trtcm.pir = mp->params.trtcm_rfc2698.pir;
1601 profile.trtcm.cbs = mp->params.trtcm_rfc2698.cbs;
1602 profile.trtcm.pbs = mp->params.trtcm_rfc2698.pbs;
1603
1604 status = softnic_pipeline_table_mtr_profile_add(softnic,
1605 pipeline->name,
1606 table_id,
1607 meter_profile_id,
1608 &profile);
1609 if (status) {
1610 rte_flow_error_set(error,
1611 EINVAL,
1612 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1613 NULL,
1614 "METER: Table meter profile add failed");
1615 return -1;
1616 }
1617 }
1618 /* Meter policy must exist */
1619 policy = softnic_mtr_meter_policy_find(softnic,
1620 m->params.meter_policy_id);
1621 if (policy == NULL) {
1622 rte_flow_error_set(error,
1623 EINVAL,
1624 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1625 NULL,
1626 "METER: fail to find meter policy");
1627 return -1;
1628 }
1629 /* RTE_TABLE_ACTION_METER */
1630 rule_action->mtr.mtr[0].meter_profile_id = meter_profile_id;
1631 rule_action->mtr.mtr[0].policer[RTE_COLOR_GREEN] =
1632 policy->policer[RTE_COLOR_GREEN];
1633 rule_action->mtr.mtr[0].policer[RTE_COLOR_YELLOW] =
1634 policy->policer[RTE_COLOR_YELLOW];
1635 rule_action->mtr.mtr[0].policer[RTE_COLOR_RED] =
1636 policy->policer[RTE_COLOR_RED];
1637 rule_action->mtr.tc_mask = 1;
1638 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
1639 break;
1640 } /* RTE_FLOW_ACTION_TYPE_METER */
1641
1642 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1643 {
1644 const struct rte_flow_action_vxlan_encap *conf =
1645 action->conf;
1646 const struct rte_flow_item *item;
1647 union flow_item spec, mask;
1648 int disabled = 0, status;
1649 size_t size;
1650
1651 if (conf == NULL)
1652 return rte_flow_error_set(error,
1653 EINVAL,
1654 RTE_FLOW_ERROR_TYPE_ACTION,
1655 action,
1656 "VXLAN ENCAP: Null configuration");
1657
1658 item = conf->definition;
1659 if (item == NULL)
1660 return rte_flow_error_set(error,
1661 EINVAL,
1662 RTE_FLOW_ERROR_TYPE_ACTION,
1663 action,
1664 "VXLAN ENCAP: Null configuration definition");
1665
1666 if (!(params->action_mask &
1667 (1LLU << RTE_TABLE_ACTION_ENCAP)))
1668 return rte_flow_error_set(error,
1669 EINVAL,
1670 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1671 NULL,
1672 "VXLAN ENCAP: Encap action not enabled for this table");
1673
1674 /* Check for Ether. */
1675 flow_item_skip_void(&item);
1676 status = flow_item_proto_preprocess(item, &spec, &mask,
1677 &size, &disabled, error);
1678 if (status)
1679 return status;
1680
1681 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1682 return rte_flow_error_set(error,
1683 EINVAL,
1684 RTE_FLOW_ERROR_TYPE_ITEM,
1685 item,
1686 "VXLAN ENCAP: first encap item should be ether");
1687 }
1688 rte_ether_addr_copy(&spec.eth.dst,
1689 &rule_action->encap.vxlan.ether.da);
1690 rte_ether_addr_copy(&spec.eth.src,
1691 &rule_action->encap.vxlan.ether.sa);
1692
1693 item++;
1694
1695 /* Check for VLAN. */
1696 flow_item_skip_void(&item);
1697 status = flow_item_proto_preprocess(item, &spec, &mask,
1698 &size, &disabled, error);
1699 if (status)
1700 return status;
1701
1702 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1703 if (!params->encap.vxlan.vlan)
1704 return rte_flow_error_set(error,
1705 ENOTSUP,
1706 RTE_FLOW_ERROR_TYPE_ITEM,
1707 item,
1708 "VXLAN ENCAP: vlan encap not supported by table");
1709
1710 uint16_t tci = rte_ntohs(spec.vlan.tci);
1711 rule_action->encap.vxlan.vlan.pcp =
1712 tci >> 13;
1713 rule_action->encap.vxlan.vlan.dei =
1714 (tci >> 12) & 0x1;
1715 rule_action->encap.vxlan.vlan.vid =
1716 tci & 0xfff;
1717
1718 item++;
1719
1720 flow_item_skip_void(&item);
1721 status = flow_item_proto_preprocess(item, &spec,
1722 &mask, &size, &disabled, error);
1723 if (status)
1724 return status;
1725 } else {
1726 if (params->encap.vxlan.vlan)
1727 return rte_flow_error_set(error,
1728 ENOTSUP,
1729 RTE_FLOW_ERROR_TYPE_ITEM,
1730 item,
1731 "VXLAN ENCAP: expecting vlan encap item");
1732 }
1733
1734 /* Check for IPV4/IPV6. */
1735 switch (item->type) {
1736 case RTE_FLOW_ITEM_TYPE_IPV4:
1737 {
1738 rule_action->encap.vxlan.ipv4.sa =
1739 rte_ntohl(spec.ipv4.hdr.src_addr);
1740 rule_action->encap.vxlan.ipv4.da =
1741 rte_ntohl(spec.ipv4.hdr.dst_addr);
1742 rule_action->encap.vxlan.ipv4.dscp =
1743 spec.ipv4.hdr.type_of_service >> 2;
1744 rule_action->encap.vxlan.ipv4.ttl =
1745 spec.ipv4.hdr.time_to_live;
1746 break;
1747 }
1748 case RTE_FLOW_ITEM_TYPE_IPV6:
1749 {
1750 uint32_t vtc_flow;
1751
1752 memcpy(&rule_action->encap.vxlan.ipv6.sa,
1753 &spec.ipv6.hdr.src_addr,
1754 sizeof(spec.ipv6.hdr.src_addr));
1755 memcpy(&rule_action->encap.vxlan.ipv6.da,
1756 &spec.ipv6.hdr.dst_addr,
1757 sizeof(spec.ipv6.hdr.dst_addr));
1758 vtc_flow = rte_ntohl(spec.ipv6.hdr.vtc_flow);
1759 rule_action->encap.vxlan.ipv6.flow_label =
1760 vtc_flow & 0xfffff;
1761 rule_action->encap.vxlan.ipv6.dscp =
1762 (vtc_flow >> 22) & 0x3f;
1763 rule_action->encap.vxlan.ipv6.hop_limit =
1764 spec.ipv6.hdr.hop_limits;
1765 break;
1766 }
1767 default:
1768 return rte_flow_error_set(error,
1769 EINVAL,
1770 RTE_FLOW_ERROR_TYPE_ITEM,
1771 item,
1772 "VXLAN ENCAP: encap item after ether should be ipv4/ipv6");
1773 }
1774
1775 item++;
1776
1777 /* Check for UDP. */
1778 flow_item_skip_void(&item);
1779 status = flow_item_proto_preprocess(item, &spec, &mask,
1780 &size, &disabled, error);
1781 if (status)
1782 return status;
1783
1784 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1785 return rte_flow_error_set(error,
1786 EINVAL,
1787 RTE_FLOW_ERROR_TYPE_ITEM,
1788 item,
1789 "VXLAN ENCAP: encap item after ipv4/ipv6 should be udp");
1790 }
1791 rule_action->encap.vxlan.udp.sp =
1792 rte_ntohs(spec.udp.hdr.src_port);
1793 rule_action->encap.vxlan.udp.dp =
1794 rte_ntohs(spec.udp.hdr.dst_port);
1795
1796 item++;
1797
1798 /* Check for VXLAN. */
1799 flow_item_skip_void(&item);
1800 status = flow_item_proto_preprocess(item, &spec, &mask,
1801 &size, &disabled, error);
1802 if (status)
1803 return status;
1804
1805 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1806 return rte_flow_error_set(error,
1807 EINVAL,
1808 RTE_FLOW_ERROR_TYPE_ITEM,
1809 item,
1810 "VXLAN ENCAP: encap item after udp should be vxlan");
1811 }
1812 rule_action->encap.vxlan.vxlan.vni =
1813 (spec.vxlan.vni[0] << 16U |
1814 spec.vxlan.vni[1] << 8U
1815 | spec.vxlan.vni[2]);
1816
1817 item++;
1818
1819 /* Check for END. */
1820 flow_item_skip_void(&item);
1821
1822 if (item->type != RTE_FLOW_ITEM_TYPE_END)
1823 return rte_flow_error_set(error,
1824 EINVAL,
1825 RTE_FLOW_ERROR_TYPE_ITEM,
1826 item,
1827 "VXLAN ENCAP: expecting END item");
1828
1829 rule_action->encap.type = RTE_TABLE_ACTION_ENCAP_VXLAN;
1830 rule_action->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
1831 break;
1832 } /* RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP */
1833
1834 default:
1835 return -ENOTSUP;
1836 }
1837 }
1838
1839 if (n_jump_queue_rss_drop == 0)
1840 return rte_flow_error_set(error,
1841 EINVAL,
1842 RTE_FLOW_ERROR_TYPE_ACTION,
1843 action,
1844 "Flow does not have any terminating action");
1845
1846 return 0;
1847 }
1848
1849 static int
pmd_flow_validate(struct rte_eth_dev * dev,const struct rte_flow_attr * attr,const struct rte_flow_item item[],const struct rte_flow_action action[],struct rte_flow_error * error)1850 pmd_flow_validate(struct rte_eth_dev *dev,
1851 const struct rte_flow_attr *attr,
1852 const struct rte_flow_item item[],
1853 const struct rte_flow_action action[],
1854 struct rte_flow_error *error)
1855 {
1856 struct softnic_table_rule_match rule_match;
1857 struct softnic_table_rule_action rule_action;
1858
1859 struct pmd_internals *softnic = dev->data->dev_private;
1860 struct pipeline *pipeline;
1861 struct softnic_table *table;
1862 const char *pipeline_name = NULL;
1863 uint32_t table_id = 0;
1864 int status;
1865
1866 /* Check input parameters. */
1867 if (attr == NULL)
1868 return rte_flow_error_set(error,
1869 EINVAL,
1870 RTE_FLOW_ERROR_TYPE_ATTR,
1871 NULL, "Null attr");
1872
1873 if (item == NULL)
1874 return rte_flow_error_set(error,
1875 EINVAL,
1876 RTE_FLOW_ERROR_TYPE_ITEM,
1877 NULL,
1878 "Null item");
1879
1880 if (action == NULL)
1881 return rte_flow_error_set(error,
1882 EINVAL,
1883 RTE_FLOW_ERROR_TYPE_ACTION,
1884 NULL,
1885 "Null action");
1886
1887 /* Identify the pipeline table to add this flow to. */
1888 status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
1889 &table_id, error);
1890 if (status)
1891 return status;
1892
1893 pipeline = softnic_pipeline_find(softnic, pipeline_name);
1894 if (pipeline == NULL)
1895 return rte_flow_error_set(error,
1896 EINVAL,
1897 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1898 NULL,
1899 "Invalid pipeline name");
1900
1901 if (table_id >= pipeline->n_tables)
1902 return rte_flow_error_set(error,
1903 EINVAL,
1904 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1905 NULL,
1906 "Invalid pipeline table ID");
1907
1908 table = &pipeline->table[table_id];
1909
1910 /* Rule match. */
1911 memset(&rule_match, 0, sizeof(rule_match));
1912 status = flow_rule_match_get(softnic,
1913 pipeline,
1914 table,
1915 attr,
1916 item,
1917 &rule_match,
1918 error);
1919 if (status)
1920 return status;
1921
1922 /* Rule action. */
1923 memset(&rule_action, 0, sizeof(rule_action));
1924 status = flow_rule_action_get(softnic,
1925 pipeline,
1926 table,
1927 attr,
1928 action,
1929 &rule_action,
1930 error);
1931 if (status)
1932 return status;
1933
1934 return 0;
1935 }
1936
1937 static struct softnic_mtr *
flow_action_meter_get(struct pmd_internals * softnic,const struct rte_flow_action * action)1938 flow_action_meter_get(struct pmd_internals *softnic,
1939 const struct rte_flow_action *action)
1940 {
1941 for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++)
1942 if (action->type == RTE_FLOW_ACTION_TYPE_METER) {
1943 const struct rte_flow_action_meter *conf = action->conf;
1944
1945 if (conf == NULL)
1946 return NULL;
1947
1948 return softnic_mtr_find(softnic, conf->mtr_id);
1949 }
1950
1951 return NULL;
1952 }
1953
1954 static void
flow_meter_owner_reset(struct pmd_internals * softnic,struct rte_flow * flow)1955 flow_meter_owner_reset(struct pmd_internals *softnic,
1956 struct rte_flow *flow)
1957 {
1958 struct softnic_mtr_list *ml = &softnic->mtr.mtrs;
1959 struct softnic_mtr *m;
1960
1961 TAILQ_FOREACH(m, ml, node)
1962 if (m->flow == flow) {
1963 m->flow = NULL;
1964 break;
1965 }
1966 }
1967
1968 static void
flow_meter_owner_set(struct pmd_internals * softnic,struct rte_flow * flow,struct softnic_mtr * mtr)1969 flow_meter_owner_set(struct pmd_internals *softnic,
1970 struct rte_flow *flow,
1971 struct softnic_mtr *mtr)
1972 {
1973 /* Reset current flow meter */
1974 flow_meter_owner_reset(softnic, flow);
1975
1976 /* Set new flow meter */
1977 mtr->flow = flow;
1978 }
1979
1980 static int
is_meter_action_enable(struct pmd_internals * softnic,struct softnic_table * table)1981 is_meter_action_enable(struct pmd_internals *softnic,
1982 struct softnic_table *table)
1983 {
1984 struct softnic_table_action_profile *profile =
1985 softnic_table_action_profile_find(softnic,
1986 table->params.action_profile_name);
1987 struct softnic_table_action_profile_params *params = &profile->params;
1988
1989 return (params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) ? 1 : 0;
1990 }
1991
/* rte_flow create callback: validate inputs, translate the item and
 * action lists into a softnic table rule, install the rule in the
 * pipeline table and track it with an rte_flow object. Creating a flow
 * whose match equals an existing flow's updates that flow in place
 * (same rte_flow handle is returned). On failure, NULL is returned and
 * *error is filled in.
 */
static struct rte_flow *
pmd_flow_create(struct rte_eth_dev *dev,
	const struct rte_flow_attr *attr,
	const struct rte_flow_item item[],
	const struct rte_flow_action action[],
	struct rte_flow_error *error)
{
	struct softnic_table_rule_match rule_match;
	struct softnic_table_rule_action rule_action;
	void *rule_data;

	struct pmd_internals *softnic = dev->data->dev_private;
	struct pipeline *pipeline;
	struct softnic_table *table;
	struct rte_flow *flow;
	struct softnic_mtr *mtr;
	const char *pipeline_name = NULL;
	uint32_t table_id = 0;
	int new_flow, status;

	/* Check input parameters. */
	if (attr == NULL) {
		rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL,
			"Null attr");
		return NULL;
	}

	if (item == NULL) {
		rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL,
			"Null item");
		return NULL;
	}

	if (action == NULL) {
		rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			NULL,
			"Null action");
		return NULL;
	}

	/* Identify the pipeline table to add this flow to. */
	status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
		&table_id, error);
	if (status)
		return NULL;

	pipeline = softnic_pipeline_find(softnic, pipeline_name);
	if (pipeline == NULL) {
		rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			NULL,
			"Invalid pipeline name");
		return NULL;
	}

	if (table_id >= pipeline->n_tables) {
		rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			NULL,
			"Invalid pipeline table ID");
		return NULL;
	}

	table = &pipeline->table[table_id];

	/* Rule match: translate the item list (sets *error on failure). */
	memset(&rule_match, 0, sizeof(rule_match));
	status = flow_rule_match_get(softnic,
		pipeline,
		table,
		attr,
		item,
		&rule_match,
		error);
	if (status)
		return NULL;

	/* Rule action: translate the action list (sets *error on failure). */
	memset(&rule_action, 0, sizeof(rule_action));
	status = flow_rule_action_get(softnic,
		pipeline,
		table,
		attr,
		action,
		&rule_action,
		error);
	if (status)
		return NULL;

	/* Flow find/allocate: reuse the existing flow object when a flow
	 * with an identical match already exists (rule update case).
	 */
	new_flow = 0;
	flow = softnic_flow_find(table, &rule_match);
	if (flow == NULL) {
		new_flow = 1;
		flow = calloc(1, sizeof(struct rte_flow));
		if (flow == NULL) {
			rte_flow_error_set(error,
				ENOMEM,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"Not enough memory for new flow");
			return NULL;
		}
	}

	/* Rule add: install (or update) the rule in the pipeline table.
	 * On failure, only a freshly allocated flow is freed; a
	 * pre-existing flow stays registered.
	 */
	status = softnic_pipeline_table_rule_add(softnic,
		pipeline_name,
		table_id,
		&rule_match,
		&rule_action,
		&rule_data);
	if (status) {
		if (new_flow)
			free(flow);

		rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			NULL,
			"Pipeline table rule add failed");
		return NULL;
	}

	/* Flow fill in. */
	memcpy(&flow->match, &rule_match, sizeof(rule_match));
	memcpy(&flow->action, &rule_action, sizeof(rule_action));
	flow->data = rule_data;
	flow->pipeline = pipeline;
	flow->table_id = table_id;

	/* Record this flow as the owner of its meter, if it has one. */
	mtr = flow_action_meter_get(softnic, action);
	if (mtr)
		flow_meter_owner_set(softnic, flow, mtr);

	/* Flow add to list. */
	if (new_flow)
		TAILQ_INSERT_TAIL(&table->flows, flow, node);

	return flow;
}
2143
2144 static int
pmd_flow_destroy(struct rte_eth_dev * dev,struct rte_flow * flow,struct rte_flow_error * error)2145 pmd_flow_destroy(struct rte_eth_dev *dev,
2146 struct rte_flow *flow,
2147 struct rte_flow_error *error)
2148 {
2149 struct pmd_internals *softnic = dev->data->dev_private;
2150 struct softnic_table *table;
2151 int status;
2152
2153 /* Check input parameters. */
2154 if (flow == NULL)
2155 return rte_flow_error_set(error,
2156 EINVAL,
2157 RTE_FLOW_ERROR_TYPE_HANDLE,
2158 NULL,
2159 "Null flow");
2160
2161 table = &flow->pipeline->table[flow->table_id];
2162
2163 /* Rule delete. */
2164 status = softnic_pipeline_table_rule_delete(softnic,
2165 flow->pipeline->name,
2166 flow->table_id,
2167 &flow->match);
2168 if (status)
2169 return rte_flow_error_set(error,
2170 EINVAL,
2171 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2172 NULL,
2173 "Pipeline table rule delete failed");
2174
2175 /* Update dependencies */
2176 if (is_meter_action_enable(softnic, table))
2177 flow_meter_owner_reset(softnic, flow);
2178
2179 /* Flow delete. */
2180 TAILQ_REMOVE(&table->flows, flow, node);
2181 free(flow);
2182
2183 return 0;
2184 }
2185
/* rte_flow flush callback: remove every flow previously added through
 * this driver by walking all pipelines and all of their tables. Rule
 * delete failures are accumulated (the walk always completes and every
 * flow object is freed); a single aggregate error is reported at the
 * end if any delete failed.
 */
static int
pmd_flow_flush(struct rte_eth_dev *dev,
	struct rte_flow_error *error)
{
	struct pmd_internals *softnic = dev->data->dev_private;
	struct pipeline *pipeline;
	int fail_to_del_rule = 0;
	uint32_t i;

	TAILQ_FOREACH(pipeline, &softnic->pipeline_list, node) {
		/* Remove all the flows added to the tables. */
		for (i = 0; i < pipeline->n_tables; i++) {
			struct softnic_table *table = &pipeline->table[i];
			struct rte_flow *flow;
			void *temp;
			int status;

			/* Safe iteration: the current flow is freed inside
			 * the loop body.
			 */
			RTE_TAILQ_FOREACH_SAFE(flow, &table->flows, node,
				temp) {
				/* Rule delete. */
				status = softnic_pipeline_table_rule_delete
					(softnic,
					pipeline->name,
					i,
					&flow->match);
				if (status)
					fail_to_del_rule = 1;
				/* Update dependencies */
				if (is_meter_action_enable(softnic, table))
					flow_meter_owner_reset(softnic, flow);

				/* Flow delete. */
				TAILQ_REMOVE(&table->flows, flow, node);
				free(flow);
			}
		}
	}

	if (fail_to_del_rule)
		return rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			NULL,
			"Some of the rules could not be deleted");

	return 0;
}
2233
2234 static int
pmd_flow_query(struct rte_eth_dev * dev __rte_unused,struct rte_flow * flow,const struct rte_flow_action * action __rte_unused,void * data,struct rte_flow_error * error)2235 pmd_flow_query(struct rte_eth_dev *dev __rte_unused,
2236 struct rte_flow *flow,
2237 const struct rte_flow_action *action __rte_unused,
2238 void *data,
2239 struct rte_flow_error *error)
2240 {
2241 struct rte_table_action_stats_counters stats;
2242 struct softnic_table *table;
2243 struct rte_flow_query_count *flow_stats = data;
2244 int status;
2245
2246 /* Check input parameters. */
2247 if (flow == NULL)
2248 return rte_flow_error_set(error,
2249 EINVAL,
2250 RTE_FLOW_ERROR_TYPE_HANDLE,
2251 NULL,
2252 "Null flow");
2253
2254 if (data == NULL)
2255 return rte_flow_error_set(error,
2256 EINVAL,
2257 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2258 NULL,
2259 "Null data");
2260
2261 table = &flow->pipeline->table[flow->table_id];
2262
2263 /* Rule stats read. */
2264 status = rte_table_action_stats_read(table->a,
2265 flow->data,
2266 &stats,
2267 flow_stats->reset);
2268 if (status)
2269 return rte_flow_error_set(error,
2270 EINVAL,
2271 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2272 NULL,
2273 "Pipeline table rule stats read failed");
2274
2275 /* Fill in flow stats. */
2276 flow_stats->hits_set =
2277 (table->ap->params.stats.n_packets_enabled) ? 1 : 0;
2278 flow_stats->bytes_set =
2279 (table->ap->params.stats.n_bytes_enabled) ? 1 : 0;
2280 flow_stats->hits = stats.n_packets;
2281 flow_stats->bytes = stats.n_bytes;
2282
2283 return 0;
2284 }
2285
/* rte_flow operations exposed by the softnic PMD. Flow isolation is
 * not supported (.isolate left NULL).
 */
const struct rte_flow_ops pmd_flow_ops = {
	.validate = pmd_flow_validate,
	.create = pmd_flow_create,
	.destroy = pmd_flow_destroy,
	.flush = pmd_flow_flush,
	.query = pmd_flow_query,
	.isolate = NULL,
};
2294