/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_flow_classify.h>
#include "rte_flow_classify_parse.h"
#include <rte_flow_driver.h>

struct classify_valid_pattern {
	enum rte_flow_item_type *items;
	parse_filter_t parse_filter;
};

static struct classify_action action;

/* Pattern for IPv4 5-tuple UDP filter */
static enum rte_flow_item_type pattern_ntuple_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple TCP filter */
static enum rte_flow_item_type pattern_ntuple_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple SCTP filter */
static enum rte_flow_item_type pattern_ntuple_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};
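
/*
 * Illustrative only: a minimal rte_flow_item array, built by a hypothetical
 * application, that would match pattern_ntuple_1 above (ETH / IPV4 / UDP /
 * END). The spec and mask values are placeholders, not taken from this
 * library.
 *
 *	struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr.next_proto_id = IPPROTO_UDP,
 *	};
 *	struct rte_flow_item_ipv4 ipv4_mask = { .hdr.next_proto_id = 0xff };
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(53),
 *	};
 *	struct rte_flow_item_udp udp_mask = { .hdr.dst_port = 0xffff };
 *	struct rte_flow_item items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */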

static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error);

static struct classify_valid_pattern classify_supported_patterns[] = {
	/* ntuple */
	{ pattern_ntuple_1, classify_parse_ntuple_filter },
	{ pattern_ntuple_2, classify_parse_ntuple_filter },
	{ pattern_ntuple_3, classify_parse_ntuple_filter },
};

struct classify_action *
classify_get_flow_action(void)
{
	return &action;
}

/* Find the first VOID or non-VOID item pointer */
const struct rte_flow_item *
classify_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}

/* Skip all VOID items of the pattern */
void
classify_pattern_skip_void_item(struct rte_flow_item *items,
		const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = classify_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = classify_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
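
/*
 * Illustrative only: given a caller-supplied pattern such as
 *
 *	ETH, VOID, IPV4, VOID, VOID, UDP, END
 *
 * classify_pattern_skip_void_item() compacts it into the items[] buffer as
 *
 *	ETH, IPV4, UDP, END
 *
 * which is the form classify_match_pattern() below compares against the
 * supported item-type arrays. The caller is assumed to size items[] to hold
 * at least as many entries as the original pattern.
 */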

/* Check if the pattern matches a supported item type array */
static bool
classify_match_pattern(enum rte_flow_item_type *item_array,
		struct rte_flow_item *pattern)
{
	struct rte_flow_item *item = pattern;

	while ((*item_array == item->type) &&
			(*item_array != RTE_FLOW_ITEM_TYPE_END)) {
		item_array++;
		item++;
	}

	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
			item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse filter function that matches the given pattern */
parse_filter_t
classify_find_parse_filter_func(struct rte_flow_item *pattern)
{
	parse_filter_t parse_filter = NULL;
	uint8_t i = 0;

	for (; i < RTE_DIM(classify_supported_patterns); i++) {
		if (classify_match_pattern(classify_supported_patterns[i].items,
				pattern)) {
			parse_filter =
				classify_supported_patterns[i].parse_filter;
			break;
		}
	}

	return parse_filter;
}

#define FLOW_RULE_MIN_PRIORITY 8
#define FLOW_RULE_MAX_PRIORITY 0

#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
	do {\
		item = pattern + index;\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
			index++;\
			item = pattern + index;\
		} \
	} while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)\
	do {\
		act = actions + index;\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			index++;\
			act = actions + index;\
		} \
	} while (0)

/**
 * Please be aware of an assumption shared by all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * This is because the pattern describes packets, which are
 * normally in network (big endian) order.
 */
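
/*
 * Illustrative only (hypothetical values): because item fields are big
 * endian while attr/action fields are CPU order, a caller building a rule
 * converts port numbers and addresses with rte_cpu_to_be_*() but leaves
 * attribute fields such as priority alone.
 *
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = rte_cpu_to_be_16(1024),
 *		.hdr.dst_port = rte_cpu_to_be_16(53),
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 */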

/**
 * Parse the rule to see if it is an n-tuple rule, and fill in the
 * n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action must be COUNT or MARK.
 * The next not void action must be COUNT, MARK or END.
 * The next not void action, if any, must be END.
 * pattern example:
 * ITEM		Spec				Mask
 * ETH		NULL				NULL
 * IPV4		src_addr 192.168.1.20		0xFFFFFFFF
 *		dst_addr 192.167.3.50		0xFFFFFFFF
 *		next_proto_id 17		0xFF
 * UDP/TCP/	src_port 80			0xFFFF
 * SCTP		dst_port 80			0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
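
/*
 * Illustrative only: an actions array this parser accepts, using a
 * hypothetical MARK id and a counter action. VOID actions are skipped by
 * NEXT_ITEM_OF_ACTION below.
 *
 *	struct rte_flow_action_mark mark = { .id = 7 };
 *	struct rte_flow_action_count count = { 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
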
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_action_count *count;
	const struct rte_flow_action_mark *mark_spec;
	uint32_t index;

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
		item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
		return -EINVAL;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					item,
					"Not supported last point for range");
			return -EINVAL;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Not supported by ntuple filter");
			return -EINVAL;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Not supported by ntuple filter");
			return -EINVAL;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ntuple mask");
		return -EINVAL;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
		return -EINVAL;
	}

	ipv4_mask = item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
		ipv4_mask->hdr.type_of_service ||
		ipv4_mask->hdr.total_length ||
		ipv4_mask->hdr.packet_id ||
		ipv4_mask->hdr.fragment_offset ||
		ipv4_mask->hdr.time_to_live ||
		ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -EINVAL;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP or SCTP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
		return -EINVAL;
	}

	/* get the TCP/UDP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ntuple mask");
		return -EINVAL;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
		return -EINVAL;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
			tcp_mask->hdr.recv_ack ||
			tcp_mask->hdr.data_off ||
			tcp_mask->hdr.rx_win ||
			tcp_mask->hdr.cksum ||
			tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		tcp_spec = item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
			udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else {
		sctp_mask = item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
			sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
		return -EINVAL;
	}

	table_type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only ingress is supported.");
		return -EINVAL;
	}

	/* egress is not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Egress is not supported.");
		return -EINVAL;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Invalid priority.");
		return -EINVAL;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority > FLOW_RULE_MIN_PRIORITY)
		filter->priority = FLOW_RULE_MAX_PRIORITY;

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports COUNT and MARK;
	 * check if the first not void action is COUNT or MARK.
	 */
	memset(&action, 0, sizeof(action));
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
		count = act->conf;
		memcpy(&action.act.counter, count, sizeof(action.act.counter));
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
		mark_spec = act->conf;
		memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
		break;
	default:
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, act,
			"Invalid action.");
		return -EINVAL;
	}

	/* check if the next not void action is COUNT, MARK or END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
		count = act->conf;
		memcpy(&action.act.counter, count, sizeof(action.act.counter));
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
		mark_spec = act->conf;
		memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
		break;
	case RTE_FLOW_ACTION_TYPE_END:
		return 0;
	default:
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, act,
			"Invalid action.");
		return -EINVAL;
	}

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, act,
			"Invalid action.");
		return -EINVAL;
	}

	return 0;
}
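
/*
 * Illustrative only: a minimal sketch of how these helpers are typically
 * chained by a caller, assuming "pattern", "actions", "attr" and "error"
 * have been prepared by the application and "items" is large enough to
 * hold the compacted pattern. MAX_ITEMS is a hypothetical application-side
 * bound, not a symbol defined by this library.
 *
 *	struct rte_eth_ntuple_filter ntuple;
 *	struct rte_flow_item items[MAX_ITEMS];
 *	parse_filter_t parse;
 *
 *	classify_pattern_skip_void_item(items, pattern);
 *	parse = classify_find_parse_filter_func(items);
 *	if (parse == NULL || parse(attr, items, actions, &ntuple, error))
 *		return -EINVAL;
 */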