/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>
#include <rte_flow.h>
#include "base/ice_type.h"
#include "base/ice_switch.h"
#include "ice_logs.h"
#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"


#define MAX_QGRP_NUM_TYPE 7
#define MAX_INPUT_SET_BYTE 32
#define ICE_PPP_IPV4_PROTO 0x0021
#define ICE_PPP_IPV6_PROTO 0x0057
#define ICE_IPV4_PROTO_NVGRE 0x002F
#define ICE_SW_PRI_BASE 6

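/*
 * The ICE_SW_INSET_* bitmaps below enumerate the header fields (the
 * "input set") that each supported flow pattern may match on. They are
 * compared against the input set collected while parsing a pattern; see
 * the inset_check logic at the end of ice_switch_parse_pattern().
 */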
#define ICE_SW_INSET_ETHER ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
	ICE_SW_INSET_ETHER | ICE_INSET_VLAN_INNER)
#define ICE_SW_INSET_MAC_QINQ ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
	ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
	ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_QINQ_IPV4_TCP ( \
	ICE_SW_INSET_MAC_QINQ_IPV4 | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_QINQ_IPV4_UDP ( \
	ICE_SW_INSET_MAC_QINQ_IPV4 | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
	ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
	ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_QINQ_IPV6_TCP ( \
	ICE_SW_INSET_MAC_QINQ_IPV6 | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_QINQ_IPV6_UDP ( \
	ICE_SW_INSET_MAC_QINQ_IPV6 | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
	ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
	ICE_INSET_VXLAN_VNI)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
	ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
	ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_PPPOE ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
	ICE_INSET_PPPOE_PROTO)
#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
#define ICE_SW_INSET_MAC_IPV4_ESP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV6_ESP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV4_AH ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV6_AH ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV4_L2TP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV6_L2TP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV4_PFCP ( \
	ICE_SW_INSET_MAC_IPV4 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV6_PFCP ( \
	ICE_SW_INSET_MAC_IPV6 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV4_GTPU ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID)
#define ICE_SW_INSET_MAC_IPV6_GTPU ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID)
#define ICE_SW_INSET_MAC_GTPU_OUTER ( \
	ICE_INSET_DMAC | ICE_INSET_GTPU_TEID)
#define ICE_SW_INSET_MAC_GTPU_EH_OUTER ( \
	ICE_SW_INSET_MAC_GTPU_OUTER | ICE_INSET_GTPU_QFI)
#define ICE_SW_INSET_GTPU_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_GTPU_IPV6 ( \
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST)
#define ICE_SW_INSET_GTPU_IPV4_UDP ( \
	ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_UDP_SRC_PORT | \
	ICE_INSET_UDP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV4_TCP ( \
	ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_TCP_SRC_PORT | \
	ICE_INSET_TCP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV6_UDP ( \
	ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_UDP_SRC_PORT | \
	ICE_INSET_UDP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV6_TCP ( \
	ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_TCP_SRC_PORT | \
	ICE_INSET_TCP_DST_PORT)

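/*
 * Parser output handed to ice_switch_create(): the lookup element array,
 * its length, and the rule info (tunnel type, actions and rule attributes).
 */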
struct sw_meta {
	struct ice_adv_lkup_elem *list;
	uint16_t lkups_num;
	struct ice_adv_rule_info rule_info;
};

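/*
 * Lifecycle status of a rule in hardware; the *_FAILED_ON_RIDRECT values
 * record a remove/add failure during VSI redirection.
 */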
enum ice_sw_fltr_status {
	ICE_SW_FLTR_ADDED,
	ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT,
	ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT,
};

struct ice_switch_filter_conf {
	enum ice_sw_fltr_status fltr_status;

	struct ice_rule_query_data sw_query_data;

	/*
	 * The lookup elements and rule info are saved here when filter
	 * creation succeeds.
	 */
	uint16_t vsi_num;
	uint16_t lkups_num;
	struct ice_adv_lkup_elem *lkups;
	struct ice_adv_rule_info rule_info;
};

static struct ice_flow_parser ice_switch_dist_parser;
static struct ice_flow_parser ice_switch_perm_parser;

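/*
 * Each entry maps a supported pattern to the input-set bits allowed for
 * the outer headers (second column) and the inner/tunnel headers (third
 * column); the last column is per-pattern meta, unused here.
 */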
static struct
ice_pattern_match_item ice_switch_pattern_dist_list[] = {
	{pattern_any, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4_tcp, ICE_SW_INSET_MAC_QINQ_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4_udp, ICE_SW_INSET_MAC_QINQ_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6_tcp, ICE_SW_INSET_MAC_QINQ_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6_udp, ICE_SW_INSET_MAC_QINQ_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
};

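/*
 * Pattern list for the "permission" stage parser. It differs from the
 * distributor list above mainly in the VXLAN/NVGRE entries, which take
 * their input set from the inner headers only.
 */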
static struct
ice_pattern_match_item ice_switch_pattern_perm_list[] = {
	{pattern_any, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4_tcp, ICE_SW_INSET_MAC_QINQ_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4_udp, ICE_SW_INSET_MAC_QINQ_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6_tcp, ICE_SW_INSET_MAC_QINQ_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6_udp, ICE_SW_INSET_MAC_QINQ_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
};

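/*
 * Create one switch rule in hardware from the parsed meta. As an
 * illustration (testpmd syntax, assuming a queue action is used), a rule
 * such as
 *   flow create 0 ingress pattern eth / ipv4 dst is 192.168.0.1 / udp / end
 *     actions queue index 3 / end
 * arrives here with meta->list holding the lookup elements built by
 * ice_switch_parse_pattern() and meta->rule_info carrying the action.
 */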
static int
ice_switch_create(struct ice_adapter *ad,
		  struct rte_flow *flow,
		  void *meta,
		  struct rte_flow_error *error)
{
	int ret = 0;
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_rule_query_data rule_added = {0};
	struct ice_switch_filter_conf *filter_conf_ptr;
	struct ice_adv_lkup_elem *list =
		((struct sw_meta *)meta)->list;
	uint16_t lkups_cnt =
		((struct sw_meta *)meta)->lkups_num;
	struct ice_adv_rule_info *rule_info =
		&((struct sw_meta *)meta)->rule_info;

	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"item number too large for rule");
		goto error;
	}
	if (!list) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"lookup list should not be NULL");
		goto error;
	}

	if (ice_dcf_adminq_need_retry(ad)) {
		rte_flow_error_set(error, EAGAIN,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"DCF is not on");
		goto error;
	}

	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
	if (!ret) {
		filter_conf_ptr = rte_zmalloc("ice_switch_filter",
			sizeof(struct ice_switch_filter_conf), 0);
		if (!filter_conf_ptr) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"No memory for ice_switch_filter");
			goto error;
		}

		filter_conf_ptr->sw_query_data = rule_added;

		filter_conf_ptr->vsi_num =
			ice_get_hw_vsi_num(hw, rule_info->sw_act.vsi_handle);
		filter_conf_ptr->lkups = list;
		filter_conf_ptr->lkups_num = lkups_cnt;
		filter_conf_ptr->rule_info = *rule_info;

		filter_conf_ptr->fltr_status = ICE_SW_FLTR_ADDED;

		flow->rule = filter_conf_ptr;
	} else {
		if (ice_dcf_adminq_need_retry(ad))
			ret = -EAGAIN;
		else
			ret = -EINVAL;

		rte_flow_error_set(error, -ret,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"failed to create switch filter rule");
		goto error;
	}

	rte_free(meta);
	return 0;

error:
	rte_free(list);
	rte_free(meta);

	return -rte_errno;
}

static inline void
ice_switch_filter_rule_free(struct rte_flow *flow)
{
	struct ice_switch_filter_conf *filter_conf_ptr =
		(struct ice_switch_filter_conf *)flow->rule;

	if (filter_conf_ptr)
		rte_free(filter_conf_ptr->lkups);

	rte_free(filter_conf_ptr);
}

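/*
 * Remove a rule from hardware by the ID saved at creation time, then
 * free the driver-side bookkeeping.
 */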
static int
ice_switch_destroy(struct ice_adapter *ad,
		   struct rte_flow *flow,
		   struct rte_flow_error *error)
{
	struct ice_hw *hw = &ad->hw;
	int ret;
	struct ice_switch_filter_conf *filter_conf_ptr;

	filter_conf_ptr = (struct ice_switch_filter_conf *)
			flow->rule;

	if (!filter_conf_ptr ||
	    filter_conf_ptr->fltr_status == ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"no such flow created by switch filter");

		ice_switch_filter_rule_free(flow);

		return -rte_errno;
	}

	if (ice_dcf_adminq_need_retry(ad)) {
		rte_flow_error_set(error, EAGAIN,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"DCF is not on");
		return -rte_errno;
	}

	ret = ice_rem_adv_rule_by_id(hw, &filter_conf_ptr->sw_query_data);
	if (ret) {
		if (ice_dcf_adminq_need_retry(ad))
			ret = -EAGAIN;
		else
			ret = -EINVAL;

		rte_flow_error_set(error, -ret,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"failed to destroy switch filter rule");
		return -rte_errno;
	}

	ice_switch_filter_rule_free(flow);
	return ret;
}

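/*
 * Translate a flow pattern into the ice_adv_lkup_elem array consumed by
 * ice_add_adv_rule(), accumulating the outer/inner input-set bitmaps and
 * refining the tunnel type on the way. Returns true only when the pattern
 * is valid and its input set is covered by the masks of the matched
 * pattern_match_item.
 */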
static bool
ice_switch_parse_pattern(const struct rte_flow_item pattern[],
			 struct rte_flow_error *error,
			 struct ice_adv_lkup_elem *list,
			 uint16_t *lkups_num,
			 enum ice_sw_tunnel_type *tun_type,
			 const struct ice_pattern_match_item *pattern_match_item)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
	const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
				*pppoe_proto_mask;
	const struct rte_flow_item_esp *esp_spec, *esp_mask;
	const struct rte_flow_item_ah *ah_spec, *ah_mask;
	const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	uint64_t outer_input_set = ICE_INSET_NONE;
	uint64_t inner_input_set = ICE_INSET_NONE;
	uint64_t *input = NULL;
	uint16_t input_set_byte = 0;
	bool pppoe_elem_valid = 0;
	bool pppoe_patt_valid = 0;
	bool pppoe_prot_valid = 0;
	bool inner_vlan_valid = 0;
	bool outer_vlan_valid = 0;
	bool tunnel_valid = 0;
	bool profile_rule = 0;
	bool nvgre_valid = 0;
	bool vxlan_valid = 0;
	bool qinq_valid = 0;
	bool ipv6_valid = 0;
	bool ipv4_valid = 0;
	bool udp_valid = 0;
	bool tcp_valid = 0;
	bool gtpu_valid = 0;
	bool gtpu_psc_valid = 0;
	bool inner_ipv4_valid = 0;
	bool inner_ipv6_valid = 0;
	bool inner_tcp_valid = 0;
	bool inner_udp_valid = 0;
	uint16_t j, k, t = 0;

	if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
	    *tun_type == ICE_NON_TUN_QINQ)
		qinq_valid = 1;

	for (item = pattern; item->type !=
			RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"Range is not supported");
			return false;
		}
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
			*tun_type = ICE_SW_TUN_AND_NON_TUN;
			break;

		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;
			if (eth_spec && eth_mask) {
				const uint8_t *a = eth_mask->src.addr_bytes;
				const uint8_t *b = eth_mask->dst.addr_bytes;
				if (tunnel_valid)
					input = &inner_input_set;
				else
					input = &outer_input_set;
				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
					if (a[j]) {
						*input |= ICE_INSET_SMAC;
						break;
					}
				}
				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
					if (b[j]) {
						*input |= ICE_INSET_DMAC;
						break;
					}
				}
				if (eth_mask->type)
					*input |= ICE_INSET_ETHERTYPE;
				list[t].type = (tunnel_valid == 0) ?
					ICE_MAC_OFOS : ICE_MAC_IL;
				struct ice_ether_hdr *h;
				struct ice_ether_hdr *m;
				uint16_t i = 0;
				h = &list[t].h_u.eth_hdr;
				m = &list[t].m_u.eth_hdr;
				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
					if (eth_mask->src.addr_bytes[j]) {
						h->src_addr[j] =
						eth_spec->src.addr_bytes[j];
						m->src_addr[j] =
						eth_mask->src.addr_bytes[j];
						i = 1;
						input_set_byte++;
					}
					if (eth_mask->dst.addr_bytes[j]) {
						h->dst_addr[j] =
						eth_spec->dst.addr_bytes[j];
						m->dst_addr[j] =
						eth_mask->dst.addr_bytes[j];
						i = 1;
						input_set_byte++;
					}
				}
				if (i)
					t++;
				if (eth_mask->type) {
					list[t].type = ICE_ETYPE_OL;
					list[t].h_u.ethertype.ethtype_id =
						eth_spec->type;
					list[t].m_u.ethertype.ethtype_id =
						eth_mask->type;
					input_set_byte += 2;
					t++;
				}
			}
			break;

		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;
			if (tunnel_valid) {
				inner_ipv4_valid = 1;
				input = &inner_input_set;
			} else {
				ipv4_valid = 1;
				input = &outer_input_set;
			}

			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
				    ipv4_mask->hdr.total_length ||
				    ipv4_mask->hdr.packet_id ||
				    ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv4 mask.");
					return false;
				}

				if (ipv4_mask->hdr.src_addr)
					*input |= ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr)
					*input |= ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.time_to_live)
					*input |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id)
					*input |= ICE_INSET_IPV4_PROTO;
				if (ipv4_mask->hdr.type_of_service)
					*input |= ICE_INSET_IPV4_TOS;

				list[t].type = (tunnel_valid == 0) ?
					ICE_IPV4_OFOS : ICE_IPV4_IL;
				if (ipv4_mask->hdr.src_addr) {
					list[t].h_u.ipv4_hdr.src_addr =
						ipv4_spec->hdr.src_addr;
					list[t].m_u.ipv4_hdr.src_addr =
						ipv4_mask->hdr.src_addr;
					input_set_byte += 2;
				}
				if (ipv4_mask->hdr.dst_addr) {
					list[t].h_u.ipv4_hdr.dst_addr =
						ipv4_spec->hdr.dst_addr;
					list[t].m_u.ipv4_hdr.dst_addr =
						ipv4_mask->hdr.dst_addr;
					input_set_byte += 2;
				}
				if (ipv4_mask->hdr.time_to_live) {
					list[t].h_u.ipv4_hdr.time_to_live =
						ipv4_spec->hdr.time_to_live;
					list[t].m_u.ipv4_hdr.time_to_live =
						ipv4_mask->hdr.time_to_live;
					input_set_byte++;
				}
				if (ipv4_mask->hdr.next_proto_id) {
					list[t].h_u.ipv4_hdr.protocol =
						ipv4_spec->hdr.next_proto_id;
					list[t].m_u.ipv4_hdr.protocol =
						ipv4_mask->hdr.next_proto_id;
					input_set_byte++;
				}
				if ((ipv4_spec->hdr.next_proto_id &
				     ipv4_mask->hdr.next_proto_id) ==
				    ICE_IPV4_PROTO_NVGRE)
					*tun_type = ICE_SW_TUN_AND_NON_TUN;
				if (ipv4_mask->hdr.type_of_service) {
					list[t].h_u.ipv4_hdr.tos =
						ipv4_spec->hdr.type_of_service;
					list[t].m_u.ipv4_hdr.tos =
						ipv4_mask->hdr.type_of_service;
					input_set_byte++;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;
			if (tunnel_valid) {
				inner_ipv6_valid = 1;
				input = &inner_input_set;
			} else {
				ipv6_valid = 1;
				input = &outer_input_set;
			}

			if (ipv6_spec && ipv6_mask) {
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv6 mask");
					return false;
				}

				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j]) {
						*input |= ICE_INSET_IPV6_SRC;
						break;
					}
				}
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.dst_addr[j]) {
						*input |= ICE_INSET_IPV6_DST;
						break;
					}
				}
				if (ipv6_mask->hdr.proto)
					*input |= ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits)
					*input |= ICE_INSET_IPV6_HOP_LIMIT;
				if (ipv6_mask->hdr.vtc_flow &
				    rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
					*input |= ICE_INSET_IPV6_TC;

				list[t].type = (tunnel_valid == 0) ?
					ICE_IPV6_OFOS : ICE_IPV6_IL;
				struct ice_ipv6_hdr *f;
				struct ice_ipv6_hdr *s;
				f = &list[t].h_u.ipv6_hdr;
				s = &list[t].m_u.ipv6_hdr;
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j]) {
						f->src_addr[j] =
						ipv6_spec->hdr.src_addr[j];
						s->src_addr[j] =
						ipv6_mask->hdr.src_addr[j];
						input_set_byte++;
					}
					if (ipv6_mask->hdr.dst_addr[j]) {
						f->dst_addr[j] =
						ipv6_spec->hdr.dst_addr[j];
						s->dst_addr[j] =
						ipv6_mask->hdr.dst_addr[j];
						input_set_byte++;
					}
				}
				if (ipv6_mask->hdr.proto) {
					f->next_hdr =
						ipv6_spec->hdr.proto;
					s->next_hdr =
						ipv6_mask->hdr.proto;
					input_set_byte++;
				}
				if (ipv6_mask->hdr.hop_limits) {
					f->hop_limit =
						ipv6_spec->hdr.hop_limits;
					s->hop_limit =
						ipv6_mask->hdr.hop_limits;
					input_set_byte++;
				}
				if (ipv6_mask->hdr.vtc_flow &
				    rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) {
					struct ice_le_ver_tc_flow vtf;
					vtf.u.fld.version = 0;
					vtf.u.fld.flow_label = 0;
					vtf.u.fld.tc = (rte_be_to_cpu_32
						(ipv6_spec->hdr.vtc_flow) &
						RTE_IPV6_HDR_TC_MASK) >>
						RTE_IPV6_HDR_TC_SHIFT;
					f->be_ver_tc_flow =
						CPU_TO_BE32(vtf.u.val);
					vtf.u.fld.tc = (rte_be_to_cpu_32
						(ipv6_mask->hdr.vtc_flow) &
						RTE_IPV6_HDR_TC_MASK) >>
						RTE_IPV6_HDR_TC_SHIFT;
					s->be_ver_tc_flow =
						CPU_TO_BE32(vtf.u.val);
					input_set_byte += 4;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;
			if (tunnel_valid) {
				inner_udp_valid = 1;
				input = &inner_input_set;
			} else {
				udp_valid = 1;
				input = &outer_input_set;
			}

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid UDP mask");
					return false;
				}

				if (udp_mask->hdr.src_port)
					*input |= ICE_INSET_UDP_SRC_PORT;
				if (udp_mask->hdr.dst_port)
					*input |= ICE_INSET_UDP_DST_PORT;

				if (*tun_type == ICE_SW_TUN_VXLAN &&
				    tunnel_valid == 0)
					list[t].type = ICE_UDP_OF;
				else
					list[t].type = ICE_UDP_ILOS;
				if (udp_mask->hdr.src_port) {
					list[t].h_u.l4_hdr.src_port =
						udp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						udp_mask->hdr.src_port;
					input_set_byte += 2;
				}
				if (udp_mask->hdr.dst_port) {
					list[t].h_u.l4_hdr.dst_port =
						udp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						udp_mask->hdr.dst_port;
					input_set_byte += 2;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;
			if (tunnel_valid) {
				inner_tcp_valid = 1;
				input = &inner_input_set;
			} else {
				tcp_valid = 1;
				input = &outer_input_set;
			}

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid TCP mask");
					return false;
				}

				if (tcp_mask->hdr.src_port)
					*input |= ICE_INSET_TCP_SRC_PORT;
				if (tcp_mask->hdr.dst_port)
					*input |= ICE_INSET_TCP_DST_PORT;
				list[t].type = ICE_TCP_IL;
				if (tcp_mask->hdr.src_port) {
					list[t].h_u.l4_hdr.src_port =
						tcp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						tcp_mask->hdr.src_port;
					input_set_byte += 2;
				}
				if (tcp_mask->hdr.dst_port) {
					list[t].h_u.l4_hdr.dst_port =
						tcp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						tcp_mask->hdr.dst_port;
					input_set_byte += 2;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;
			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid SCTP mask");
					return false;
				}
				if (tunnel_valid)
					input = &inner_input_set;
				else
					input = &outer_input_set;

				if (sctp_mask->hdr.src_port)
					*input |= ICE_INSET_SCTP_SRC_PORT;
				if (sctp_mask->hdr.dst_port)
					*input |= ICE_INSET_SCTP_DST_PORT;

				list[t].type = ICE_SCTP_IL;
				if (sctp_mask->hdr.src_port) {
					list[t].h_u.sctp_hdr.src_port =
						sctp_spec->hdr.src_port;
					list[t].m_u.sctp_hdr.src_port =
						sctp_mask->hdr.src_port;
					input_set_byte += 2;
				}
				if (sctp_mask->hdr.dst_port) {
					list[t].h_u.sctp_hdr.dst_port =
						sctp_spec->hdr.dst_port;
					list[t].m_u.sctp_hdr.dst_port =
						sctp_mask->hdr.dst_port;
					input_set_byte += 2;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid VXLAN item");
				return false;
			}
			vxlan_valid = 1;
			tunnel_valid = 1;
			input = &inner_input_set;
			if (vxlan_spec && vxlan_mask) {
				list[t].type = ICE_VXLAN;
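				/* Assemble the 24-bit VNI from its three
				 * bytes.
				 */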
				if (vxlan_mask->vni[0] ||
				    vxlan_mask->vni[1] ||
				    vxlan_mask->vni[2]) {
					list[t].h_u.tnl_hdr.vni =
						(vxlan_spec->vni[2] << 16) |
						(vxlan_spec->vni[1] << 8) |
						vxlan_spec->vni[0];
					list[t].m_u.tnl_hdr.vni =
						(vxlan_mask->vni[2] << 16) |
						(vxlan_mask->vni[1] << 8) |
						vxlan_mask->vni[0];
					*input |= ICE_INSET_VXLAN_VNI;
					input_set_byte += 2;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid NVGRE item");
				return false;
			}
			nvgre_valid = 1;
			tunnel_valid = 1;
			input = &inner_input_set;
			if (nvgre_spec && nvgre_mask) {
				list[t].type = ICE_NVGRE;
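				/* Assemble the 24-bit TNI from its three
				 * bytes.
				 */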
				if (nvgre_mask->tni[0] ||
				    nvgre_mask->tni[1] ||
				    nvgre_mask->tni[2]) {
					list[t].h_u.nvgre_hdr.tni_flow =
						(nvgre_spec->tni[2] << 16) |
						(nvgre_spec->tni[1] << 8) |
						nvgre_spec->tni[0];
					list[t].m_u.nvgre_hdr.tni_flow =
						(nvgre_mask->tni[2] << 16) |
						(nvgre_mask->tni[1] << 8) |
						nvgre_mask->tni[0];
					*input |= ICE_INSET_NVGRE_TNI;
					input_set_byte += 2;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			/* Check if VLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vlan_spec && vlan_mask) ||
			    (vlan_spec && !vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid VLAN item");
				return false;
			}

			if (qinq_valid) {
				if (!outer_vlan_valid)
					outer_vlan_valid = 1;
				else
					inner_vlan_valid = 1;
			}

			input = &outer_input_set;

			if (vlan_spec && vlan_mask) {
				if (qinq_valid) {
					if (!inner_vlan_valid) {
						list[t].type = ICE_VLAN_EX;
						*input |=
							ICE_INSET_VLAN_OUTER;
					} else {
						list[t].type = ICE_VLAN_IN;
						*input |=
							ICE_INSET_VLAN_INNER;
					}
				} else {
					list[t].type = ICE_VLAN_OFOS;
					*input |= ICE_INSET_VLAN_INNER;
				}

				if (vlan_mask->tci) {
					list[t].h_u.vlan_hdr.vlan =
						vlan_spec->tci;
					list[t].m_u.vlan_hdr.vlan =
						vlan_mask->tci;
					input_set_byte += 2;
				}
				if (vlan_mask->inner_type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid VLAN input set.");
					return false;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_PPPOED:
		case RTE_FLOW_ITEM_TYPE_PPPOES:
			pppoe_spec = item->spec;
			pppoe_mask = item->mask;
			/* Check if PPPoE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_spec && pppoe_mask) ||
			    (pppoe_spec && !pppoe_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe item");
				return false;
			}
			pppoe_patt_valid = 1;
			input = &outer_input_set;
			if (pppoe_spec && pppoe_mask) {
				/* Check pppoe mask and update input set */
				if (pppoe_mask->length ||
				    pppoe_mask->code ||
				    pppoe_mask->version_type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid pppoe mask");
					return false;
				}
				list[t].type = ICE_PPPOE;
				if (pppoe_mask->session_id) {
					list[t].h_u.pppoe_hdr.session_id =
						pppoe_spec->session_id;
					list[t].m_u.pppoe_hdr.session_id =
						pppoe_mask->session_id;
					*input |= ICE_INSET_PPPOE_SESSION;
					input_set_byte += 2;
				}
				t++;
				pppoe_elem_valid = 1;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
			pppoe_proto_spec = item->spec;
			pppoe_proto_mask = item->mask;
			/* Check if PPPoE optional proto_id item
			 * is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_proto_spec && pppoe_proto_mask) ||
			    (pppoe_proto_spec && !pppoe_proto_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe proto item");
				return false;
			}
			input = &outer_input_set;
			if (pppoe_proto_spec && pppoe_proto_mask) {
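				/* If a preceding PPPoE item already emitted
				 * an ICE_PPPOE element, back up and merge
				 * the proto_id into it.
				 */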
				if (pppoe_elem_valid)
					t--;
				list[t].type = ICE_PPPOE;
				if (pppoe_proto_mask->proto_id) {
					list[t].h_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_spec->proto_id;
					list[t].m_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_mask->proto_id;
					*input |= ICE_INSET_PPPOE_PROTO;
					input_set_byte += 2;
					pppoe_prot_valid = 1;
				}
				if ((pppoe_proto_mask->proto_id &
				     pppoe_proto_spec->proto_id) !=
				    CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
				    (pppoe_proto_mask->proto_id &
				     pppoe_proto_spec->proto_id) !=
				    CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
					*tun_type = ICE_SW_TUN_PPPOE_PAY;
				else
					*tun_type = ICE_SW_TUN_PPPOE;
				t++;
			}

			break;

		case RTE_FLOW_ITEM_TYPE_ESP:
			esp_spec = item->spec;
			esp_mask = item->mask;
			if ((esp_spec && !esp_mask) ||
			    (!esp_spec && esp_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid esp item");
				return false;
			}
			/* Check esp mask and update input set */
			if (esp_mask && esp_mask->hdr.seq) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid esp mask");
				return false;
			}
			input = &outer_input_set;
			if (!esp_spec && !esp_mask && !(*input)) {
				profile_rule = 1;
				if (ipv6_valid && udp_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_NAT_T;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
				else if (ipv4_valid)
					goto inset_check;
			} else if (esp_spec && esp_mask &&
				   esp_mask->hdr.spi) {
				if (udp_valid)
					list[t].type = ICE_NAT_T;
				else
					list[t].type = ICE_ESP;
				list[t].h_u.esp_hdr.spi =
					esp_spec->hdr.spi;
				list[t].m_u.esp_hdr.spi =
					esp_mask->hdr.spi;
				*input |= ICE_INSET_ESP_SPI;
				input_set_byte += 4;
				t++;
			}

			if (!profile_rule) {
				if (ipv6_valid && udp_valid)
					*tun_type = ICE_SW_TUN_IPV6_NAT_T;
				else if (ipv4_valid && udp_valid)
					*tun_type = ICE_SW_TUN_IPV4_NAT_T;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_IPV6_ESP;
				else if (ipv4_valid)
					*tun_type = ICE_SW_TUN_IPV4_ESP;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_AH:
			ah_spec = item->spec;
			ah_mask = item->mask;
			if ((ah_spec && !ah_mask) ||
			    (!ah_spec && ah_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid ah item");
				return false;
			}
			/* Check ah mask and update input set */
			if (ah_mask &&
			    (ah_mask->next_hdr ||
			     ah_mask->payload_len ||
			     ah_mask->seq_num ||
			     ah_mask->reserved)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid ah mask");
				return false;
			}

			input = &outer_input_set;
			if (!ah_spec && !ah_mask && !(*input)) {
				profile_rule = 1;
				if (ipv6_valid && udp_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_NAT_T;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
				else if (ipv4_valid)
					goto inset_check;
			} else if (ah_spec && ah_mask &&
				   ah_mask->spi) {
				list[t].type = ICE_AH;
				list[t].h_u.ah_hdr.spi =
					ah_spec->spi;
				list[t].m_u.ah_hdr.spi =
					ah_mask->spi;
				*input |= ICE_INSET_AH_SPI;
				input_set_byte += 4;
				t++;
			}

			if (!profile_rule) {
				if (udp_valid)
					goto inset_check;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_IPV6_AH;
				else if (ipv4_valid)
					*tun_type = ICE_SW_TUN_IPV4_AH;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
			l2tp_spec = item->spec;
			l2tp_mask = item->mask;
			if ((l2tp_spec && !l2tp_mask) ||
			    (!l2tp_spec && l2tp_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid l2tp item");
				return false;
			}

			input = &outer_input_set;
			if (!l2tp_spec && !l2tp_mask && !(*input)) {
				if (ipv6_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
				else if (ipv4_valid)
					goto inset_check;
			} else if (l2tp_spec && l2tp_mask &&
				   l2tp_mask->session_id) {
				list[t].type = ICE_L2TPV3;
				list[t].h_u.l2tpv3_sess_hdr.session_id =
					l2tp_spec->session_id;
				list[t].m_u.l2tpv3_sess_hdr.session_id =
					l2tp_mask->session_id;
				*input |= ICE_INSET_L2TPV3OIP_SESSION_ID;
				input_set_byte += 4;
				t++;
			}

			if (!profile_rule) {
				if (ipv6_valid)
					*tun_type =
						ICE_SW_TUN_IPV6_L2TPV3;
				else if (ipv4_valid)
					*tun_type =
						ICE_SW_TUN_IPV4_L2TPV3;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_PFCP:
			pfcp_spec = item->spec;
			pfcp_mask = item->mask;
			/* Check if PFCP item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pfcp_spec && pfcp_mask) ||
			    (pfcp_spec && !pfcp_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid PFCP item");
				return false;
			}
			if (pfcp_spec && pfcp_mask) {
				/* Check pfcp mask and update input set */
				if (pfcp_mask->msg_type ||
				    pfcp_mask->msg_len ||
				    pfcp_mask->seid) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid pfcp mask");
					return false;
				}
				if (pfcp_mask->s_field &&
				    pfcp_spec->s_field == 0x01 &&
				    ipv6_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
				else if (pfcp_mask->s_field &&
					 pfcp_spec->s_field == 0x01)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
				else if (pfcp_mask->s_field &&
					 !pfcp_spec->s_field &&
					 ipv6_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
				else if (pfcp_mask->s_field &&
					 !pfcp_spec->s_field)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
				else
					return false;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_GTPU:
			gtp_spec = item->spec;
			gtp_mask = item->mask;
			if (gtp_spec && !gtp_mask) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid GTP item");
				return false;
			}
			if (gtp_spec && gtp_mask) {
				if (gtp_mask->v_pt_rsv_flags ||
				    gtp_mask->msg_type ||
				    gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid GTP mask");
					return false;
				}
				input = &outer_input_set;
				if (gtp_mask->teid)
					*input |= ICE_INSET_GTPU_TEID;
				list[t].type = ICE_GTP;
				list[t].h_u.gtp_hdr.teid =
					gtp_spec->teid;
				list[t].m_u.gtp_hdr.teid =
					gtp_mask->teid;
				input_set_byte += 4;
				t++;
			}
			tunnel_valid = 1;
			gtpu_valid = 1;
			break;

		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;
			if (gtp_psc_spec && !gtp_psc_mask) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid GTPU_EH item");
				return false;
			}
			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->hdr.type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid GTPU_EH mask");
					return false;
				}
				input = &outer_input_set;
				if (gtp_psc_mask->hdr.qfi)
					*input |= ICE_INSET_GTPU_QFI;
				list[t].type = ICE_GTP;
				list[t].h_u.gtp_hdr.qfi =
					gtp_psc_spec->hdr.qfi;
				list[t].m_u.gtp_hdr.qfi =
					gtp_psc_mask->hdr.qfi;
				input_set_byte += 1;
				t++;
			}
			gtpu_psc_valid = 1;
			break;

		case RTE_FLOW_ITEM_TYPE_VOID:
			break;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, pattern,
				"Invalid pattern item.");
			return false;
		}
	}

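	/* Refine the tunnel type for double-VLAN (QinQ) patterns. */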
	if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
	    inner_vlan_valid && outer_vlan_valid)
		*tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
	else if (*tun_type == ICE_SW_TUN_PPPOE &&
		 inner_vlan_valid && outer_vlan_valid)
		*tun_type = ICE_SW_TUN_PPPOE_QINQ;
	else if (*tun_type == ICE_NON_TUN &&
		 inner_vlan_valid && outer_vlan_valid)
		*tun_type = ICE_NON_TUN_QINQ;
	else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
		 inner_vlan_valid && outer_vlan_valid)
		*tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;

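	/* For PPPoE patterns without a proto_id item, derive the tunnel
	 * type from the VLAN/L3/L4 items seen above.
	 */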
	if (pppoe_patt_valid && !pppoe_prot_valid) {
		if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
		else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
		else if (inner_vlan_valid && outer_vlan_valid)
			*tun_type = ICE_SW_TUN_PPPOE_QINQ;
		else if (ipv6_valid && udp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
		else if (ipv6_valid && tcp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
		else if (ipv4_valid && udp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
		else if (ipv4_valid && tcp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
		else if (ipv6_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6;
		else if (ipv4_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4;
		else
			*tun_type = ICE_SW_TUN_PPPOE;
	}

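	/* For GTP-U patterns, derive the tunnel type from the outer IP
	 * version, the optional PSC extension header, and the inner L3/L4
	 * items.
	 */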
	if (gtpu_valid && gtpu_psc_valid) {
		if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP;
		else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP;
		else if (ipv4_valid && inner_ipv4_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4;
		else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP;
		else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP;
		else if (ipv4_valid && inner_ipv6_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6;
		else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP;
		else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP;
		else if (ipv6_valid && inner_ipv4_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4;
		else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP;
		else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP;
		else if (ipv6_valid && inner_ipv6_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6;
		else if (ipv4_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
		else if (ipv6_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
	} else if (gtpu_valid) {
		if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_UDP;
		else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_TCP;
		else if (ipv4_valid && inner_ipv4_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
		else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_UDP;
		else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_TCP;
		else if (ipv4_valid && inner_ipv6_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
		else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_UDP;
		else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_TCP;
		else if (ipv6_valid && inner_ipv4_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
		else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_UDP;
		else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_TCP;
		else if (ipv6_valid && inner_ipv6_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
		else if (ipv4_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
		else if (ipv6_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
	}

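	/* NO_PAY rules need the ICE_GTP_NO_PAY lookup type instead. */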
	if (*tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY ||
	    *tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
		for (k = 0; k < t; k++) {
			if (list[k].type == ICE_GTP)
				list[k].type = ICE_GTP_NO_PAY;
		}
	}

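	/* Refine plain (non-tunnelled) patterns by the headers present. */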
	if (*tun_type == ICE_NON_TUN) {
		if (vxlan_valid)
			*tun_type = ICE_SW_TUN_VXLAN;
		else if (nvgre_valid)
			*tun_type = ICE_SW_TUN_NVGRE;
		else if (ipv4_valid && tcp_valid)
			*tun_type = ICE_SW_IPV4_TCP;
		else if (ipv4_valid && udp_valid)
			*tun_type = ICE_SW_IPV4_UDP;
		else if (ipv6_valid && tcp_valid)
			*tun_type = ICE_SW_IPV6_TCP;
		else if (ipv6_valid && udp_valid)
			*tun_type = ICE_SW_IPV6_UDP;
	}

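	/* A rule may match at most MAX_INPUT_SET_BYTE bytes of input set. */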
	if (input_set_byte > MAX_INPUT_SET_BYTE) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "too much input set");
		return false;
	}

	*lkups_num = t;

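	/*
	 * Reject patterns that match nothing at all (unless the tunnel
	 * type is a profile rule) or that use fields outside the input
	 * set masks allowed by the matched pattern entry.
	 */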
inset_check:
	if ((!outer_input_set && !inner_input_set &&
	    !ice_is_prof_rule(*tun_type)) || (outer_input_set &
	    ~pattern_match_item->input_set_mask_o) ||
	    (inner_input_set & ~pattern_match_item->input_set_mask_i))
		return false;

	return true;
}

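/*
 * Parse actions for a rule created through the DCF (Device Config
 * Function): a packet may only be forwarded to a VF's VSI or dropped.
 */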
static int
ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
			    const struct rte_flow_action *actions,
			    uint32_t priority,
			    struct rte_flow_error *error,
			    struct ice_adv_rule_info *rule_info)
{
	const struct rte_flow_action_vf *act_vf;
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;

	for (action = actions; action->type !=
			RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_VF:
			rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
			act_vf = action->conf;

			if (act_vf->id >= ad->real_hw.num_vfs &&
			    !act_vf->original) {
				rte_flow_error_set(error,
					EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
					actions,
					"Invalid vf id");
				return -rte_errno;
			}

			if (act_vf->original)
				rule_info->sw_act.vsi_handle =
					ad->real_hw.avf.bus.func;
			else
				rule_info->sw_act.vsi_handle = act_vf->id;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
			break;

		default:
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				actions,
				"Invalid action type");
			return -rte_errno;
		}
	}

	rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
	rule_info->sw_act.flag = ICE_FLTR_RX;
	rule_info->rx = 1;
	/* Recipe priority 0 is the lowest and rte_flow priority 0 is the
	 * highest, so convert the rte_flow priority into a recipe
	 * priority (rte_flow priority 0 maps to ICE_SW_PRI_BASE).
	 */
	rule_info->priority = ICE_SW_PRI_BASE - priority;

	return 0;
}

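/*
 * Parse actions for a rule on the PF: forward to a single queue,
 * forward to a queue group (expressed via the RSS action), or drop.
 */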
static int
ice_switch_parse_action(struct ice_pf *pf,
			const struct rte_flow_action *actions,
			uint32_t priority,
			struct rte_flow_error *error,
			struct ice_adv_rule_info *rule_info)
{
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *dev_data = pf->adapter->pf.dev_data;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_rss *act_qgrop;
	uint16_t base_queue, i;
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;
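	/* Queue group sizes the switch filter supports; the RSS action's
	 * queue_num must match one of these exactly.
	 */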
	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
		2, 4, 8, 16, 32, 64, 128};

	base_queue = pf->base_queue + vsi->base_queue;
	for (action = actions; action->type !=
			RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_RSS:
			act_qgrop = action->conf;
			if (act_qgrop->queue_num <= 1)
				goto error;
			rule_info->sw_act.fltr_act = ICE_FWD_TO_QGRP;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_qgrop->queue[0];
			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
				if (act_qgrop->queue_num ==
					valid_qgrop_number[i])
					break;
			}
			if (i == MAX_QGRP_NUM_TYPE)
				goto error;
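			/* The queue region must lie within the port's Rx
			 * queue range and its queues must be contiguous.
			 */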
			if ((act_qgrop->queue[0] +
				act_qgrop->queue_num) >
				dev_data->nb_rx_queues)
				goto error1;
			for (i = 0; i < act_qgrop->queue_num - 1; i++)
				if (act_qgrop->queue[i + 1] !=
					act_qgrop->queue[i] + 1)
					goto error2;
			rule_info->sw_act.qgrp_size =
				act_qgrop->queue_num;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			act_q = action->conf;
			if (act_q->index >= dev_data->nb_rx_queues)
				goto error;
			rule_info->sw_act.fltr_act = ICE_FWD_TO_Q;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_q->index;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
			break;

		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		default:
			goto error;
		}
	}

	rule_info->sw_act.vsi_handle = vsi->idx;
	rule_info->rx = 1;
	rule_info->sw_act.src = vsi->idx;
	/* Recipe priority 0 is the lowest and rte_flow priority 0 is the
	 * highest, so convert the rte_flow priority into a recipe
	 * priority (rte_flow priority 0 maps to ICE_SW_PRI_BASE).
	 */
	rule_info->priority = ICE_SW_PRI_BASE - priority;

	return 0;

error:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Invalid action type or queue number");
	return -rte_errno;

error1:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Invalid queue region indexes");
	return -rte_errno;

error2:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Discontinuous queue region");
	return -rte_errno;
}

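/*
 * A switch rule must carry exactly one fate action (VF, RSS, QUEUE or
 * DROP); VOID actions are ignored.
 */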
static int
ice_switch_check_action(const struct rte_flow_action *actions,
			struct rte_flow_error *error)
{
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;
	uint16_t actions_num = 0;

	for (action = actions; action->type !=
			RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_VF:
		case RTE_FLOW_ACTION_TYPE_RSS:
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		case RTE_FLOW_ACTION_TYPE_DROP:
			actions_num++;
			break;
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		default:
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				actions,
				"Invalid action type");
			return -rte_errno;
		}
	}

	if (actions_num != 1) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
			actions,
			"Invalid action number");
		return -rte_errno;
	}

	return 0;
}

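/*
 * Top-level parser entry: size and build the lookup list from the
 * pattern, validate the actions, and return the result through *meta
 * for rule creation.
 */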
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		uint32_t priority,
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	uint16_t vlan_num = 0;
	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}

		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
			vlan_num++;

		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

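	/* Two VLAN items denote a QinQ pattern. */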
	if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan_num == 2)
		tun_type = ICE_NON_TUN_QINQ;

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr = rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	pattern_match_item =
		ice_search_pattern_match_item(ad, pattern, array, array_len,
					      error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	if (!ice_switch_parse_pattern(pattern, error, list, &lkups_num,
				      &tun_type, pattern_match_item)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret)
		goto error;

	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions,
						  priority, error, &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, priority, error,
					      &rule_info);

	if (ret)
		goto error;

	if (meta) {
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}

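/* The switch filter has no counter support, so query always fails. */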
static int
ice_switch_query(struct ice_adapter *ad __rte_unused,
		struct rte_flow *flow __rte_unused,
		struct rte_flow_query_count *count __rte_unused,
		struct rte_flow_error *error)
{
	rte_flow_error_set(error, EINVAL,
		RTE_FLOW_ERROR_TYPE_HANDLE,
		NULL,
		"count action not supported by switch filter");

	return -rte_errno;
}

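/*
 * Redirect an existing flow to a new VSI, e.g. after the target VSI's
 * number changes: remove the old rule if it is still present in the
 * switch, then replay it against the updated VSI context.
 */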
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata;
	struct ice_switch_filter_conf *filter_conf_ptr =
		(struct ice_switch_filter_conf *)flow->rule;
	struct ice_rule_query_data added_rdata = { 0 };
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_ref = NULL;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	rdata = &filter_conf_ptr->sw_query_data;

	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

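	/*
	 * fltr_status records how far a previous redirect got: an ADDED
	 * rule is removed and replayed; if removal failed last time,
	 * retry from the removal step; if re-adding failed, retry from
	 * the add step.
	 */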
	switch (filter_conf_ptr->fltr_status) {
	case ICE_SW_FLTR_ADDED:
		list_head = &sw->recp_list[rdata->rid].filt_rules;
		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_adv_fltr_mgmt_list_entry,
				    list_entry) {
			rinfo = list_itr->rule_info;
			if ((rinfo.fltr_rule_id == rdata->rule_id &&
			    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
			    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
			    (rinfo.fltr_rule_id == rdata->rule_id &&
			    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)) {
				lkups_cnt = list_itr->lkups_cnt;

				lkups_dp = (struct ice_adv_lkup_elem *)
					ice_memdup(hw, list_itr->lkups,
						   sizeof(*list_itr->lkups) *
						   lkups_cnt,
						   ICE_NONDMA_TO_NONDMA);
				if (!lkups_dp) {
					PMD_DRV_LOG(ERR,
						    "Failed to allocate memory.");
					return -EINVAL;
				}
				lkups_ref = lkups_dp;

				if (rinfo.sw_act.fltr_act ==
				    ICE_FWD_TO_VSI_LIST) {
					rinfo.sw_act.vsi_handle =
						rd->vsi_handle;
					rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
				}
				break;
			}
		}

		if (!lkups_ref)
			return -EINVAL;

		goto rmv_rule;
	case ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT:
		/* Recover VSI context */
		hw->vsi_ctx[rd->vsi_handle]->vsi_num = filter_conf_ptr->vsi_num;
		rinfo = filter_conf_ptr->rule_info;
		lkups_cnt = filter_conf_ptr->lkups_num;
		lkups_ref = filter_conf_ptr->lkups;

		if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
			rinfo.sw_act.vsi_handle = rd->vsi_handle;
			rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
		}

		goto rmv_rule;
	case ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT:
		rinfo = filter_conf_ptr->rule_info;
		lkups_cnt = filter_conf_ptr->lkups_num;
		lkups_ref = filter_conf_ptr->lkups;

		goto add_rule;
	default:
		return -EINVAL;
	}

rmv_rule:
	if (ice_dcf_adminq_need_retry(ad)) {
		PMD_DRV_LOG(WARNING, "DCF is not on");
		ret = -EAGAIN;
		goto out;
	}

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, lkups_ref, lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		filter_conf_ptr->fltr_status =
			ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT;
		ret = -EINVAL;
		goto out;
	}

add_rule:
	if (ice_dcf_adminq_need_retry(ad)) {
		PMD_DRV_LOG(WARNING, "DCF is not on");
		ret = -EAGAIN;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_ref, lkups_cnt,
			       &rinfo, &added_rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		filter_conf_ptr->fltr_status =
			ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT;
		ret = -EINVAL;
	} else {
		filter_conf_ptr->sw_query_data = added_rdata;
		/* Save VSI number for failure recovery */
		filter_conf_ptr->vsi_num = rd->new_vsi_num;
		filter_conf_ptr->fltr_status = ICE_SW_FLTR_ADDED;
	}

out:
	if (ret == -EINVAL)
		if (ice_dcf_adminq_need_retry(ad))
			ret = -EAGAIN;

	ice_free(hw, lkups_dp);
	return ret;
}

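/*
 * Register the switch parser when the engine initializes: the
 * permission-stage parser in pipe mode, otherwise the
 * distributor-stage parser.
 */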
static int
ice_switch_init(struct ice_adapter *ad)
{
	int ret = 0;
	struct ice_flow_parser *dist_parser;
	struct ice_flow_parser *perm_parser;

	if (ad->devargs.pipe_mode_support) {
		perm_parser = &ice_switch_perm_parser;
		ret = ice_register_parser(perm_parser, ad);
	} else {
		dist_parser = &ice_switch_dist_parser;
		ret = ice_register_parser(dist_parser, ad);
	}
	return ret;
}

static void
ice_switch_uninit(struct ice_adapter *ad)
{
	struct ice_flow_parser *dist_parser;
	struct ice_flow_parser *perm_parser;

	if (ad->devargs.pipe_mode_support) {
		perm_parser = &ice_switch_perm_parser;
		ice_unregister_parser(perm_parser, ad);
	} else {
		dist_parser = &ice_switch_dist_parser;
		ice_unregister_parser(dist_parser, ad);
	}
}

static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};

static struct
ice_flow_parser ice_switch_dist_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_list,
	.array_len = RTE_DIM(ice_switch_pattern_dist_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm_list,
	.array_len = RTE_DIM(ice_switch_pattern_perm_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};

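/* Register the switch engine with the generic flow framework at startup. */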
RTE_INIT(ice_sw_engine_init)
{
	struct ice_flow_engine *engine = &ice_switch_engine;
	ice_register_flow_engine(engine);
}