/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>
#include <rte_flow.h>
#include "base/ice_type.h"
#include "base/ice_switch.h"
#include "ice_logs.h"
#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"


#define MAX_QGRP_NUM_TYPE	7
#define MAX_INPUT_SET_BYTE	32
#define ICE_PPP_IPV4_PROTO	0x0021
#define ICE_PPP_IPV6_PROTO	0x0057
#define ICE_IPV4_PROTO_NVGRE	0x002F

#define ICE_SW_INSET_ETHER ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
	ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
	ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_MAC_PPPOE  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
	ICE_INSET_PPPOE_PROTO)
#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
#define ICE_SW_INSET_MAC_IPV4_ESP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV6_ESP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV4_AH ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV6_AH ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV4_L2TP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV6_L2TP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV4_PFCP ( \
	ICE_SW_INSET_MAC_IPV4 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV6_PFCP ( \
	ICE_SW_INSET_MAC_IPV6 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)

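/* Parser output handed from pattern/action parsing to rule creation:
 * the switch lookup list, its length, and the advanced rule info
 * derived from the flow actions.
 */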
struct sw_meta {
	struct ice_adv_lkup_elem *list;
	uint16_t lkups_num;
	struct ice_adv_rule_info rule_info;
};

static struct ice_flow_parser ice_switch_dist_parser_os;
static struct ice_flow_parser ice_switch_dist_parser_comms;
static struct ice_flow_parser ice_switch_perm_parser_os;
static struct ice_flow_parser ice_switch_perm_parser_comms;

static struct
ice_pattern_match_item ice_switch_pattern_dist_os[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_arp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
};

static struct
ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_arp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4,
			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6,
			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4,
			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6,
			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_esp,
			ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp,
			ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv6_esp,
			ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,
			ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv4_ah,
			ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,
			ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_l2tp,
			ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,
			ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
};

static struct
ice_pattern_match_item ice_switch_pattern_perm_os[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_arp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
};

static struct
ice_pattern_match_item ice_switch_pattern_perm_comms[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_arp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4,
			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6,
			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4,
			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6,
			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_tcp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_udp,
			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_esp,
			ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp,
			ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv6_esp,
			ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,
			ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv4_ah,
			ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,
			ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_l2tp,
			ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,
			ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
};

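/* Create a switch filter rule from the parsed meta data. On success the
 * rule handle returned by ice_add_adv_rule() is copied into flow->rule
 * so that the rule can later be destroyed or redirected.
 */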
static int
ice_switch_create(struct ice_adapter *ad,
		struct rte_flow *flow,
		void *meta,
		struct rte_flow_error *error)
{
	int ret = 0;
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_rule_query_data rule_added = {0};
	struct ice_rule_query_data *filter_ptr;
	struct ice_adv_lkup_elem *list =
		((struct sw_meta *)meta)->list;
	uint16_t lkups_cnt =
		((struct sw_meta *)meta)->lkups_num;
	struct ice_adv_rule_info *rule_info =
		&((struct sw_meta *)meta)->rule_info;

	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"item number too large for rule");
		goto error;
	}
	if (!list) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"lookup list should not be NULL");
		goto error;
	}
	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
	if (!ret) {
		filter_ptr = rte_zmalloc("ice_switch_filter",
			sizeof(struct ice_rule_query_data), 0);
		if (!filter_ptr) {
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for ice_switch_filter");
			goto error;
		}
		flow->rule = filter_ptr;
		rte_memcpy(filter_ptr,
			&rule_added,
			sizeof(struct ice_rule_query_data));
	} else {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"switch filter create flow failed");
		goto error;
	}

	rte_free(list);
	rte_free(meta);
	return 0;

error:
	rte_free(list);
	rte_free(meta);

	return -rte_errno;
}

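/* Remove the switch rule referenced by flow->rule from hardware. */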
static int
ice_switch_destroy(struct ice_adapter *ad,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct ice_hw *hw = &ad->hw;
	int ret;
	struct ice_rule_query_data *filter_ptr;

	filter_ptr = (struct ice_rule_query_data *)
		flow->rule;

	if (!filter_ptr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"no such flow"
			" created by switch filter");
		return -rte_errno;
	}

	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
	if (ret) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"failed to destroy switch filter rule");
		return -rte_errno;
	}

	rte_free(filter_ptr);
	return ret;
}

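/* Release the rule handle allocated in ice_switch_create(). */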
static void
ice_switch_filter_rule_free(struct rte_flow *flow)
{
	rte_free(flow->rule);
}

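/* Walk the rte_flow pattern and build the switch lookup list. Fills
 * list/lkups_num/tun_type and returns the matched input set bits
 * (ICE_INSET_*); returns 0 on failure with the flow error set.
 */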
static uint64_t
ice_switch_inset_get(const struct rte_flow_item pattern[],
		struct rte_flow_error *error,
		struct ice_adv_lkup_elem *list,
		uint16_t *lkups_num,
		enum ice_sw_tunnel_type *tun_type)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
	const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
				*pppoe_proto_mask;
	const struct rte_flow_item_esp *esp_spec, *esp_mask;
	const struct rte_flow_item_ah *ah_spec, *ah_mask;
	const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
	uint64_t input_set = ICE_INSET_NONE;
	uint16_t input_set_byte = 0;
	bool pppoe_elem_valid = 0;
	bool pppoe_patt_valid = 0;
	bool pppoe_prot_valid = 0;
	bool tunnel_valid = 0;
	bool profile_rule = 0;
	bool nvgre_valid = 0;
	bool vxlan_valid = 0;
	bool ipv6_valid = 0;
	bool ipv4_valid = 0;
	bool udp_valid = 0;
	bool tcp_valid = 0;
	uint16_t j, t = 0;

	for (item = pattern; item->type !=
			RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Range not supported");
			return 0;
		}
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;
			if (eth_spec && eth_mask) {
				const uint8_t *a = eth_mask->src.addr_bytes;
				const uint8_t *b = eth_mask->dst.addr_bytes;
				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
					if (a[j] && tunnel_valid) {
						input_set |=
							ICE_INSET_TUN_SMAC;
						break;
					} else if (a[j]) {
						input_set |=
							ICE_INSET_SMAC;
						break;
					}
				}
				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
					if (b[j] && tunnel_valid) {
						input_set |=
							ICE_INSET_TUN_DMAC;
						break;
					} else if (b[j]) {
						input_set |=
							ICE_INSET_DMAC;
						break;
					}
				}
				if (eth_mask->type)
					input_set |= ICE_INSET_ETHERTYPE;
				list[t].type = (tunnel_valid == 0) ?
					ICE_MAC_OFOS : ICE_MAC_IL;
				struct ice_ether_hdr *h;
				struct ice_ether_hdr *m;
				uint16_t i = 0;
				h = &list[t].h_u.eth_hdr;
				m = &list[t].m_u.eth_hdr;
				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
					if (eth_mask->src.addr_bytes[j]) {
						h->src_addr[j] =
						eth_spec->src.addr_bytes[j];
						m->src_addr[j] =
						eth_mask->src.addr_bytes[j];
						i = 1;
						input_set_byte++;
					}
					if (eth_mask->dst.addr_bytes[j]) {
						h->dst_addr[j] =
						eth_spec->dst.addr_bytes[j];
						m->dst_addr[j] =
						eth_mask->dst.addr_bytes[j];
						i = 1;
						input_set_byte++;
					}
				}
				if (i)
					t++;
				if (eth_mask->type) {
					list[t].type = ICE_ETYPE_OL;
					list[t].h_u.ethertype.ethtype_id =
						eth_spec->type;
					list[t].m_u.ethertype.ethtype_id =
						eth_mask->type;
					input_set_byte += 2;
					t++;
				}
			}
			break;

		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;
			ipv4_valid = 1;
			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
					ipv4_mask->hdr.total_length ||
					ipv4_mask->hdr.packet_id ||
					ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
					return 0;
				}

				if (tunnel_valid) {
					if (ipv4_mask->hdr.type_of_service)
						input_set |=
							ICE_INSET_TUN_IPV4_TOS;
					if (ipv4_mask->hdr.src_addr)
						input_set |=
							ICE_INSET_TUN_IPV4_SRC;
					if (ipv4_mask->hdr.dst_addr)
						input_set |=
							ICE_INSET_TUN_IPV4_DST;
					if (ipv4_mask->hdr.time_to_live)
						input_set |=
							ICE_INSET_TUN_IPV4_TTL;
					if (ipv4_mask->hdr.next_proto_id)
						input_set |=
						ICE_INSET_TUN_IPV4_PROTO;
				} else {
					if (ipv4_mask->hdr.src_addr)
						input_set |= ICE_INSET_IPV4_SRC;
					if (ipv4_mask->hdr.dst_addr)
						input_set |= ICE_INSET_IPV4_DST;
					if (ipv4_mask->hdr.time_to_live)
						input_set |= ICE_INSET_IPV4_TTL;
					if (ipv4_mask->hdr.next_proto_id)
						input_set |=
						ICE_INSET_IPV4_PROTO;
					if (ipv4_mask->hdr.type_of_service)
						input_set |=
							ICE_INSET_IPV4_TOS;
				}
				list[t].type = (tunnel_valid == 0) ?
					ICE_IPV4_OFOS : ICE_IPV4_IL;
				if (ipv4_mask->hdr.src_addr) {
					list[t].h_u.ipv4_hdr.src_addr =
						ipv4_spec->hdr.src_addr;
					list[t].m_u.ipv4_hdr.src_addr =
						ipv4_mask->hdr.src_addr;
					input_set_byte += 2;
				}
				if (ipv4_mask->hdr.dst_addr) {
					list[t].h_u.ipv4_hdr.dst_addr =
						ipv4_spec->hdr.dst_addr;
					list[t].m_u.ipv4_hdr.dst_addr =
						ipv4_mask->hdr.dst_addr;
					input_set_byte += 2;
				}
				if (ipv4_mask->hdr.time_to_live) {
					list[t].h_u.ipv4_hdr.time_to_live =
						ipv4_spec->hdr.time_to_live;
					list[t].m_u.ipv4_hdr.time_to_live =
						ipv4_mask->hdr.time_to_live;
					input_set_byte++;
				}
				if (ipv4_mask->hdr.next_proto_id) {
					list[t].h_u.ipv4_hdr.protocol =
						ipv4_spec->hdr.next_proto_id;
					list[t].m_u.ipv4_hdr.protocol =
						ipv4_mask->hdr.next_proto_id;
					input_set_byte++;
				}
				if ((ipv4_spec->hdr.next_proto_id &
					ipv4_mask->hdr.next_proto_id) ==
					ICE_IPV4_PROTO_NVGRE)
					*tun_type = ICE_SW_TUN_AND_NON_TUN;
				if (ipv4_mask->hdr.type_of_service) {
					list[t].h_u.ipv4_hdr.tos =
						ipv4_spec->hdr.type_of_service;
					list[t].m_u.ipv4_hdr.tos =
						ipv4_mask->hdr.type_of_service;
					input_set_byte++;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;
			ipv6_valid = 1;
			if (ipv6_spec && ipv6_mask) {
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid IPv6 mask");
					return 0;
				}

				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j] &&
						tunnel_valid) {
						input_set |=
						ICE_INSET_TUN_IPV6_SRC;
						break;
					} else if (ipv6_mask->hdr.src_addr[j]) {
						input_set |= ICE_INSET_IPV6_SRC;
						break;
					}
				}
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.dst_addr[j] &&
						tunnel_valid) {
						input_set |=
						ICE_INSET_TUN_IPV6_DST;
						break;
					} else if (ipv6_mask->hdr.dst_addr[j]) {
						input_set |= ICE_INSET_IPV6_DST;
						break;
					}
				}
				if (ipv6_mask->hdr.proto &&
					tunnel_valid)
					input_set |=
						ICE_INSET_TUN_IPV6_NEXT_HDR;
				else if (ipv6_mask->hdr.proto)
					input_set |=
						ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits &&
					tunnel_valid)
					input_set |=
						ICE_INSET_TUN_IPV6_HOP_LIMIT;
				else if (ipv6_mask->hdr.hop_limits)
					input_set |=
						ICE_INSET_IPV6_HOP_LIMIT;
				if ((ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK)) &&
					tunnel_valid)
					input_set |=
						ICE_INSET_TUN_IPV6_TC;
				else if (ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;

				list[t].type = (tunnel_valid == 0) ?
					ICE_IPV6_OFOS : ICE_IPV6_IL;
				struct ice_ipv6_hdr *f;
				struct ice_ipv6_hdr *s;
				f = &list[t].h_u.ipv6_hdr;
				s = &list[t].m_u.ipv6_hdr;
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j]) {
						f->src_addr[j] =
						ipv6_spec->hdr.src_addr[j];
						s->src_addr[j] =
						ipv6_mask->hdr.src_addr[j];
						input_set_byte++;
					}
					if (ipv6_mask->hdr.dst_addr[j]) {
						f->dst_addr[j] =
						ipv6_spec->hdr.dst_addr[j];
						s->dst_addr[j] =
						ipv6_mask->hdr.dst_addr[j];
						input_set_byte++;
					}
				}
				if (ipv6_mask->hdr.proto) {
					f->next_hdr =
						ipv6_spec->hdr.proto;
					s->next_hdr =
						ipv6_mask->hdr.proto;
					input_set_byte++;
				}
				if (ipv6_mask->hdr.hop_limits) {
					f->hop_limit =
						ipv6_spec->hdr.hop_limits;
					s->hop_limit =
						ipv6_mask->hdr.hop_limits;
					input_set_byte++;
				}
				if (ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK)) {
					struct ice_le_ver_tc_flow vtf;
					vtf.u.fld.version = 0;
					vtf.u.fld.flow_label = 0;
					vtf.u.fld.tc = (rte_be_to_cpu_32
						(ipv6_spec->hdr.vtc_flow) &
							RTE_IPV6_HDR_TC_MASK) >>
							RTE_IPV6_HDR_TC_SHIFT;
					f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
					vtf.u.fld.tc = (rte_be_to_cpu_32
						(ipv6_mask->hdr.vtc_flow) &
							RTE_IPV6_HDR_TC_MASK) >>
							RTE_IPV6_HDR_TC_SHIFT;
					s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
					input_set_byte += 4;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;
			udp_valid = 1;
			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
					return 0;
				}

				if (tunnel_valid) {
					if (udp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_UDP_SRC_PORT;
					if (udp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_UDP_DST_PORT;
				} else {
					if (udp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_UDP_SRC_PORT;
					if (udp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_UDP_DST_PORT;
				}
				if (*tun_type == ICE_SW_TUN_VXLAN &&
						tunnel_valid == 0)
					list[t].type = ICE_UDP_OF;
				else
					list[t].type = ICE_UDP_ILOS;
				if (udp_mask->hdr.src_port) {
					list[t].h_u.l4_hdr.src_port =
						udp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						udp_mask->hdr.src_port;
					input_set_byte += 2;
				}
				if (udp_mask->hdr.dst_port) {
					list[t].h_u.l4_hdr.dst_port =
						udp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						udp_mask->hdr.dst_port;
					input_set_byte += 2;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;
			tcp_valid = 1;
			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
					tcp_mask->hdr.recv_ack ||
					tcp_mask->hdr.data_off ||
					tcp_mask->hdr.tcp_flags ||
					tcp_mask->hdr.rx_win ||
					tcp_mask->hdr.cksum ||
					tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid TCP mask");
					return 0;
				}

				if (tunnel_valid) {
					if (tcp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_TCP_SRC_PORT;
					if (tcp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_TCP_DST_PORT;
				} else {
					if (tcp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TCP_SRC_PORT;
					if (tcp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TCP_DST_PORT;
				}
				list[t].type = ICE_TCP_IL;
				if (tcp_mask->hdr.src_port) {
					list[t].h_u.l4_hdr.src_port =
						tcp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						tcp_mask->hdr.src_port;
					input_set_byte += 2;
				}
				if (tcp_mask->hdr.dst_port) {
					list[t].h_u.l4_hdr.dst_port =
						tcp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						tcp_mask->hdr.dst_port;
					input_set_byte += 2;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;
			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid SCTP mask");
					return 0;
				}

				if (tunnel_valid) {
					if (sctp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_SCTP_SRC_PORT;
					if (sctp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_SCTP_DST_PORT;
				} else {
					if (sctp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_SCTP_SRC_PORT;
					if (sctp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_SCTP_DST_PORT;
				}
				list[t].type = ICE_SCTP_IL;
				if (sctp_mask->hdr.src_port) {
					list[t].h_u.sctp_hdr.src_port =
						sctp_spec->hdr.src_port;
					list[t].m_u.sctp_hdr.src_port =
						sctp_mask->hdr.src_port;
					input_set_byte += 2;
				}
				if (sctp_mask->hdr.dst_port) {
					list[t].h_u.sctp_hdr.dst_port =
						sctp_spec->hdr.dst_port;
					list[t].m_u.sctp_hdr.dst_port =
						sctp_mask->hdr.dst_port;
					input_set_byte += 2;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return 0;
			}
			vxlan_valid = 1;
			tunnel_valid = 1;
			if (vxlan_spec && vxlan_mask) {
				list[t].type = ICE_VXLAN;
				if (vxlan_mask->vni[0] ||
					vxlan_mask->vni[1] ||
					vxlan_mask->vni[2]) {
					list[t].h_u.tnl_hdr.vni =
						(vxlan_spec->vni[2] << 16) |
						(vxlan_spec->vni[1] << 8) |
						vxlan_spec->vni[0];
					list[t].m_u.tnl_hdr.vni =
						(vxlan_mask->vni[2] << 16) |
						(vxlan_mask->vni[1] << 8) |
						vxlan_mask->vni[0];
					input_set |=
						ICE_INSET_TUN_VXLAN_VNI;
					input_set_byte += 2;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return 0;
			}
			nvgre_valid = 1;
			tunnel_valid = 1;
			if (nvgre_spec && nvgre_mask) {
				list[t].type = ICE_NVGRE;
				if (nvgre_mask->tni[0] ||
					nvgre_mask->tni[1] ||
					nvgre_mask->tni[2]) {
					list[t].h_u.nvgre_hdr.tni_flow =
						(nvgre_spec->tni[2] << 16) |
						(nvgre_spec->tni[1] << 8) |
						nvgre_spec->tni[0];
					list[t].m_u.nvgre_hdr.tni_flow =
						(nvgre_mask->tni[2] << 16) |
						(nvgre_mask->tni[1] << 8) |
						nvgre_mask->tni[0];
					input_set |=
						ICE_INSET_TUN_NVGRE_TNI;
					input_set_byte += 2;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			/* Check if VLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vlan_spec && vlan_mask) ||
			    (vlan_spec && !vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VLAN item");
				return 0;
			}
			if (vlan_spec && vlan_mask) {
				list[t].type = ICE_VLAN_OFOS;
				if (vlan_mask->tci) {
					list[t].h_u.vlan_hdr.vlan =
						vlan_spec->tci;
					list[t].m_u.vlan_hdr.vlan =
						vlan_mask->tci;
					input_set |= ICE_INSET_VLAN_OUTER;
					input_set_byte += 2;
				}
				if (vlan_mask->inner_type) {
					list[t].h_u.vlan_hdr.type =
						vlan_spec->inner_type;
					list[t].m_u.vlan_hdr.type =
						vlan_mask->inner_type;
					input_set |= ICE_INSET_ETHERTYPE;
					input_set_byte += 2;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_PPPOED:
		case RTE_FLOW_ITEM_TYPE_PPPOES:
			pppoe_spec = item->spec;
			pppoe_mask = item->mask;
			/* Check if PPPoE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_spec && pppoe_mask) ||
				(pppoe_spec && !pppoe_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe item");
				return 0;
			}
			pppoe_patt_valid = 1;
			if (pppoe_spec && pppoe_mask) {
				/* Check pppoe mask and update input set */
				if (pppoe_mask->length ||
					pppoe_mask->code ||
					pppoe_mask->version_type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid pppoe mask");
					return 0;
				}
				list[t].type = ICE_PPPOE;
				if (pppoe_mask->session_id) {
					list[t].h_u.pppoe_hdr.session_id =
						pppoe_spec->session_id;
					list[t].m_u.pppoe_hdr.session_id =
						pppoe_mask->session_id;
					input_set |= ICE_INSET_PPPOE_SESSION;
					input_set_byte += 2;
				}
				t++;
				pppoe_elem_valid = 1;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
			pppoe_proto_spec = item->spec;
			pppoe_proto_mask = item->mask;
			/* Check if PPPoE optional proto_id item
			 * is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_proto_spec && pppoe_proto_mask) ||
				(pppoe_proto_spec && !pppoe_proto_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe proto item");
				return 0;
			}
			if (pppoe_proto_spec && pppoe_proto_mask) {
				if (pppoe_elem_valid)
					t--;
				list[t].type = ICE_PPPOE;
				if (pppoe_proto_mask->proto_id) {
					list[t].h_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_spec->proto_id;
					list[t].m_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_mask->proto_id;
					input_set |= ICE_INSET_PPPOE_PROTO;
					input_set_byte += 2;
					pppoe_prot_valid = 1;
				}
				if ((pppoe_proto_mask->proto_id &
					pppoe_proto_spec->proto_id) !=
					    CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
					(pppoe_proto_mask->proto_id &
					pppoe_proto_spec->proto_id) !=
					    CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
					*tun_type = ICE_SW_TUN_PPPOE_PAY;
				else
					*tun_type = ICE_SW_TUN_PPPOE;
				t++;
			}

			break;

		case RTE_FLOW_ITEM_TYPE_ESP:
			esp_spec = item->spec;
			esp_mask = item->mask;
			if ((esp_spec && !esp_mask) ||
				(!esp_spec && esp_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid esp item");
				return 0;
			}
			/* Check esp mask and update input set */
			if (esp_mask && esp_mask->hdr.seq) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid esp mask");
				return 0;
			}

			if (!esp_spec && !esp_mask && !input_set) {
				profile_rule = 1;
				if (ipv6_valid && udp_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_NAT_T;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
				else if (ipv4_valid)
					return 0;
			} else if (esp_spec && esp_mask &&
						esp_mask->hdr.spi) {
				if (udp_valid)
					list[t].type = ICE_NAT_T;
				else
					list[t].type = ICE_ESP;
				list[t].h_u.esp_hdr.spi =
					esp_spec->hdr.spi;
				list[t].m_u.esp_hdr.spi =
					esp_mask->hdr.spi;
				input_set |= ICE_INSET_ESP_SPI;
				input_set_byte += 4;
				t++;
			}

			if (!profile_rule) {
				if (ipv6_valid && udp_valid)
					*tun_type = ICE_SW_TUN_IPV6_NAT_T;
				else if (ipv4_valid && udp_valid)
					*tun_type = ICE_SW_TUN_IPV4_NAT_T;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_IPV6_ESP;
				else if (ipv4_valid)
					*tun_type = ICE_SW_TUN_IPV4_ESP;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_AH:
			ah_spec = item->spec;
			ah_mask = item->mask;
			if ((ah_spec && !ah_mask) ||
				(!ah_spec && ah_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid ah item");
				return 0;
			}
			/* Check ah mask and update input set */
			if (ah_mask &&
				(ah_mask->next_hdr ||
				ah_mask->payload_len ||
				ah_mask->seq_num ||
				ah_mask->reserved)) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid ah mask");
				return 0;
			}

			if (!ah_spec && !ah_mask && !input_set) {
				profile_rule = 1;
				if (ipv6_valid && udp_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_NAT_T;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
				else if (ipv4_valid)
					return 0;
			} else if (ah_spec && ah_mask &&
						ah_mask->spi) {
				list[t].type = ICE_AH;
				list[t].h_u.ah_hdr.spi =
					ah_spec->spi;
				list[t].m_u.ah_hdr.spi =
					ah_mask->spi;
				input_set |= ICE_INSET_AH_SPI;
				input_set_byte += 4;
				t++;
			}

			if (!profile_rule) {
				if (udp_valid)
					return 0;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_IPV6_AH;
				else if (ipv4_valid)
					*tun_type = ICE_SW_TUN_IPV4_AH;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
			l2tp_spec = item->spec;
			l2tp_mask = item->mask;
			if ((l2tp_spec && !l2tp_mask) ||
				(!l2tp_spec && l2tp_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid l2tp item");
				return 0;
			}

			if (!l2tp_spec && !l2tp_mask && !input_set) {
				if (ipv6_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
				else if (ipv4_valid)
					return 0;
			} else if (l2tp_spec && l2tp_mask &&
						l2tp_mask->session_id) {
				list[t].type = ICE_L2TPV3;
				list[t].h_u.l2tpv3_sess_hdr.session_id =
					l2tp_spec->session_id;
				list[t].m_u.l2tpv3_sess_hdr.session_id =
					l2tp_mask->session_id;
				input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
				input_set_byte += 4;
				t++;
			}

			if (!profile_rule) {
				if (ipv6_valid)
					*tun_type =
					ICE_SW_TUN_IPV6_L2TPV3;
				else if (ipv4_valid)
					*tun_type =
					ICE_SW_TUN_IPV4_L2TPV3;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_PFCP:
			pfcp_spec = item->spec;
			pfcp_mask = item->mask;
			/* Check if PFCP item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pfcp_spec && pfcp_mask) ||
			    (pfcp_spec && !pfcp_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid PFCP item");
				return 0;
			}
			if (pfcp_spec && pfcp_mask) {
				/* Check pfcp mask and update input set */
				if (pfcp_mask->msg_type ||
					pfcp_mask->msg_len ||
					pfcp_mask->seid) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid pfcp mask");
					return 0;
				}
				if (pfcp_mask->s_field &&
					pfcp_spec->s_field == 0x01 &&
					ipv6_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
				else if (pfcp_mask->s_field &&
					pfcp_spec->s_field == 0x01)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
				else if (pfcp_mask->s_field &&
					!pfcp_spec->s_field &&
					ipv6_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
				else if (pfcp_mask->s_field &&
					!pfcp_spec->s_field)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
				else
					return 0;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_VOID:
			break;

		default:
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
				   "Invalid pattern item.");
			goto out;
		}
	}

	if (pppoe_patt_valid && !pppoe_prot_valid) {
		if (ipv6_valid && udp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
		else if (ipv6_valid && tcp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
		else if (ipv4_valid && udp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
		else if (ipv4_valid && tcp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
		else if (ipv6_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6;
		else if (ipv4_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4;
		else
			*tun_type = ICE_SW_TUN_PPPOE;
	}

	if (*tun_type == ICE_NON_TUN) {
		if (vxlan_valid)
			*tun_type = ICE_SW_TUN_VXLAN;
		else if (nvgre_valid)
			*tun_type = ICE_SW_TUN_NVGRE;
		else if (ipv4_valid && tcp_valid)
			*tun_type = ICE_SW_IPV4_TCP;
		else if (ipv4_valid && udp_valid)
			*tun_type = ICE_SW_IPV4_UDP;
		else if (ipv6_valid && tcp_valid)
			*tun_type = ICE_SW_IPV6_TCP;
		else if (ipv6_valid && udp_valid)
			*tun_type = ICE_SW_IPV6_UDP;
	}

	if (input_set_byte > MAX_INPUT_SET_BYTE) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item,
			"too many input set bytes");
		return 0;
	}

	*lkups_num = t;

	return input_set;
out:
	return 0;
}

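/* Parse flow actions in DCF mode. Only VF forwarding and DROP are
 * accepted; the target VSI handle is taken from the VF action.
 */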
static int
ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
			    const struct rte_flow_action *actions,
			    struct rte_flow_error *error,
			    struct ice_adv_rule_info *rule_info)
{
	const struct rte_flow_action_vf *act_vf;
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;

	for (action = actions; action->type !=
				RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_VF:
			rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
			act_vf = action->conf;

			if (act_vf->id >= ad->real_hw.num_vfs &&
				!act_vf->original) {
				rte_flow_error_set(error,
					EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
					actions,
					"Invalid vf id");
				return -rte_errno;
			}

			if (act_vf->original)
				rule_info->sw_act.vsi_handle =
					ad->real_hw.avf.bus.func;
			else
				rule_info->sw_act.vsi_handle = act_vf->id;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
			break;

		default:
			rte_flow_error_set(error,
					   EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
					   actions,
					   "Invalid action type");
			return -rte_errno;
		}
	}

	rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
	rule_info->sw_act.flag = ICE_FLTR_RX;
	rule_info->rx = 1;
	rule_info->priority = 5;

	return 0;
}

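/* Parse flow actions in PF mode: forward to a single queue, to a queue
 * group (an RSS action with a power-of-two group size from 2 to 128 and
 * contiguous queue indexes), or drop the packet.
 */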
static int
ice_switch_parse_action(struct ice_pf *pf,
		const struct rte_flow_action *actions,
		struct rte_flow_error *error,
		struct ice_adv_rule_info *rule_info)
{
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_rss *act_qgrop;
	uint16_t base_queue, i;
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;
	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
		 2, 4, 8, 16, 32, 64, 128};

	base_queue = pf->base_queue + vsi->base_queue;
	for (action = actions; action->type !=
			RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_RSS:
			act_qgrop = action->conf;
			if (act_qgrop->queue_num <= 1)
				goto error;
			rule_info->sw_act.fltr_act =
				ICE_FWD_TO_QGRP;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_qgrop->queue[0];
			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
				if (act_qgrop->queue_num ==
					valid_qgrop_number[i])
					break;
			}
			if (i == MAX_QGRP_NUM_TYPE)
				goto error;
			if ((act_qgrop->queue[0] +
				act_qgrop->queue_num) >
				dev->data->nb_rx_queues)
				goto error1;
			for (i = 0; i < act_qgrop->queue_num - 1; i++)
				if (act_qgrop->queue[i + 1] !=
					act_qgrop->queue[i] + 1)
					goto error2;
			rule_info->sw_act.qgrp_size =
				act_qgrop->queue_num;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			act_q = action->conf;
			if (act_q->index >= dev->data->nb_rx_queues)
				goto error;
			rule_info->sw_act.fltr_act =
				ICE_FWD_TO_Q;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_q->index;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			rule_info->sw_act.fltr_act =
				ICE_DROP_PACKET;
			break;

		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		default:
			goto error;
		}
	}

	rule_info->sw_act.vsi_handle = vsi->idx;
	rule_info->rx = 1;
	rule_info->sw_act.src = vsi->idx;
	rule_info->priority = 5;

	return 0;

error:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Invalid action type or queue number");
	return -rte_errno;

error1:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Invalid queue region indexes");
	return -rte_errno;

error2:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Discontinuous queue region");
	return -rte_errno;
}

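/* A switch rule must carry exactly one terminating action (VF, RSS,
 * QUEUE or DROP); VOID actions are skipped.
 */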
static int
ice_switch_check_action(const struct rte_flow_action *actions,
			    struct rte_flow_error *error)
{
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;
	uint16_t actions_num = 0;

	for (action = actions; action->type !=
				RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_VF:
		case RTE_FLOW_ACTION_TYPE_RSS:
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		case RTE_FLOW_ACTION_TYPE_DROP:
			actions_num++;
			break;
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		default:
			rte_flow_error_set(error,
					   EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
					   actions,
					   "Invalid action type");
			return -rte_errno;
		}
	}

	if (actions_num != 1) {
		rte_flow_error_set(error,
				   EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   actions,
				   "Invalid action number");
		return -rte_errno;
	}

	return 0;
}

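/* Profile rules match on a switch profile ID rather than on an explicit
 * input set, so an empty input set is valid for them.
 */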
static bool
ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
{
	switch (tun_type) {
	case ICE_SW_TUN_PROFID_IPV6_ESP:
	case ICE_SW_TUN_PROFID_IPV6_AH:
	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
		return true;
	default:
		break;
	}

	return false;
}

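/* Top-level parser entry: validate the pattern against the supported
 * pattern table, build the lookup list and rule info, and hand them
 * back through *meta for ice_switch_create(). As an illustrative
 * example, a testpmd rule such as
 *   flow create 0 ingress pattern eth / ipv4 dst is 1.2.3.4 / end
 *        actions queue index 3 / end
 * is matched against pattern_eth_ipv4 above and becomes a single-queue
 * forwarding rule.
 */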
1644 static int
ice_switch_parse_pattern_action(struct ice_adapter * ad,struct ice_pattern_match_item * array,uint32_t array_len,const struct rte_flow_item pattern[],const struct rte_flow_action actions[],void ** meta,struct rte_flow_error * error)1645 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1646 		struct ice_pattern_match_item *array,
1647 		uint32_t array_len,
1648 		const struct rte_flow_item pattern[],
1649 		const struct rte_flow_action actions[],
1650 		void **meta,
1651 		struct rte_flow_error *error)
1652 {
1653 	struct ice_pf *pf = &ad->pf;
1654 	uint64_t inputset = 0;
1655 	int ret = 0;
1656 	struct sw_meta *sw_meta_ptr = NULL;
1657 	struct ice_adv_rule_info rule_info;
1658 	struct ice_adv_lkup_elem *list = NULL;
1659 	uint16_t lkups_num = 0;
1660 	const struct rte_flow_item *item = pattern;
1661 	uint16_t item_num = 0;
1662 	enum ice_sw_tunnel_type tun_type =
1663 			ICE_NON_TUN;
1664 	struct ice_pattern_match_item *pattern_match_item = NULL;
1665 
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;

			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}
		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr = rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	pattern_match_item =
		ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

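	/* The parsed input set must be non-empty (unless this is a
	 * profile-ID rule) and a subset of the matched pattern's mask.
	 */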
	inputset = ice_switch_inset_get
		(pattern, error, list, &lkups_num, &tun_type);
	if ((!inputset && !ice_is_profile_rule(tun_type)) ||
	    (inputset & ~pattern_match_item->input_set_mask)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret)
		goto error;

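	/* With the Device Config Function (DCF) enabled, actions are
	 * parsed by the DCF path; otherwise by the local PF path.
	 */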
	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
						  &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, error, &rule_info);

	if (ret)
		goto error;

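	/* Hand the lookup list and rule info to the caller via meta;
	 * if no meta is wanted, release them here.
	 */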
	if (meta) {
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}

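/* The switch filter has no flow counters, so the COUNT query is always
 * rejected.
 */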
static int
ice_switch_query(struct ice_adapter *ad __rte_unused,
		struct rte_flow *flow __rte_unused,
		struct rte_flow_query_count *count __rte_unused,
		struct rte_flow_error *error)
{
	rte_flow_error_set(error, EINVAL,
		RTE_FLOW_ERROR_TYPE_HANDLE,
		NULL,
		"count action not supported by switch filter");

	return -rte_errno;
}

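/* Redirect an existing rule to a new VSI: locate the matching filter
 * entry, remove the old rule, update the VSI context and replay the
 * rule with the updated forwarding target.
 */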
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

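	/* Walk the recipe's filter list for the rule being redirected
	 * and duplicate its lookups before the old rule is removed.
	 */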
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
			    list_entry) {
		rinfo = list_itr->rule_info;
		if ((rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
		    (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)) {
			lkups_cnt = list_itr->lkups_cnt;
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);

			if (!lkups_dp) {
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
				return -EINVAL;
			}

			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				rinfo.sw_act.vsi_handle = rd->vsi_handle;
				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
			}
			break;
		}
	}

	if (!lkups_dp)
		return -EINVAL;

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
			       lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		ret = -EINVAL;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
			       &rinfo, rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		ret = -EINVAL;
	}

out:
	ice_free(hw, lkups_dp);
	return ret;
}

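/* Register the switch parser matching the active DDP package: the
 * permission-stage parser in pipeline mode, the distributor parser
 * otherwise.
 */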
static int
ice_switch_init(struct ice_adapter *ad)
{
	int ret = 0;
	struct ice_flow_parser *dist_parser;
	struct ice_flow_parser *perm_parser;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		dist_parser = &ice_switch_dist_parser_comms;
	else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
		dist_parser = &ice_switch_dist_parser_os;
	else
		return -EINVAL;

	if (ad->devargs.pipe_mode_support) {
		if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
			perm_parser = &ice_switch_perm_parser_comms;
		else
			perm_parser = &ice_switch_perm_parser_os;

		ret = ice_register_parser(perm_parser, ad);
	} else {
		ret = ice_register_parser(dist_parser, ad);
	}
	return ret;
}

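/* Mirror of ice_switch_init(): unregister whichever parser was
 * registered for the active package and pipeline mode.
 */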
static void
ice_switch_uninit(struct ice_adapter *ad)
{
	struct ice_flow_parser *dist_parser;
	struct ice_flow_parser *perm_parser;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		dist_parser = &ice_switch_dist_parser_comms;
	else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
		dist_parser = &ice_switch_dist_parser_os;
	else
		return;

	if (ad->devargs.pipe_mode_support) {
		if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
			perm_parser = &ice_switch_perm_parser_comms;
		else
			perm_parser = &ice_switch_perm_parser_os;

		ice_unregister_parser(perm_parser, ad);
	} else {
		ice_unregister_parser(dist_parser, ad);
	}
}

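/* Switch flow engine ops exposed to the generic flow framework. */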
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};

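/* Parser instances: one pattern table per package type (OS default vs
 * comms) and per pipeline stage (distributor vs permission).
 */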
static struct
ice_flow_parser ice_switch_dist_parser_os = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_os,
	.array_len = RTE_DIM(ice_switch_pattern_dist_os),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct
ice_flow_parser ice_switch_dist_parser_comms = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_comms,
	.array_len = RTE_DIM(ice_switch_pattern_dist_comms),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct
ice_flow_parser ice_switch_perm_parser_os = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm_os,
	.array_len = RTE_DIM(ice_switch_pattern_perm_os),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};

static struct
ice_flow_parser ice_switch_perm_parser_comms = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm_comms,
	.array_len = RTE_DIM(ice_switch_pattern_perm_comms),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};

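/* Register the switch engine with the flow framework at startup. */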
RTE_INIT(ice_sw_engine_init)
{
	struct ice_flow_engine *engine = &ice_switch_engine;
	ice_register_flow_engine(engine);
}