xref: /dpdk/drivers/net/ice/base/ice_switch.c (revision a62f0950)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2021 Intel Corporation
3  */
4 
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
7 #include "ice_flow.h"
8 
9 #define ICE_ETH_DA_OFFSET		0
10 #define ICE_ETH_ETHTYPE_OFFSET		12
11 #define ICE_ETH_VLAN_TCI_OFFSET		14
12 #define ICE_MAX_VLAN_ID			0xFFF
13 #define ICE_IPV6_ETHER_ID		0x86DD
14 #define ICE_IPV4_NVGRE_PROTO_ID		0x002F
15 #define ICE_PPP_IPV6_PROTO_ID		0x0057
16 #define ICE_TCP_PROTO_ID		0x06
17 #define ICE_GTPU_PROFILE		24
18 #define ICE_ETH_P_8021Q			0x8100
19 #define ICE_MPLS_ETHER_ID		0x8847
20 
21 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
22  * struct to configure any switch filter rules.
23  * {DA (6 bytes), SA(6 bytes),
24  * Ether type (2 bytes for header without VLAN tag) OR
25  * VLAN tag (4 bytes for header with VLAN tag) }
26  *
27  * Word on Hardcoded values
28  * byte 0 = 0x2: to identify it as locally administered DA MAC
29  * byte 6 = 0x2: to identify it as locally administered SA MAC
30  * byte 12 = 0x81 & byte 13 = 0x00:
31  *	In case of VLAN filter first two bytes defines ether type (0x8100)
32  *	and remaining two bytes are placeholder for programming a given VLAN ID
33  *	In case of Ether type filter it is treated as header without VLAN tag
34  *	and byte 12 and 13 is used to program a given Ether type instead
35  */
36 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
37 							0x2, 0, 0, 0, 0, 0,
38 							0x81, 0, 0, 0};
39 
40 struct ice_dummy_pkt_offsets {
41 	enum ice_protocol_type type;
42 	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
43 };
44 
45 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
46 	{ ICE_MAC_OFOS,		0 },
47 	{ ICE_ETYPE_OL,		12 },
48 	{ ICE_IPV4_OFOS,	14 },
49 	{ ICE_NVGRE,		34 },
50 	{ ICE_MAC_IL,		42 },
51 	{ ICE_IPV4_IL,		56 },
52 	{ ICE_TCP_IL,		76 },
53 	{ ICE_PROTOCOL_LAST,	0 },
54 };
55 
56 static const u8 dummy_gre_tcp_packet[] = {
57 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
58 	0x00, 0x00, 0x00, 0x00,
59 	0x00, 0x00, 0x00, 0x00,
60 
61 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
62 
63 	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
64 	0x00, 0x00, 0x00, 0x00,
65 	0x00, 0x2F, 0x00, 0x00,
66 	0x00, 0x00, 0x00, 0x00,
67 	0x00, 0x00, 0x00, 0x00,
68 
69 	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
70 	0x00, 0x00, 0x00, 0x00,
71 
72 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
73 	0x00, 0x00, 0x00, 0x00,
74 	0x00, 0x00, 0x00, 0x00,
75 	0x08, 0x00,
76 
77 	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
78 	0x00, 0x00, 0x00, 0x00,
79 	0x00, 0x06, 0x00, 0x00,
80 	0x00, 0x00, 0x00, 0x00,
81 	0x00, 0x00, 0x00, 0x00,
82 
83 	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
84 	0x00, 0x00, 0x00, 0x00,
85 	0x00, 0x00, 0x00, 0x00,
86 	0x50, 0x02, 0x20, 0x00,
87 	0x00, 0x00, 0x00, 0x00
88 };
89 
90 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
91 	{ ICE_MAC_OFOS,		0 },
92 	{ ICE_ETYPE_OL,		12 },
93 	{ ICE_IPV4_OFOS,	14 },
94 	{ ICE_NVGRE,		34 },
95 	{ ICE_MAC_IL,		42 },
96 	{ ICE_IPV4_IL,		56 },
97 	{ ICE_UDP_ILOS,		76 },
98 	{ ICE_PROTOCOL_LAST,	0 },
99 };
100 
101 static const u8 dummy_gre_udp_packet[] = {
102 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
103 	0x00, 0x00, 0x00, 0x00,
104 	0x00, 0x00, 0x00, 0x00,
105 
106 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
107 
108 	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
109 	0x00, 0x00, 0x00, 0x00,
110 	0x00, 0x2F, 0x00, 0x00,
111 	0x00, 0x00, 0x00, 0x00,
112 	0x00, 0x00, 0x00, 0x00,
113 
114 	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
115 	0x00, 0x00, 0x00, 0x00,
116 
117 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
118 	0x00, 0x00, 0x00, 0x00,
119 	0x00, 0x00, 0x00, 0x00,
120 	0x08, 0x00,
121 
122 	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
123 	0x00, 0x00, 0x00, 0x00,
124 	0x00, 0x11, 0x00, 0x00,
125 	0x00, 0x00, 0x00, 0x00,
126 	0x00, 0x00, 0x00, 0x00,
127 
128 	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
129 	0x00, 0x08, 0x00, 0x00,
130 };
131 
132 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
133 	{ ICE_MAC_OFOS,		0 },
134 	{ ICE_ETYPE_OL,		12 },
135 	{ ICE_IPV4_OFOS,	14 },
136 	{ ICE_UDP_OF,		34 },
137 	{ ICE_VXLAN,		42 },
138 	{ ICE_GENEVE,		42 },
139 	{ ICE_VXLAN_GPE,	42 },
140 	{ ICE_MAC_IL,		50 },
141 	{ ICE_IPV4_IL,		64 },
142 	{ ICE_TCP_IL,		84 },
143 	{ ICE_PROTOCOL_LAST,	0 },
144 };
145 
146 static const u8 dummy_udp_tun_tcp_packet[] = {
147 	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
148 	0x00, 0x00, 0x00, 0x00,
149 	0x00, 0x00, 0x00, 0x00,
150 
151 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
152 
153 	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
154 	0x00, 0x01, 0x00, 0x00,
155 	0x40, 0x11, 0x00, 0x00,
156 	0x00, 0x00, 0x00, 0x00,
157 	0x00, 0x00, 0x00, 0x00,
158 
159 	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
160 	0x00, 0x46, 0x00, 0x00,
161 
162 	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
163 	0x00, 0x00, 0x00, 0x00,
164 
165 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
166 	0x00, 0x00, 0x00, 0x00,
167 	0x00, 0x00, 0x00, 0x00,
168 	0x08, 0x00,
169 
170 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
171 	0x00, 0x01, 0x00, 0x00,
172 	0x40, 0x06, 0x00, 0x00,
173 	0x00, 0x00, 0x00, 0x00,
174 	0x00, 0x00, 0x00, 0x00,
175 
176 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
177 	0x00, 0x00, 0x00, 0x00,
178 	0x00, 0x00, 0x00, 0x00,
179 	0x50, 0x02, 0x20, 0x00,
180 	0x00, 0x00, 0x00, 0x00
181 };
182 
183 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
184 	{ ICE_MAC_OFOS,		0 },
185 	{ ICE_ETYPE_OL,		12 },
186 	{ ICE_IPV4_OFOS,	14 },
187 	{ ICE_UDP_OF,		34 },
188 	{ ICE_VXLAN,		42 },
189 	{ ICE_GENEVE,		42 },
190 	{ ICE_VXLAN_GPE,	42 },
191 	{ ICE_MAC_IL,		50 },
192 	{ ICE_IPV4_IL,		64 },
193 	{ ICE_UDP_ILOS,		84 },
194 	{ ICE_PROTOCOL_LAST,	0 },
195 };
196 
197 static const u8 dummy_udp_tun_udp_packet[] = {
198 	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
199 	0x00, 0x00, 0x00, 0x00,
200 	0x00, 0x00, 0x00, 0x00,
201 
202 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
203 
204 	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
205 	0x00, 0x01, 0x00, 0x00,
206 	0x00, 0x11, 0x00, 0x00,
207 	0x00, 0x00, 0x00, 0x00,
208 	0x00, 0x00, 0x00, 0x00,
209 
210 	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
211 	0x00, 0x3a, 0x00, 0x00,
212 
213 	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
214 	0x00, 0x00, 0x00, 0x00,
215 
216 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
217 	0x00, 0x00, 0x00, 0x00,
218 	0x00, 0x00, 0x00, 0x00,
219 	0x08, 0x00,
220 
221 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
222 	0x00, 0x01, 0x00, 0x00,
223 	0x00, 0x11, 0x00, 0x00,
224 	0x00, 0x00, 0x00, 0x00,
225 	0x00, 0x00, 0x00, 0x00,
226 
227 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
228 	0x00, 0x08, 0x00, 0x00,
229 };
230 
231 /* offset info for MAC + IPv4 + UDP dummy packet */
232 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
233 	{ ICE_MAC_OFOS,		0 },
234 	{ ICE_ETYPE_OL,		12 },
235 	{ ICE_IPV4_OFOS,	14 },
236 	{ ICE_UDP_ILOS,		34 },
237 	{ ICE_PROTOCOL_LAST,	0 },
238 };
239 
240 /* Dummy packet for MAC + IPv4 + UDP */
241 static const u8 dummy_udp_packet[] = {
242 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
243 	0x00, 0x00, 0x00, 0x00,
244 	0x00, 0x00, 0x00, 0x00,
245 
246 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
247 
248 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
249 	0x00, 0x01, 0x00, 0x00,
250 	0x00, 0x11, 0x00, 0x00,
251 	0x00, 0x00, 0x00, 0x00,
252 	0x00, 0x00, 0x00, 0x00,
253 
254 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
255 	0x00, 0x08, 0x00, 0x00,
256 
257 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
258 };
259 
260 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
261 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
262 	{ ICE_MAC_OFOS,		0 },
263 	{ ICE_VLAN_OFOS,	12 },
264 	{ ICE_ETYPE_OL,		16 },
265 	{ ICE_IPV4_OFOS,	18 },
266 	{ ICE_UDP_ILOS,		38 },
267 	{ ICE_PROTOCOL_LAST,	0 },
268 };
269 
270 /* C-tag (801.1Q), IPv4:UDP dummy packet */
271 static const u8 dummy_vlan_udp_packet[] = {
272 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
273 	0x00, 0x00, 0x00, 0x00,
274 	0x00, 0x00, 0x00, 0x00,
275 
276 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
277 
278 	0x08, 0x00,		/* ICE_ETYPE_OL 16 */
279 
280 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
281 	0x00, 0x01, 0x00, 0x00,
282 	0x00, 0x11, 0x00, 0x00,
283 	0x00, 0x00, 0x00, 0x00,
284 	0x00, 0x00, 0x00, 0x00,
285 
286 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
287 	0x00, 0x08, 0x00, 0x00,
288 
289 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
290 };
291 
292 /* offset info for MAC + IPv4 + TCP dummy packet */
293 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
294 	{ ICE_MAC_OFOS,		0 },
295 	{ ICE_ETYPE_OL,		12 },
296 	{ ICE_IPV4_OFOS,	14 },
297 	{ ICE_TCP_IL,		34 },
298 	{ ICE_PROTOCOL_LAST,	0 },
299 };
300 
301 /* Dummy packet for MAC + IPv4 + TCP */
302 static const u8 dummy_tcp_packet[] = {
303 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
304 	0x00, 0x00, 0x00, 0x00,
305 	0x00, 0x00, 0x00, 0x00,
306 
307 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
308 
309 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
310 	0x00, 0x01, 0x00, 0x00,
311 	0x00, 0x06, 0x00, 0x00,
312 	0x00, 0x00, 0x00, 0x00,
313 	0x00, 0x00, 0x00, 0x00,
314 
315 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
316 	0x00, 0x00, 0x00, 0x00,
317 	0x00, 0x00, 0x00, 0x00,
318 	0x50, 0x00, 0x00, 0x00,
319 	0x00, 0x00, 0x00, 0x00,
320 
321 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
322 };
323 
324 /* offset info for MAC + MPLS dummy packet */
325 static const struct ice_dummy_pkt_offsets dummy_mpls_packet_offsets[] = {
326 	{ ICE_MAC_OFOS,		0 },
327 	{ ICE_ETYPE_OL,		12 },
328 	{ ICE_PROTOCOL_LAST,	0 },
329 };
330 
331 /* Dummy packet for MAC + MPLS */
332 static const u8 dummy_mpls_packet[] = {
333 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334 	0x00, 0x00, 0x00, 0x00,
335 	0x00, 0x00, 0x00, 0x00,
336 
337 	0x88, 0x47,		/* ICE_ETYPE_OL 12 */
338 	0x00, 0x00, 0x01, 0x00,
339 
340 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
341 };
342 
343 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
344 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
345 	{ ICE_MAC_OFOS,		0 },
346 	{ ICE_VLAN_OFOS,	12 },
347 	{ ICE_ETYPE_OL,		16 },
348 	{ ICE_IPV4_OFOS,	18 },
349 	{ ICE_TCP_IL,		38 },
350 	{ ICE_PROTOCOL_LAST,	0 },
351 };
352 
353 /* C-tag (801.1Q), IPv4:TCP dummy packet */
354 static const u8 dummy_vlan_tcp_packet[] = {
355 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
356 	0x00, 0x00, 0x00, 0x00,
357 	0x00, 0x00, 0x00, 0x00,
358 
359 	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */
360 
361 	0x08, 0x00,		/* ICE_ETYPE_OL 16 */
362 
363 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
364 	0x00, 0x01, 0x00, 0x00,
365 	0x00, 0x06, 0x00, 0x00,
366 	0x00, 0x00, 0x00, 0x00,
367 	0x00, 0x00, 0x00, 0x00,
368 
369 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
370 	0x00, 0x00, 0x00, 0x00,
371 	0x00, 0x00, 0x00, 0x00,
372 	0x50, 0x00, 0x00, 0x00,
373 	0x00, 0x00, 0x00, 0x00,
374 
375 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
376 };
377 
378 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
379 	{ ICE_MAC_OFOS,		0 },
380 	{ ICE_ETYPE_OL,		12 },
381 	{ ICE_IPV6_OFOS,	14 },
382 	{ ICE_TCP_IL,		54 },
383 	{ ICE_PROTOCOL_LAST,	0 },
384 };
385 
386 static const u8 dummy_tcp_ipv6_packet[] = {
387 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
388 	0x00, 0x00, 0x00, 0x00,
389 	0x00, 0x00, 0x00, 0x00,
390 
391 	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */
392 
393 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
394 	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
395 	0x00, 0x00, 0x00, 0x00,
396 	0x00, 0x00, 0x00, 0x00,
397 	0x00, 0x00, 0x00, 0x00,
398 	0x00, 0x00, 0x00, 0x00,
399 	0x00, 0x00, 0x00, 0x00,
400 	0x00, 0x00, 0x00, 0x00,
401 	0x00, 0x00, 0x00, 0x00,
402 	0x00, 0x00, 0x00, 0x00,
403 
404 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
405 	0x00, 0x00, 0x00, 0x00,
406 	0x00, 0x00, 0x00, 0x00,
407 	0x50, 0x00, 0x00, 0x00,
408 	0x00, 0x00, 0x00, 0x00,
409 
410 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
411 };
412 
413 /* C-tag (802.1Q): IPv6 + TCP */
414 static const struct ice_dummy_pkt_offsets
415 dummy_vlan_tcp_ipv6_packet_offsets[] = {
416 	{ ICE_MAC_OFOS,		0 },
417 	{ ICE_VLAN_OFOS,	12 },
418 	{ ICE_ETYPE_OL,		16 },
419 	{ ICE_IPV6_OFOS,	18 },
420 	{ ICE_TCP_IL,		58 },
421 	{ ICE_PROTOCOL_LAST,	0 },
422 };
423 
424 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
425 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
426 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
427 	0x00, 0x00, 0x00, 0x00,
428 	0x00, 0x00, 0x00, 0x00,
429 
430 	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */
431 
432 	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */
433 
434 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
435 	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
436 	0x00, 0x00, 0x00, 0x00,
437 	0x00, 0x00, 0x00, 0x00,
438 	0x00, 0x00, 0x00, 0x00,
439 	0x00, 0x00, 0x00, 0x00,
440 	0x00, 0x00, 0x00, 0x00,
441 	0x00, 0x00, 0x00, 0x00,
442 	0x00, 0x00, 0x00, 0x00,
443 	0x00, 0x00, 0x00, 0x00,
444 
445 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
446 	0x00, 0x00, 0x00, 0x00,
447 	0x00, 0x00, 0x00, 0x00,
448 	0x50, 0x00, 0x00, 0x00,
449 	0x00, 0x00, 0x00, 0x00,
450 
451 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
452 };
453 
454 /* IPv6 + UDP */
455 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
456 	{ ICE_MAC_OFOS,		0 },
457 	{ ICE_ETYPE_OL,		12 },
458 	{ ICE_IPV6_OFOS,	14 },
459 	{ ICE_UDP_ILOS,		54 },
460 	{ ICE_PROTOCOL_LAST,	0 },
461 };
462 
463 /* IPv6 + UDP dummy packet */
464 static const u8 dummy_udp_ipv6_packet[] = {
465 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
466 	0x00, 0x00, 0x00, 0x00,
467 	0x00, 0x00, 0x00, 0x00,
468 
469 	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */
470 
471 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
472 	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
473 	0x00, 0x00, 0x00, 0x00,
474 	0x00, 0x00, 0x00, 0x00,
475 	0x00, 0x00, 0x00, 0x00,
476 	0x00, 0x00, 0x00, 0x00,
477 	0x00, 0x00, 0x00, 0x00,
478 	0x00, 0x00, 0x00, 0x00,
479 	0x00, 0x00, 0x00, 0x00,
480 	0x00, 0x00, 0x00, 0x00,
481 
482 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
483 	0x00, 0x10, 0x00, 0x00,
484 
485 	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
486 	0x00, 0x00, 0x00, 0x00,
487 
488 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
489 };
490 
491 /* C-tag (802.1Q): IPv6 + UDP */
492 static const struct ice_dummy_pkt_offsets
493 dummy_vlan_udp_ipv6_packet_offsets[] = {
494 	{ ICE_MAC_OFOS,		0 },
495 	{ ICE_VLAN_OFOS,	12 },
496 	{ ICE_ETYPE_OL,		16 },
497 	{ ICE_IPV6_OFOS,	18 },
498 	{ ICE_UDP_ILOS,		58 },
499 	{ ICE_PROTOCOL_LAST,	0 },
500 };
501 
502 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
503 static const u8 dummy_vlan_udp_ipv6_packet[] = {
504 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
505 	0x00, 0x00, 0x00, 0x00,
506 	0x00, 0x00, 0x00, 0x00,
507 
508 	0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */
509 
510 	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */
511 
512 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
513 	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
514 	0x00, 0x00, 0x00, 0x00,
515 	0x00, 0x00, 0x00, 0x00,
516 	0x00, 0x00, 0x00, 0x00,
517 	0x00, 0x00, 0x00, 0x00,
518 	0x00, 0x00, 0x00, 0x00,
519 	0x00, 0x00, 0x00, 0x00,
520 	0x00, 0x00, 0x00, 0x00,
521 	0x00, 0x00, 0x00, 0x00,
522 
523 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
524 	0x00, 0x08, 0x00, 0x00,
525 
526 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
527 };
528 
529 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
530 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
531 	{ ICE_MAC_OFOS,		0 },
532 	{ ICE_IPV4_OFOS,	14 },
533 	{ ICE_UDP_OF,		34 },
534 	{ ICE_GTP,		42 },
535 	{ ICE_IPV4_IL,		62 },
536 	{ ICE_TCP_IL,		82 },
537 	{ ICE_PROTOCOL_LAST,	0 },
538 };
539 
540 static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
541 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
542 	0x00, 0x00, 0x00, 0x00,
543 	0x00, 0x00, 0x00, 0x00,
544 	0x08, 0x00,
545 
546 	0x45, 0x00, 0x00, 0x58, /* IP 14 */
547 	0x00, 0x00, 0x00, 0x00,
548 	0x00, 0x11, 0x00, 0x00,
549 	0x00, 0x00, 0x00, 0x00,
550 	0x00, 0x00, 0x00, 0x00,
551 
552 	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
553 	0x00, 0x44, 0x00, 0x00,
554 
555 	0x34, 0xff, 0x00, 0x34, /* GTP-U Header 42 */
556 	0x00, 0x00, 0x00, 0x00,
557 	0x00, 0x00, 0x00, 0x85,
558 
559 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
560 	0x00, 0x00, 0x00, 0x00,
561 
562 	0x45, 0x00, 0x00, 0x28, /* IP 62 */
563 	0x00, 0x00, 0x00, 0x00,
564 	0x00, 0x06, 0x00, 0x00,
565 	0x00, 0x00, 0x00, 0x00,
566 	0x00, 0x00, 0x00, 0x00,
567 
568 	0x00, 0x00, 0x00, 0x00, /* TCP 82 */
569 	0x00, 0x00, 0x00, 0x00,
570 	0x00, 0x00, 0x00, 0x00,
571 	0x50, 0x00, 0x00, 0x00,
572 	0x00, 0x00, 0x00, 0x00,
573 
574 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
575 };
576 
577 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
578 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
579 	{ ICE_MAC_OFOS,		0 },
580 	{ ICE_IPV4_OFOS,	14 },
581 	{ ICE_UDP_OF,		34 },
582 	{ ICE_GTP,		42 },
583 	{ ICE_IPV4_IL,		62 },
584 	{ ICE_UDP_ILOS,		82 },
585 	{ ICE_PROTOCOL_LAST,	0 },
586 };
587 
588 static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
589 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
590 	0x00, 0x00, 0x00, 0x00,
591 	0x00, 0x00, 0x00, 0x00,
592 	0x08, 0x00,
593 
594 	0x45, 0x00, 0x00, 0x4c, /* IP 14 */
595 	0x00, 0x00, 0x00, 0x00,
596 	0x00, 0x11, 0x00, 0x00,
597 	0x00, 0x00, 0x00, 0x00,
598 	0x00, 0x00, 0x00, 0x00,
599 
600 	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
601 	0x00, 0x38, 0x00, 0x00,
602 
603 	0x34, 0xff, 0x00, 0x28, /* GTP-U Header 42 */
604 	0x00, 0x00, 0x00, 0x00,
605 	0x00, 0x00, 0x00, 0x85,
606 
607 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
608 	0x00, 0x00, 0x00, 0x00,
609 
610 	0x45, 0x00, 0x00, 0x1c, /* IP 62 */
611 	0x00, 0x00, 0x00, 0x00,
612 	0x00, 0x11, 0x00, 0x00,
613 	0x00, 0x00, 0x00, 0x00,
614 	0x00, 0x00, 0x00, 0x00,
615 
616 	0x00, 0x00, 0x00, 0x00, /* UDP 82 */
617 	0x00, 0x08, 0x00, 0x00,
618 
619 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
620 };
621 
622 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
623 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
624 	{ ICE_MAC_OFOS,		0 },
625 	{ ICE_IPV4_OFOS,	14 },
626 	{ ICE_UDP_OF,		34 },
627 	{ ICE_GTP,		42 },
628 	{ ICE_IPV6_IL,		62 },
629 	{ ICE_TCP_IL,		102 },
630 	{ ICE_PROTOCOL_LAST,	0 },
631 };
632 
633 static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
634 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
635 	0x00, 0x00, 0x00, 0x00,
636 	0x00, 0x00, 0x00, 0x00,
637 	0x08, 0x00,
638 
639 	0x45, 0x00, 0x00, 0x6c, /* IP 14 */
640 	0x00, 0x00, 0x00, 0x00,
641 	0x00, 0x11, 0x00, 0x00,
642 	0x00, 0x00, 0x00, 0x00,
643 	0x00, 0x00, 0x00, 0x00,
644 
645 	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
646 	0x00, 0x58, 0x00, 0x00,
647 
648 	0x34, 0xff, 0x00, 0x48, /* GTP-U Header 42 */
649 	0x00, 0x00, 0x00, 0x00,
650 	0x00, 0x00, 0x00, 0x85,
651 
652 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
653 	0x00, 0x00, 0x00, 0x00,
654 
655 	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
656 	0x00, 0x14, 0x06, 0x00,
657 	0x00, 0x00, 0x00, 0x00,
658 	0x00, 0x00, 0x00, 0x00,
659 	0x00, 0x00, 0x00, 0x00,
660 	0x00, 0x00, 0x00, 0x00,
661 	0x00, 0x00, 0x00, 0x00,
662 	0x00, 0x00, 0x00, 0x00,
663 	0x00, 0x00, 0x00, 0x00,
664 	0x00, 0x00, 0x00, 0x00,
665 
666 	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
667 	0x00, 0x00, 0x00, 0x00,
668 	0x00, 0x00, 0x00, 0x00,
669 	0x50, 0x00, 0x00, 0x00,
670 	0x00, 0x00, 0x00, 0x00,
671 
672 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
673 };
674 
675 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
676 	{ ICE_MAC_OFOS,		0 },
677 	{ ICE_IPV4_OFOS,	14 },
678 	{ ICE_UDP_OF,		34 },
679 	{ ICE_GTP,		42 },
680 	{ ICE_IPV6_IL,		62 },
681 	{ ICE_UDP_ILOS,		102 },
682 	{ ICE_PROTOCOL_LAST,	0 },
683 };
684 
685 static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
686 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
687 	0x00, 0x00, 0x00, 0x00,
688 	0x00, 0x00, 0x00, 0x00,
689 	0x08, 0x00,
690 
691 	0x45, 0x00, 0x00, 0x60, /* IP 14 */
692 	0x00, 0x00, 0x00, 0x00,
693 	0x00, 0x11, 0x00, 0x00,
694 	0x00, 0x00, 0x00, 0x00,
695 	0x00, 0x00, 0x00, 0x00,
696 
697 	0x00, 0x00, 0x08, 0x68, /* UDP 34 */
698 	0x00, 0x4c, 0x00, 0x00,
699 
700 	0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 42 */
701 	0x00, 0x00, 0x00, 0x00,
702 	0x00, 0x00, 0x00, 0x85,
703 
704 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
705 	0x00, 0x00, 0x00, 0x00,
706 
707 	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
708 	0x00, 0x08, 0x11, 0x00,
709 	0x00, 0x00, 0x00, 0x00,
710 	0x00, 0x00, 0x00, 0x00,
711 	0x00, 0x00, 0x00, 0x00,
712 	0x00, 0x00, 0x00, 0x00,
713 	0x00, 0x00, 0x00, 0x00,
714 	0x00, 0x00, 0x00, 0x00,
715 	0x00, 0x00, 0x00, 0x00,
716 	0x00, 0x00, 0x00, 0x00,
717 
718 	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
719 	0x00, 0x08, 0x00, 0x00,
720 
721 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
722 };
723 
724 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
725 	{ ICE_MAC_OFOS,		0 },
726 	{ ICE_IPV6_OFOS,	14 },
727 	{ ICE_UDP_OF,		54 },
728 	{ ICE_GTP,		62 },
729 	{ ICE_IPV4_IL,		82 },
730 	{ ICE_TCP_IL,		102 },
731 	{ ICE_PROTOCOL_LAST,	0 },
732 };
733 
734 static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
735 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
736 	0x00, 0x00, 0x00, 0x00,
737 	0x00, 0x00, 0x00, 0x00,
738 	0x86, 0xdd,
739 
740 	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
741 	0x00, 0x44, 0x11, 0x00,
742 	0x00, 0x00, 0x00, 0x00,
743 	0x00, 0x00, 0x00, 0x00,
744 	0x00, 0x00, 0x00, 0x00,
745 	0x00, 0x00, 0x00, 0x00,
746 	0x00, 0x00, 0x00, 0x00,
747 	0x00, 0x00, 0x00, 0x00,
748 	0x00, 0x00, 0x00, 0x00,
749 	0x00, 0x00, 0x00, 0x00,
750 
751 	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
752 	0x00, 0x44, 0x00, 0x00,
753 
754 	0x34, 0xff, 0x00, 0x34, /* GTP-U Header 62 */
755 	0x00, 0x00, 0x00, 0x00,
756 	0x00, 0x00, 0x00, 0x85,
757 
758 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
759 	0x00, 0x00, 0x00, 0x00,
760 
761 	0x45, 0x00, 0x00, 0x28, /* IP 82 */
762 	0x00, 0x00, 0x00, 0x00,
763 	0x00, 0x06, 0x00, 0x00,
764 	0x00, 0x00, 0x00, 0x00,
765 	0x00, 0x00, 0x00, 0x00,
766 
767 	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
768 	0x00, 0x00, 0x00, 0x00,
769 	0x00, 0x00, 0x00, 0x00,
770 	0x50, 0x00, 0x00, 0x00,
771 	0x00, 0x00, 0x00, 0x00,
772 
773 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
774 };
775 
776 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
777 	{ ICE_MAC_OFOS,		0 },
778 	{ ICE_IPV6_OFOS,	14 },
779 	{ ICE_UDP_OF,		54 },
780 	{ ICE_GTP,		62 },
781 	{ ICE_IPV4_IL,		82 },
782 	{ ICE_UDP_ILOS,		102 },
783 	{ ICE_PROTOCOL_LAST,	0 },
784 };
785 
786 static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
787 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
788 	0x00, 0x00, 0x00, 0x00,
789 	0x00, 0x00, 0x00, 0x00,
790 	0x86, 0xdd,
791 
792 	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
793 	0x00, 0x38, 0x11, 0x00,
794 	0x00, 0x00, 0x00, 0x00,
795 	0x00, 0x00, 0x00, 0x00,
796 	0x00, 0x00, 0x00, 0x00,
797 	0x00, 0x00, 0x00, 0x00,
798 	0x00, 0x00, 0x00, 0x00,
799 	0x00, 0x00, 0x00, 0x00,
800 	0x00, 0x00, 0x00, 0x00,
801 	0x00, 0x00, 0x00, 0x00,
802 
803 	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
804 	0x00, 0x38, 0x00, 0x00,
805 
806 	0x34, 0xff, 0x00, 0x28, /* GTP-U Header 62 */
807 	0x00, 0x00, 0x00, 0x00,
808 	0x00, 0x00, 0x00, 0x85,
809 
810 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
811 	0x00, 0x00, 0x00, 0x00,
812 
813 	0x45, 0x00, 0x00, 0x1c, /* IP 82 */
814 	0x00, 0x00, 0x00, 0x00,
815 	0x00, 0x11, 0x00, 0x00,
816 	0x00, 0x00, 0x00, 0x00,
817 	0x00, 0x00, 0x00, 0x00,
818 
819 	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
820 	0x00, 0x08, 0x00, 0x00,
821 
822 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
823 };
824 
825 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
826 	{ ICE_MAC_OFOS,		0 },
827 	{ ICE_IPV6_OFOS,	14 },
828 	{ ICE_UDP_OF,		54 },
829 	{ ICE_GTP,		62 },
830 	{ ICE_IPV6_IL,		82 },
831 	{ ICE_TCP_IL,		122 },
832 	{ ICE_PROTOCOL_LAST,	0 },
833 };
834 
835 static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
836 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
837 	0x00, 0x00, 0x00, 0x00,
838 	0x00, 0x00, 0x00, 0x00,
839 	0x86, 0xdd,
840 
841 	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
842 	0x00, 0x58, 0x11, 0x00,
843 	0x00, 0x00, 0x00, 0x00,
844 	0x00, 0x00, 0x00, 0x00,
845 	0x00, 0x00, 0x00, 0x00,
846 	0x00, 0x00, 0x00, 0x00,
847 	0x00, 0x00, 0x00, 0x00,
848 	0x00, 0x00, 0x00, 0x00,
849 	0x00, 0x00, 0x00, 0x00,
850 	0x00, 0x00, 0x00, 0x00,
851 
852 	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
853 	0x00, 0x58, 0x00, 0x00,
854 
855 	0x34, 0xff, 0x00, 0x48, /* GTP-U Header 62 */
856 	0x00, 0x00, 0x00, 0x00,
857 	0x00, 0x00, 0x00, 0x85,
858 
859 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
860 	0x00, 0x00, 0x00, 0x00,
861 
862 	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
863 	0x00, 0x14, 0x06, 0x00,
864 	0x00, 0x00, 0x00, 0x00,
865 	0x00, 0x00, 0x00, 0x00,
866 	0x00, 0x00, 0x00, 0x00,
867 	0x00, 0x00, 0x00, 0x00,
868 	0x00, 0x00, 0x00, 0x00,
869 	0x00, 0x00, 0x00, 0x00,
870 	0x00, 0x00, 0x00, 0x00,
871 	0x00, 0x00, 0x00, 0x00,
872 
873 	0x00, 0x00, 0x00, 0x00, /* TCP 122 */
874 	0x00, 0x00, 0x00, 0x00,
875 	0x00, 0x00, 0x00, 0x00,
876 	0x50, 0x00, 0x00, 0x00,
877 	0x00, 0x00, 0x00, 0x00,
878 
879 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
880 };
881 
882 static const struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
883 	{ ICE_MAC_OFOS,		0 },
884 	{ ICE_IPV6_OFOS,	14 },
885 	{ ICE_UDP_OF,		54 },
886 	{ ICE_GTP,		62 },
887 	{ ICE_IPV6_IL,		82 },
888 	{ ICE_UDP_ILOS,		122 },
889 	{ ICE_PROTOCOL_LAST,	0 },
890 };
891 
892 static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
893 	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
894 	0x00, 0x00, 0x00, 0x00,
895 	0x00, 0x00, 0x00, 0x00,
896 	0x86, 0xdd,
897 
898 	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
899 	0x00, 0x4c, 0x11, 0x00,
900 	0x00, 0x00, 0x00, 0x00,
901 	0x00, 0x00, 0x00, 0x00,
902 	0x00, 0x00, 0x00, 0x00,
903 	0x00, 0x00, 0x00, 0x00,
904 	0x00, 0x00, 0x00, 0x00,
905 	0x00, 0x00, 0x00, 0x00,
906 	0x00, 0x00, 0x00, 0x00,
907 	0x00, 0x00, 0x00, 0x00,
908 
909 	0x00, 0x00, 0x08, 0x68, /* UDP 54 */
910 	0x00, 0x4c, 0x00, 0x00,
911 
912 	0x34, 0xff, 0x00, 0x3c, /* GTP-U Header 62 */
913 	0x00, 0x00, 0x00, 0x00,
914 	0x00, 0x00, 0x00, 0x85,
915 
916 	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
917 	0x00, 0x00, 0x00, 0x00,
918 
919 	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
920 	0x00, 0x08, 0x11, 0x00,
921 	0x00, 0x00, 0x00, 0x00,
922 	0x00, 0x00, 0x00, 0x00,
923 	0x00, 0x00, 0x00, 0x00,
924 	0x00, 0x00, 0x00, 0x00,
925 	0x00, 0x00, 0x00, 0x00,
926 	0x00, 0x00, 0x00, 0x00,
927 	0x00, 0x00, 0x00, 0x00,
928 	0x00, 0x00, 0x00, 0x00,
929 
930 	0x00, 0x00, 0x00, 0x00, /* UDP 122 */
931 	0x00, 0x08, 0x00, 0x00,
932 
933 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
934 };
935 
936 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
937 	{ ICE_MAC_OFOS,		0 },
938 	{ ICE_IPV4_OFOS,	14 },
939 	{ ICE_UDP_OF,		34 },
940 	{ ICE_GTP,		42 },
941 	{ ICE_IPV4_IL,		62 },
942 	{ ICE_PROTOCOL_LAST,	0 },
943 };
944 
945 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
946 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
947 	0x00, 0x00, 0x00, 0x00,
948 	0x00, 0x00, 0x00, 0x00,
949 	0x08, 0x00,
950 
951 	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
952 	0x00, 0x00, 0x40, 0x00,
953 	0x40, 0x11, 0x00, 0x00,
954 	0x00, 0x00, 0x00, 0x00,
955 	0x00, 0x00, 0x00, 0x00,
956 
957 	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
958 	0x00, 0x00, 0x00, 0x00,
959 
960 	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
961 	0x00, 0x00, 0x00, 0x00,
962 	0x00, 0x00, 0x00, 0x85,
963 
964 	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
965 	0x00, 0x00, 0x00, 0x00,
966 
967 	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
968 	0x00, 0x00, 0x40, 0x00,
969 	0x40, 0x00, 0x00, 0x00,
970 	0x00, 0x00, 0x00, 0x00,
971 	0x00, 0x00, 0x00, 0x00,
972 	0x00, 0x00,
973 };
974 
975 static const
976 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
977 	{ ICE_MAC_OFOS,		0 },
978 	{ ICE_IPV4_OFOS,	14 },
979 	{ ICE_UDP_OF,		34 },
980 	{ ICE_GTP,		42 },
981 	{ ICE_IPV6_IL,		62 },
982 	{ ICE_PROTOCOL_LAST,	0 },
983 };
984 
985 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
986 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
987 	0x00, 0x00, 0x00, 0x00,
988 	0x00, 0x00, 0x00, 0x00,
989 	0x08, 0x00,
990 
991 	0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
992 	0x00, 0x00, 0x40, 0x00,
993 	0x40, 0x11, 0x00, 0x00,
994 	0x00, 0x00, 0x00, 0x00,
995 	0x00, 0x00, 0x00, 0x00,
996 
997 	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
998 	0x00, 0x00, 0x00, 0x00,
999 
1000 	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
1001 	0x00, 0x00, 0x00, 0x00,
1002 	0x00, 0x00, 0x00, 0x85,
1003 
1004 	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1005 	0x00, 0x00, 0x00, 0x00,
1006 
1007 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
1008 	0x00, 0x00, 0x3b, 0x00,
1009 	0x00, 0x00, 0x00, 0x00,
1010 	0x00, 0x00, 0x00, 0x00,
1011 	0x00, 0x00, 0x00, 0x00,
1012 	0x00, 0x00, 0x00, 0x00,
1013 	0x00, 0x00, 0x00, 0x00,
1014 	0x00, 0x00, 0x00, 0x00,
1015 	0x00, 0x00, 0x00, 0x00,
1016 	0x00, 0x00, 0x00, 0x00,
1017 
1018 	0x00, 0x00,
1019 };
1020 
1021 static const
1022 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
1023 	{ ICE_MAC_OFOS,		0 },
1024 	{ ICE_IPV6_OFOS,	14 },
1025 	{ ICE_UDP_OF,		54 },
1026 	{ ICE_GTP,		62 },
1027 	{ ICE_IPV4_IL,		82 },
1028 	{ ICE_PROTOCOL_LAST,	0 },
1029 };
1030 
1031 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
1032 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1033 	0x00, 0x00, 0x00, 0x00,
1034 	0x00, 0x00, 0x00, 0x00,
1035 	0x86, 0xdd,
1036 
1037 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1038 	0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
1039 	0x00, 0x00, 0x00, 0x00,
1040 	0x00, 0x00, 0x00, 0x00,
1041 	0x00, 0x00, 0x00, 0x00,
1042 	0x00, 0x00, 0x00, 0x00,
1043 	0x00, 0x00, 0x00, 0x00,
1044 	0x00, 0x00, 0x00, 0x00,
1045 	0x00, 0x00, 0x00, 0x00,
1046 	0x00, 0x00, 0x00, 0x00,
1047 
1048 	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1049 	0x00, 0x00, 0x00, 0x00,
1050 
1051 	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
1052 	0x00, 0x00, 0x00, 0x00,
1053 	0x00, 0x00, 0x00, 0x85,
1054 
1055 	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1056 	0x00, 0x00, 0x00, 0x00,
1057 
1058 	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
1059 	0x00, 0x00, 0x40, 0x00,
1060 	0x40, 0x00, 0x00, 0x00,
1061 	0x00, 0x00, 0x00, 0x00,
1062 	0x00, 0x00, 0x00, 0x00,
1063 
1064 	0x00, 0x00,
1065 };
1066 
1067 static const
1068 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
1069 	{ ICE_MAC_OFOS,		0 },
1070 	{ ICE_IPV6_OFOS,	14 },
1071 	{ ICE_UDP_OF,		54 },
1072 	{ ICE_GTP,		62 },
1073 	{ ICE_IPV6_IL,		82 },
1074 	{ ICE_PROTOCOL_LAST,	0 },
1075 };
1076 
1077 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
1078 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1079 	0x00, 0x00, 0x00, 0x00,
1080 	0x00, 0x00, 0x00, 0x00,
1081 	0x86, 0xdd,
1082 
1083 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1084 	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1085 	0x00, 0x00, 0x00, 0x00,
1086 	0x00, 0x00, 0x00, 0x00,
1087 	0x00, 0x00, 0x00, 0x00,
1088 	0x00, 0x00, 0x00, 0x00,
1089 	0x00, 0x00, 0x00, 0x00,
1090 	0x00, 0x00, 0x00, 0x00,
1091 	0x00, 0x00, 0x00, 0x00,
1092 	0x00, 0x00, 0x00, 0x00,
1093 
1094 	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1095 	0x00, 0x00, 0x00, 0x00,
1096 
1097 	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
1098 	0x00, 0x00, 0x00, 0x00,
1099 	0x00, 0x00, 0x00, 0x85,
1100 
1101 	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1102 	0x00, 0x00, 0x00, 0x00,
1103 
1104 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
1105 	0x00, 0x00, 0x3b, 0x00,
1106 	0x00, 0x00, 0x00, 0x00,
1107 	0x00, 0x00, 0x00, 0x00,
1108 	0x00, 0x00, 0x00, 0x00,
1109 	0x00, 0x00, 0x00, 0x00,
1110 	0x00, 0x00, 0x00, 0x00,
1111 	0x00, 0x00, 0x00, 0x00,
1112 	0x00, 0x00, 0x00, 0x00,
1113 	0x00, 0x00, 0x00, 0x00,
1114 
1115 	0x00, 0x00,
1116 };
1117 
1118 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
1119 	{ ICE_MAC_OFOS,		0 },
1120 	{ ICE_IPV4_OFOS,	14 },
1121 	{ ICE_UDP_OF,		34 },
1122 	{ ICE_GTP,		42 },
1123 	{ ICE_PROTOCOL_LAST,	0 },
1124 };
1125 
1126 static const u8 dummy_udp_gtp_packet[] = {
1127 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1128 	0x00, 0x00, 0x00, 0x00,
1129 	0x00, 0x00, 0x00, 0x00,
1130 	0x08, 0x00,
1131 
1132 	0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
1133 	0x00, 0x00, 0x00, 0x00,
1134 	0x00, 0x11, 0x00, 0x00,
1135 	0x00, 0x00, 0x00, 0x00,
1136 	0x00, 0x00, 0x00, 0x00,
1137 
1138 	0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
1139 	0x00, 0x1c, 0x00, 0x00,
1140 
1141 	0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
1142 	0x00, 0x00, 0x00, 0x00,
1143 	0x00, 0x00, 0x00, 0x85,
1144 
1145 	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1146 	0x00, 0x00, 0x00, 0x00,
1147 
1148 };
1149 
1150 static const struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
1151 	{ ICE_MAC_OFOS,		0 },
1152 	{ ICE_IPV4_OFOS,	14 },
1153 	{ ICE_UDP_OF,		34 },
1154 	{ ICE_GTP_NO_PAY,	42 },
1155 	{ ICE_PROTOCOL_LAST,	0 },
1156 };
1157 
1158 static const
1159 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
1160 	{ ICE_MAC_OFOS,		0 },
1161 	{ ICE_IPV6_OFOS,	14 },
1162 	{ ICE_UDP_OF,		54 },
1163 	{ ICE_GTP_NO_PAY,	62 },
1164 	{ ICE_PROTOCOL_LAST,	0 },
1165 };
1166 
1167 static const u8 dummy_ipv6_gtp_packet[] = {
1168 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1169 	0x00, 0x00, 0x00, 0x00,
1170 	0x00, 0x00, 0x00, 0x00,
1171 	0x86, 0xdd,
1172 
1173 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1174 	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
1175 	0x00, 0x00, 0x00, 0x00,
1176 	0x00, 0x00, 0x00, 0x00,
1177 	0x00, 0x00, 0x00, 0x00,
1178 	0x00, 0x00, 0x00, 0x00,
1179 	0x00, 0x00, 0x00, 0x00,
1180 	0x00, 0x00, 0x00, 0x00,
1181 	0x00, 0x00, 0x00, 0x00,
1182 	0x00, 0x00, 0x00, 0x00,
1183 
1184 	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1185 	0x00, 0x00, 0x00, 0x00,
1186 
1187 	0x30, 0x00, 0x00, 0x28,  /* ICE_GTP 62 */
1188 	0x00, 0x00, 0x00, 0x00,
1189 
1190 	0x00, 0x00,
1191 };
1192 
1193 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
1194 	{ ICE_MAC_OFOS,		0 },
1195 	{ ICE_VLAN_OFOS,	12 },
1196 	{ ICE_ETYPE_OL,		16 },
1197 	{ ICE_PPPOE,		18 },
1198 	{ ICE_PROTOCOL_LAST,	0 },
1199 };
1200 
1201 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
1202 	{ ICE_MAC_OFOS,		0 },
1203 	{ ICE_VLAN_OFOS,	12 },
1204 	{ ICE_ETYPE_OL,		16 },
1205 	{ ICE_PPPOE,		18 },
1206 	{ ICE_IPV4_OFOS,	26 },
1207 	{ ICE_PROTOCOL_LAST,	0 },
1208 };
1209 
1210 static const u8 dummy_pppoe_ipv4_packet[] = {
1211 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1212 	0x00, 0x00, 0x00, 0x00,
1213 	0x00, 0x00, 0x00, 0x00,
1214 
1215 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1216 
1217 	0x88, 0x64,		/* ICE_ETYPE_OL 16 */
1218 
1219 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1220 	0x00, 0x16,
1221 
1222 	0x00, 0x21,		/* PPP Link Layer 24 */
1223 
1224 	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
1225 	0x00, 0x00, 0x00, 0x00,
1226 	0x00, 0x00, 0x00, 0x00,
1227 	0x00, 0x00, 0x00, 0x00,
1228 	0x00, 0x00, 0x00, 0x00,
1229 
1230 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1231 };
1232 
1233 static const
1234 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
1235 	{ ICE_MAC_OFOS,		0 },
1236 	{ ICE_VLAN_OFOS,	12 },
1237 	{ ICE_ETYPE_OL,		16 },
1238 	{ ICE_PPPOE,		18 },
1239 	{ ICE_IPV4_OFOS,	26 },
1240 	{ ICE_TCP_IL,		46 },
1241 	{ ICE_PROTOCOL_LAST,	0 },
1242 };
1243 
1244 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
1245 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1246 	0x00, 0x00, 0x00, 0x00,
1247 	0x00, 0x00, 0x00, 0x00,
1248 
1249 	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */
1250 
1251 	0x88, 0x64,		/* ICE_ETYPE_OL 16 */
1252 
1253 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1254 	0x00, 0x16,
1255 
1256 	0x00, 0x21,		/* PPP Link Layer 24 */
1257 
1258 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
1259 	0x00, 0x01, 0x00, 0x00,
1260 	0x00, 0x06, 0x00, 0x00,
1261 	0x00, 0x00, 0x00, 0x00,
1262 	0x00, 0x00, 0x00, 0x00,
1263 
1264 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
1265 	0x00, 0x00, 0x00, 0x00,
1266 	0x00, 0x00, 0x00, 0x00,
1267 	0x50, 0x00, 0x00, 0x00,
1268 	0x00, 0x00, 0x00, 0x00,
1269 
1270 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1271 };
1272 
1273 static const
1274 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
1275 	{ ICE_MAC_OFOS,		0 },
1276 	{ ICE_VLAN_OFOS,	12 },
1277 	{ ICE_ETYPE_OL,		16 },
1278 	{ ICE_PPPOE,		18 },
1279 	{ ICE_IPV4_OFOS,	26 },
1280 	{ ICE_UDP_ILOS,		46 },
1281 	{ ICE_PROTOCOL_LAST,	0 },
1282 };
1283 
1284 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
1285 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1286 	0x00, 0x00, 0x00, 0x00,
1287 	0x00, 0x00, 0x00, 0x00,
1288 
1289 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1290 
1291 	0x88, 0x64,		/* ICE_ETYPE_OL 16 */
1292 
1293 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1294 	0x00, 0x16,
1295 
1296 	0x00, 0x21,		/* PPP Link Layer 24 */
1297 
1298 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
1299 	0x00, 0x01, 0x00, 0x00,
1300 	0x00, 0x11, 0x00, 0x00,
1301 	0x00, 0x00, 0x00, 0x00,
1302 	0x00, 0x00, 0x00, 0x00,
1303 
1304 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
1305 	0x00, 0x08, 0x00, 0x00,
1306 
1307 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1308 };
1309 
1310 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
1311 	{ ICE_MAC_OFOS,		0 },
1312 	{ ICE_VLAN_OFOS,	12 },
1313 	{ ICE_ETYPE_OL,		16 },
1314 	{ ICE_PPPOE,		18 },
1315 	{ ICE_IPV6_OFOS,	26 },
1316 	{ ICE_PROTOCOL_LAST,	0 },
1317 };
1318 
1319 static const u8 dummy_pppoe_ipv6_packet[] = {
1320 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1321 	0x00, 0x00, 0x00, 0x00,
1322 	0x00, 0x00, 0x00, 0x00,
1323 
1324 	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */
1325 
1326 	0x88, 0x64,		/* ICE_ETYPE_OL 16 */
1327 
1328 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1329 	0x00, 0x2a,
1330 
1331 	0x00, 0x57,		/* PPP Link Layer 24 */
1332 
1333 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1334 	0x00, 0x00, 0x3b, 0x00,
1335 	0x00, 0x00, 0x00, 0x00,
1336 	0x00, 0x00, 0x00, 0x00,
1337 	0x00, 0x00, 0x00, 0x00,
1338 	0x00, 0x00, 0x00, 0x00,
1339 	0x00, 0x00, 0x00, 0x00,
1340 	0x00, 0x00, 0x00, 0x00,
1341 	0x00, 0x00, 0x00, 0x00,
1342 	0x00, 0x00, 0x00, 0x00,
1343 
1344 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1345 };
1346 
1347 static const
1348 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
1349 	{ ICE_MAC_OFOS,		0 },
1350 	{ ICE_VLAN_OFOS,	12 },
1351 	{ ICE_ETYPE_OL,		16 },
1352 	{ ICE_PPPOE,		18 },
1353 	{ ICE_IPV6_OFOS,	26 },
1354 	{ ICE_TCP_IL,		66 },
1355 	{ ICE_PROTOCOL_LAST,	0 },
1356 };
1357 
1358 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
1359 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1360 	0x00, 0x00, 0x00, 0x00,
1361 	0x00, 0x00, 0x00, 0x00,
1362 
1363 	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */
1364 
1365 	0x88, 0x64,		/* ICE_ETYPE_OL 16 */
1366 
1367 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1368 	0x00, 0x2a,
1369 
1370 	0x00, 0x57,		/* PPP Link Layer 24 */
1371 
1372 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1373 	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1374 	0x00, 0x00, 0x00, 0x00,
1375 	0x00, 0x00, 0x00, 0x00,
1376 	0x00, 0x00, 0x00, 0x00,
1377 	0x00, 0x00, 0x00, 0x00,
1378 	0x00, 0x00, 0x00, 0x00,
1379 	0x00, 0x00, 0x00, 0x00,
1380 	0x00, 0x00, 0x00, 0x00,
1381 	0x00, 0x00, 0x00, 0x00,
1382 
1383 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
1384 	0x00, 0x00, 0x00, 0x00,
1385 	0x00, 0x00, 0x00, 0x00,
1386 	0x50, 0x00, 0x00, 0x00,
1387 	0x00, 0x00, 0x00, 0x00,
1388 
1389 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1390 };
1391 
1392 static const
1393 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
1394 	{ ICE_MAC_OFOS,		0 },
1395 	{ ICE_VLAN_OFOS,	12 },
1396 	{ ICE_ETYPE_OL,		16 },
1397 	{ ICE_PPPOE,		18 },
1398 	{ ICE_IPV6_OFOS,	26 },
1399 	{ ICE_UDP_ILOS,		66 },
1400 	{ ICE_PROTOCOL_LAST,	0 },
1401 };
1402 
1403 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
1404 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1405 	0x00, 0x00, 0x00, 0x00,
1406 	0x00, 0x00, 0x00, 0x00,
1407 
1408 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
1409 
1410 	0x88, 0x64,		/* ICE_ETYPE_OL 16 */
1411 
1412 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
1413 	0x00, 0x2a,
1414 
1415 	0x00, 0x57,		/* PPP Link Layer 24 */
1416 
1417 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
1418 	0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
1419 	0x00, 0x00, 0x00, 0x00,
1420 	0x00, 0x00, 0x00, 0x00,
1421 	0x00, 0x00, 0x00, 0x00,
1422 	0x00, 0x00, 0x00, 0x00,
1423 	0x00, 0x00, 0x00, 0x00,
1424 	0x00, 0x00, 0x00, 0x00,
1425 	0x00, 0x00, 0x00, 0x00,
1426 	0x00, 0x00, 0x00, 0x00,
1427 
1428 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
1429 	0x00, 0x08, 0x00, 0x00,
1430 
1431 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1432 };
1433 
1434 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
1435 	{ ICE_MAC_OFOS,		0 },
1436 	{ ICE_IPV4_OFOS,	14 },
1437 	{ ICE_ESP,			34 },
1438 	{ ICE_PROTOCOL_LAST,	0 },
1439 };
1440 
1441 static const u8 dummy_ipv4_esp_pkt[] = {
1442 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1443 	0x00, 0x00, 0x00, 0x00,
1444 	0x00, 0x00, 0x00, 0x00,
1445 	0x08, 0x00,
1446 
1447 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
1448 	0x00, 0x00, 0x40, 0x00,
1449 	0x40, 0x32, 0x00, 0x00,
1450 	0x00, 0x00, 0x00, 0x00,
1451 	0x00, 0x00, 0x00, 0x00,
1452 
1453 	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1454 	0x00, 0x00, 0x00, 0x00,
1455 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1456 };
1457 
1458 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1459 	{ ICE_MAC_OFOS,		0 },
1460 	{ ICE_IPV6_OFOS,	14 },
1461 	{ ICE_ESP,			54 },
1462 	{ ICE_PROTOCOL_LAST,	0 },
1463 };
1464 
1465 static const u8 dummy_ipv6_esp_pkt[] = {
1466 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1467 	0x00, 0x00, 0x00, 0x00,
1468 	0x00, 0x00, 0x00, 0x00,
1469 	0x86, 0xDD,
1470 
1471 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1472 	0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1473 	0x00, 0x00, 0x00, 0x00,
1474 	0x00, 0x00, 0x00, 0x00,
1475 	0x00, 0x00, 0x00, 0x00,
1476 	0x00, 0x00, 0x00, 0x00,
1477 	0x00, 0x00, 0x00, 0x00,
1478 	0x00, 0x00, 0x00, 0x00,
1479 	0x00, 0x00, 0x00, 0x00,
1480 	0x00, 0x00, 0x00, 0x00,
1481 
1482 	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1483 	0x00, 0x00, 0x00, 0x00,
1484 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1485 };
1486 
1487 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1488 	{ ICE_MAC_OFOS,		0 },
1489 	{ ICE_IPV4_OFOS,	14 },
1490 	{ ICE_AH,			34 },
1491 	{ ICE_PROTOCOL_LAST,	0 },
1492 };
1493 
1494 static const u8 dummy_ipv4_ah_pkt[] = {
1495 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1496 	0x00, 0x00, 0x00, 0x00,
1497 	0x00, 0x00, 0x00, 0x00,
1498 	0x08, 0x00,
1499 
1500 	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1501 	0x00, 0x00, 0x40, 0x00,
1502 	0x40, 0x33, 0x00, 0x00,
1503 	0x00, 0x00, 0x00, 0x00,
1504 	0x00, 0x00, 0x00, 0x00,
1505 
1506 	0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1507 	0x00, 0x00, 0x00, 0x00,
1508 	0x00, 0x00, 0x00, 0x00,
1509 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1510 };
1511 
1512 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1513 	{ ICE_MAC_OFOS,		0 },
1514 	{ ICE_IPV6_OFOS,	14 },
1515 	{ ICE_AH,			54 },
1516 	{ ICE_PROTOCOL_LAST,	0 },
1517 };
1518 
1519 static const u8 dummy_ipv6_ah_pkt[] = {
1520 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1521 	0x00, 0x00, 0x00, 0x00,
1522 	0x00, 0x00, 0x00, 0x00,
1523 	0x86, 0xDD,
1524 
1525 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1526 	0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1527 	0x00, 0x00, 0x00, 0x00,
1528 	0x00, 0x00, 0x00, 0x00,
1529 	0x00, 0x00, 0x00, 0x00,
1530 	0x00, 0x00, 0x00, 0x00,
1531 	0x00, 0x00, 0x00, 0x00,
1532 	0x00, 0x00, 0x00, 0x00,
1533 	0x00, 0x00, 0x00, 0x00,
1534 	0x00, 0x00, 0x00, 0x00,
1535 
1536 	0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1537 	0x00, 0x00, 0x00, 0x00,
1538 	0x00, 0x00, 0x00, 0x00,
1539 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1540 };
1541 
1542 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1543 	{ ICE_MAC_OFOS,		0 },
1544 	{ ICE_IPV4_OFOS,	14 },
1545 	{ ICE_UDP_ILOS,		34 },
1546 	{ ICE_NAT_T,		42 },
1547 	{ ICE_PROTOCOL_LAST,	0 },
1548 };
1549 
1550 static const u8 dummy_ipv4_nat_pkt[] = {
1551 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1552 	0x00, 0x00, 0x00, 0x00,
1553 	0x00, 0x00, 0x00, 0x00,
1554 	0x08, 0x00,
1555 
1556 	0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1557 	0x00, 0x00, 0x40, 0x00,
1558 	0x40, 0x11, 0x00, 0x00,
1559 	0x00, 0x00, 0x00, 0x00,
1560 	0x00, 0x00, 0x00, 0x00,
1561 
1562 	0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1563 	0x00, 0x00, 0x00, 0x00,
1564 
1565 	0x00, 0x00, 0x00, 0x00,
1566 	0x00, 0x00, 0x00, 0x00,
1567 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1568 };
1569 
1570 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1571 	{ ICE_MAC_OFOS,		0 },
1572 	{ ICE_IPV6_OFOS,	14 },
1573 	{ ICE_UDP_ILOS,		54 },
1574 	{ ICE_NAT_T,		62 },
1575 	{ ICE_PROTOCOL_LAST,	0 },
1576 };
1577 
1578 static const u8 dummy_ipv6_nat_pkt[] = {
1579 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1580 	0x00, 0x00, 0x00, 0x00,
1581 	0x00, 0x00, 0x00, 0x00,
1582 	0x86, 0xDD,
1583 
1584 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1585 	0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1586 	0x00, 0x00, 0x00, 0x00,
1587 	0x00, 0x00, 0x00, 0x00,
1588 	0x00, 0x00, 0x00, 0x00,
1589 	0x00, 0x00, 0x00, 0x00,
1590 	0x00, 0x00, 0x00, 0x00,
1591 	0x00, 0x00, 0x00, 0x00,
1592 	0x00, 0x00, 0x00, 0x00,
1593 	0x00, 0x00, 0x00, 0x00,
1594 
1595 	0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1596 	0x00, 0x00, 0x00, 0x00,
1597 
1598 	0x00, 0x00, 0x00, 0x00,
1599 	0x00, 0x00, 0x00, 0x00,
1600 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1601 
1602 };
1603 
1604 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1605 	{ ICE_MAC_OFOS,		0 },
1606 	{ ICE_IPV4_OFOS,	14 },
1607 	{ ICE_L2TPV3,		34 },
1608 	{ ICE_PROTOCOL_LAST,	0 },
1609 };
1610 
1611 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1612 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1613 	0x00, 0x00, 0x00, 0x00,
1614 	0x00, 0x00, 0x00, 0x00,
1615 	0x08, 0x00,
1616 
1617 	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1618 	0x00, 0x00, 0x40, 0x00,
1619 	0x40, 0x73, 0x00, 0x00,
1620 	0x00, 0x00, 0x00, 0x00,
1621 	0x00, 0x00, 0x00, 0x00,
1622 
1623 	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1624 	0x00, 0x00, 0x00, 0x00,
1625 	0x00, 0x00, 0x00, 0x00,
1626 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1627 };
1628 
1629 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1630 	{ ICE_MAC_OFOS,		0 },
1631 	{ ICE_IPV6_OFOS,	14 },
1632 	{ ICE_L2TPV3,		54 },
1633 	{ ICE_PROTOCOL_LAST,	0 },
1634 };
1635 
1636 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1637 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1638 	0x00, 0x00, 0x00, 0x00,
1639 	0x00, 0x00, 0x00, 0x00,
1640 	0x86, 0xDD,
1641 
1642 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1643 	0x00, 0x0c, 0x73, 0x40,
1644 	0x00, 0x00, 0x00, 0x00,
1645 	0x00, 0x00, 0x00, 0x00,
1646 	0x00, 0x00, 0x00, 0x00,
1647 	0x00, 0x00, 0x00, 0x00,
1648 	0x00, 0x00, 0x00, 0x00,
1649 	0x00, 0x00, 0x00, 0x00,
1650 	0x00, 0x00, 0x00, 0x00,
1651 	0x00, 0x00, 0x00, 0x00,
1652 
1653 	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1654 	0x00, 0x00, 0x00, 0x00,
1655 	0x00, 0x00, 0x00, 0x00,
1656 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1657 };
1658 
1659 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1660 	{ ICE_MAC_OFOS,		0 },
1661 	{ ICE_VLAN_EX,		12 },
1662 	{ ICE_VLAN_IN,		16 },
1663 	{ ICE_ETYPE_OL,		20 },
1664 	{ ICE_IPV4_OFOS,	22 },
1665 	{ ICE_PROTOCOL_LAST,	0 },
1666 };
1667 
1668 static const u8 dummy_qinq_ipv4_pkt[] = {
1669 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1670 	0x00, 0x00, 0x00, 0x00,
1671 	0x00, 0x00, 0x00, 0x00,
1672 
1673 	0x91, 0x00, 0x00, 0x00,	/* ICE_VLAN_EX 12 */
1674 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1675 	0x08, 0x00,		/* ICE_ETYPE_OL 20 */
1676 
1677 	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 22 */
1678 	0x00, 0x01, 0x00, 0x00,
1679 	0x00, 0x00, 0x00, 0x00,
1680 	0x00, 0x00, 0x00, 0x00,
1681 	0x00, 0x00, 0x00, 0x00,
1682 
1683 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
1684 };
1685 
1686 static const
1687 struct ice_dummy_pkt_offsets dummy_qinq_ipv4_udp_packet_offsets[] = {
1688 	{ ICE_MAC_OFOS,		0 },
1689 	{ ICE_VLAN_EX,		12 },
1690 	{ ICE_VLAN_IN,		16 },
1691 	{ ICE_ETYPE_OL,		20 },
1692 	{ ICE_IPV4_OFOS,	22 },
1693 	{ ICE_UDP_ILOS,		42 },
1694 	{ ICE_PROTOCOL_LAST,	0 },
1695 };
1696 
1697 static const u8 dummy_qinq_ipv4_udp_pkt[] = {
1698 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1699 	0x00, 0x00, 0x00, 0x00,
1700 	0x00, 0x00, 0x00, 0x00,
1701 
1702 	0x91, 0x00, 0x00, 0x00,	/* ICE_VLAN_EX 12 */
1703 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1704 	0x08, 0x00,		/* ICE_ETYPE_OL 20 */
1705 
1706 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1707 	0x00, 0x01, 0x00, 0x00,
1708 	0x00, 0x11, 0x00, 0x00,
1709 	0x00, 0x00, 0x00, 0x00,
1710 	0x00, 0x00, 0x00, 0x00,
1711 
1712 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1713 	0x00, 0x08, 0x00, 0x00,
1714 
1715 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
1716 };
1717 
1718 static const
1719 struct ice_dummy_pkt_offsets dummy_qinq_ipv4_tcp_packet_offsets[] = {
1720 	{ ICE_MAC_OFOS,		0 },
1721 	{ ICE_VLAN_EX,		12 },
1722 	{ ICE_VLAN_IN,		16 },
1723 	{ ICE_ETYPE_OL,		20 },
1724 	{ ICE_IPV4_OFOS,	22 },
1725 	{ ICE_TCP_IL,		42 },
1726 	{ ICE_PROTOCOL_LAST,	0 },
1727 };
1728 
1729 static const u8 dummy_qinq_ipv4_tcp_pkt[] = {
1730 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1731 	0x00, 0x00, 0x00, 0x00,
1732 	0x00, 0x00, 0x00, 0x00,
1733 
1734 	0x91, 0x00, 0x00, 0x00,	/* ICE_VLAN_EX 12 */
1735 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1736 	0x08, 0x00,		/* ICE_ETYPE_OL 20 */
1737 
1738 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
1739 	0x00, 0x01, 0x00, 0x00,
1740 	0x00, 0x06, 0x00, 0x00,
1741 	0x00, 0x00, 0x00, 0x00,
1742 	0x00, 0x00, 0x00, 0x00,
1743 
1744 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
1745 	0x00, 0x00, 0x00, 0x00,
1746 	0x00, 0x00, 0x00, 0x00,
1747 	0x50, 0x00, 0x00, 0x00,
1748 	0x00, 0x00, 0x00, 0x00,
1749 
1750 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
1751 };
1752 
1753 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1754 	{ ICE_MAC_OFOS,		0 },
1755 	{ ICE_VLAN_EX,		12 },
1756 	{ ICE_VLAN_IN,		16 },
1757 	{ ICE_ETYPE_OL,		20 },
1758 	{ ICE_IPV6_OFOS,	22 },
1759 	{ ICE_PROTOCOL_LAST,	0 },
1760 };
1761 
1762 static const u8 dummy_qinq_ipv6_pkt[] = {
1763 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1764 	0x00, 0x00, 0x00, 0x00,
1765 	0x00, 0x00, 0x00, 0x00,
1766 
1767 	0x91, 0x00, 0x00, 0x00,	/* ICE_VLAN_EX 12 */
1768 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1769 	0x86, 0xDD,		/* ICE_ETYPE_OL 20 */
1770 
1771 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1772 	0x00, 0x00, 0x3b, 0x00,
1773 	0x00, 0x00, 0x00, 0x00,
1774 	0x00, 0x00, 0x00, 0x00,
1775 	0x00, 0x00, 0x00, 0x00,
1776 	0x00, 0x00, 0x00, 0x00,
1777 	0x00, 0x00, 0x00, 0x00,
1778 	0x00, 0x00, 0x00, 0x00,
1779 	0x00, 0x00, 0x00, 0x00,
1780 	0x00, 0x00, 0x00, 0x00,
1781 
1782 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
1783 };
1784 
1785 static const
1786 struct ice_dummy_pkt_offsets dummy_qinq_ipv6_udp_packet_offsets[] = {
1787 	{ ICE_MAC_OFOS,		0 },
1788 	{ ICE_VLAN_EX,		12 },
1789 	{ ICE_VLAN_IN,		16 },
1790 	{ ICE_ETYPE_OL,		20 },
1791 	{ ICE_IPV6_OFOS,	22 },
1792 	{ ICE_UDP_ILOS,		62 },
1793 	{ ICE_PROTOCOL_LAST,	0 },
1794 };
1795 
1796 static const u8 dummy_qinq_ipv6_udp_pkt[] = {
1797 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1798 	0x00, 0x00, 0x00, 0x00,
1799 	0x00, 0x00, 0x00, 0x00,
1800 
1801 	0x91, 0x00, 0x00, 0x00,	/* ICE_VLAN_EX 12 */
1802 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1803 	0x86, 0xDD,		/* ICE_ETYPE_OL 20 */
1804 
1805 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1806 	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
1807 	0x00, 0x00, 0x00, 0x00,
1808 	0x00, 0x00, 0x00, 0x00,
1809 	0x00, 0x00, 0x00, 0x00,
1810 	0x00, 0x00, 0x00, 0x00,
1811 	0x00, 0x00, 0x00, 0x00,
1812 	0x00, 0x00, 0x00, 0x00,
1813 	0x00, 0x00, 0x00, 0x00,
1814 	0x00, 0x00, 0x00, 0x00,
1815 
1816 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1817 	0x00, 0x08, 0x00, 0x00,
1818 
1819 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
1820 };
1821 
1822 static const
1823 struct ice_dummy_pkt_offsets dummy_qinq_ipv6_tcp_packet_offsets[] = {
1824 	{ ICE_MAC_OFOS,		0 },
1825 	{ ICE_VLAN_EX,		12 },
1826 	{ ICE_VLAN_IN,		16 },
1827 	{ ICE_ETYPE_OL,		20 },
1828 	{ ICE_IPV6_OFOS,	22 },
1829 	{ ICE_TCP_IL,		62 },
1830 	{ ICE_PROTOCOL_LAST,	0 },
1831 };
1832 
1833 static const u8 dummy_qinq_ipv6_tcp_pkt[] = {
1834 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1835 	0x00, 0x00, 0x00, 0x00,
1836 	0x00, 0x00, 0x00, 0x00,
1837 
1838 	0x91, 0x00, 0x00, 0x00,	/* ICE_VLAN_EX 12 */
1839 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1840 	0x86, 0xDD,		/* ICE_ETYPE_OL 20 */
1841 
1842 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1843 	0x00, 0x14, 0x06, 0x00, /* Next header TCP */
1844 	0x00, 0x00, 0x00, 0x00,
1845 	0x00, 0x00, 0x00, 0x00,
1846 	0x00, 0x00, 0x00, 0x00,
1847 	0x00, 0x00, 0x00, 0x00,
1848 	0x00, 0x00, 0x00, 0x00,
1849 	0x00, 0x00, 0x00, 0x00,
1850 	0x00, 0x00, 0x00, 0x00,
1851 	0x00, 0x00, 0x00, 0x00,
1852 
1853 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
1854 	0x00, 0x00, 0x00, 0x00,
1855 	0x00, 0x00, 0x00, 0x00,
1856 	0x50, 0x00, 0x00, 0x00,
1857 	0x00, 0x00, 0x00, 0x00,
1858 
1859 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
1860 };
1861 
1862 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1863 	{ ICE_MAC_OFOS,		0 },
1864 	{ ICE_VLAN_EX,		12 },
1865 	{ ICE_VLAN_IN,		16 },
1866 	{ ICE_ETYPE_OL,         20 },
1867 	{ ICE_PPPOE,		22 },
1868 	{ ICE_PROTOCOL_LAST,	0 },
1869 };
1870 
1871 static const
1872 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1873 	{ ICE_MAC_OFOS,		0 },
1874 	{ ICE_VLAN_EX,		12 },
1875 	{ ICE_VLAN_IN,		16 },
1876 	{ ICE_ETYPE_OL,         20 },
1877 	{ ICE_PPPOE,		22 },
1878 	{ ICE_IPV4_OFOS,	30 },
1879 	{ ICE_PROTOCOL_LAST,	0 },
1880 };
1881 
1882 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1883 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1884 	0x00, 0x00, 0x00, 0x00,
1885 	0x00, 0x00, 0x00, 0x00,
1886 
1887 	0x91, 0x00, 0x00, 0x00,	/* ICE_VLAN_EX 12 */
1888 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1889 	0x88, 0x64,		/* ICE_ETYPE_OL 20 */
1890 
1891 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1892 	0x00, 0x16,
1893 
1894 	0x00, 0x21,		/* PPP Link Layer 28 */
1895 
1896 	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 30 */
1897 	0x00, 0x00, 0x00, 0x00,
1898 	0x00, 0x00, 0x00, 0x00,
1899 	0x00, 0x00, 0x00, 0x00,
1900 	0x00, 0x00, 0x00, 0x00,
1901 
1902 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
1903 };
1904 
1905 static const
1906 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1907 	{ ICE_MAC_OFOS,		0 },
1908 	{ ICE_VLAN_EX,		12 },
1909 	{ ICE_VLAN_IN,		16 },
1910 	{ ICE_ETYPE_OL,		20 },
1911 	{ ICE_PPPOE,		22 },
1912 	{ ICE_IPV6_OFOS,	30 },
1913 	{ ICE_PROTOCOL_LAST,	0 },
1914 };
1915 
1916 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1917 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1918 	0x00, 0x00, 0x00, 0x00,
1919 	0x00, 0x00, 0x00, 0x00,
1920 
1921 	0x91, 0x00, 0x00, 0x00,	/* ICE_VLAN_EX 12 */
1922 	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
1923 	0x88, 0x64,		/* ICE_ETYPE_OL 20 */
1924 
1925 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1926 	0x00, 0x2a,
1927 
1928 	0x00, 0x57,		/* PPP Link Layer 28*/
1929 
1930 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1931 	0x00, 0x00, 0x3b, 0x00,
1932 	0x00, 0x00, 0x00, 0x00,
1933 	0x00, 0x00, 0x00, 0x00,
1934 	0x00, 0x00, 0x00, 0x00,
1935 	0x00, 0x00, 0x00, 0x00,
1936 	0x00, 0x00, 0x00, 0x00,
1937 	0x00, 0x00, 0x00, 0x00,
1938 	0x00, 0x00, 0x00, 0x00,
1939 	0x00, 0x00, 0x00, 0x00,
1940 
1941 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1942 };
1943 
1944 /* this is a recipe to profile association bitmap */
1945 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1946 			  ICE_MAX_NUM_PROFILES);
1947 
1948 /* this is a profile to recipe association bitmap */
1949 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1950 			  ICE_MAX_NUM_RECIPES);
1951 
1952 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1953 
1954 /**
1955  * ice_collect_result_idx - copy result index values
1956  * @buf: buffer that contains the result index
1957  * @recp: the recipe struct to copy data into
1958  */
ice_collect_result_idx(struct ice_aqc_recipe_data_elem * buf,struct ice_sw_recipe * recp)1959 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1960 				   struct ice_sw_recipe *recp)
1961 {
1962 	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1963 		ice_set_bit(buf->content.result_indx &
1964 			    ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1965 }
1966 
1967 static struct ice_prof_type_entry ice_prof_type_tbl[ICE_GTPU_PROFILE] = {
1968 	{ ICE_PROFID_IPV4_GTPU_IPV4_OTHER,    ICE_SW_TUN_IPV4_GTPU_IPV4},
1969 	{ ICE_PROFID_IPV4_GTPU_IPV4_UDP,      ICE_SW_TUN_IPV4_GTPU_IPV4_UDP},
1970 	{ ICE_PROFID_IPV4_GTPU_IPV4_TCP,      ICE_SW_TUN_IPV4_GTPU_IPV4_TCP},
1971 	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV4},
1972 	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP},
1973 	{ ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP},
1974 	{ ICE_PROFID_IPV4_GTPU_IPV6_OTHER,    ICE_SW_TUN_IPV4_GTPU_IPV6},
1975 	{ ICE_PROFID_IPV4_GTPU_IPV6_UDP,      ICE_SW_TUN_IPV4_GTPU_IPV6_UDP},
1976 	{ ICE_PROFID_IPV4_GTPU_IPV6_TCP,      ICE_SW_TUN_IPV4_GTPU_IPV6_TCP},
1977 	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV4_GTPU_EH_IPV6},
1978 	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP},
1979 	{ ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP,   ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP},
1980 	{ ICE_PROFID_IPV6_GTPU_IPV4_OTHER,    ICE_SW_TUN_IPV6_GTPU_IPV4},
1981 	{ ICE_PROFID_IPV6_GTPU_IPV4_UDP,      ICE_SW_TUN_IPV6_GTPU_IPV4_UDP},
1982 	{ ICE_PROFID_IPV6_GTPU_IPV4_TCP,      ICE_SW_TUN_IPV6_GTPU_IPV4_TCP},
1983 	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV4},
1984 	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP},
1985 	{ ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP},
1986 	{ ICE_PROFID_IPV6_GTPU_IPV6_OTHER,    ICE_SW_TUN_IPV6_GTPU_IPV6},
1987 	{ ICE_PROFID_IPV6_GTPU_IPV6_UDP,      ICE_SW_TUN_IPV6_GTPU_IPV6_UDP},
1988 	{ ICE_PROFID_IPV6_GTPU_IPV6_TCP,      ICE_SW_TUN_IPV6_GTPU_IPV6_TCP},
1989 	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, ICE_SW_TUN_IPV6_GTPU_EH_IPV6},
1990 	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP},
1991 	{ ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP,   ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP},
1992 };
1993 
1994 /**
1995  * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1996  * @rid: recipe ID that we are populating
1997  */
ice_get_tun_type_for_recipe(u8 rid,bool vlan)1998 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
1999 {
2000 	u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
2001 	u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
2002 	u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
2003 	u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
2004 	enum ice_sw_tunnel_type tun_type;
2005 	u16 i, j, k, profile_num = 0;
2006 	bool non_tun_valid = false;
2007 	bool pppoe_valid = false;
2008 	bool vxlan_valid = false;
2009 	bool gre_valid = false;
2010 	bool gtp_valid = false;
2011 	bool flag_valid = false;
2012 
2013 	for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
2014 		if (!ice_is_bit_set(recipe_to_profile[rid], j))
2015 			continue;
2016 		else
2017 			profile_num++;
2018 
2019 		for (i = 0; i < 12; i++) {
2020 			if (gre_profile[i] == j)
2021 				gre_valid = true;
2022 		}
2023 
2024 		for (i = 0; i < 12; i++) {
2025 			if (vxlan_profile[i] == j)
2026 				vxlan_valid = true;
2027 		}
2028 
2029 		for (i = 0; i < 7; i++) {
2030 			if (pppoe_profile[i] == j)
2031 				pppoe_valid = true;
2032 		}
2033 
2034 		for (i = 0; i < 6; i++) {
2035 			if (non_tun_profile[i] == j)
2036 				non_tun_valid = true;
2037 		}
2038 
2039 		if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
2040 		    j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
2041 			gtp_valid = true;
2042 
2043 		if ((j >= ICE_PROFID_IPV4_ESP &&
2044 		     j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
2045 		    (j >= ICE_PROFID_IPV4_GTPC_TEID &&
2046 		     j <= ICE_PROFID_IPV6_GTPU_TEID))
2047 			flag_valid = true;
2048 	}
2049 
2050 	if (!non_tun_valid && vxlan_valid)
2051 		tun_type = ICE_SW_TUN_VXLAN;
2052 	else if (!non_tun_valid && gre_valid)
2053 		tun_type = ICE_SW_TUN_NVGRE;
2054 	else if (!non_tun_valid && pppoe_valid)
2055 		tun_type = ICE_SW_TUN_PPPOE;
2056 	else if (!non_tun_valid && gtp_valid)
2057 		tun_type = ICE_SW_TUN_GTP;
2058 	else if (non_tun_valid &&
2059 		 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
2060 		tun_type = ICE_SW_TUN_AND_NON_TUN;
2061 	else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
2062 		 !pppoe_valid)
2063 		tun_type = ICE_NON_TUN;
2064 	else
2065 		tun_type = ICE_NON_TUN;
2066 
2067 	if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
2068 		i = ice_is_bit_set(recipe_to_profile[rid],
2069 				   ICE_PROFID_PPPOE_IPV4_OTHER);
2070 		j = ice_is_bit_set(recipe_to_profile[rid],
2071 				   ICE_PROFID_PPPOE_IPV6_OTHER);
2072 		if (i && !j)
2073 			tun_type = ICE_SW_TUN_PPPOE_IPV4;
2074 		else if (!i && j)
2075 			tun_type = ICE_SW_TUN_PPPOE_IPV6;
2076 	}
2077 
2078 	if (tun_type == ICE_SW_TUN_GTP) {
2079 		for (k = 0; k < ARRAY_SIZE(ice_prof_type_tbl); k++)
2080 			if (ice_is_bit_set(recipe_to_profile[rid],
2081 					   ice_prof_type_tbl[k].prof_id)) {
2082 				tun_type = ice_prof_type_tbl[k].type;
2083 				break;
2084 			}
2085 	}
2086 
2087 	if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
2088 		for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
2089 			if (ice_is_bit_set(recipe_to_profile[rid], j)) {
2090 				switch (j) {
2091 				case ICE_PROFID_IPV4_TCP:
2092 					tun_type = ICE_SW_IPV4_TCP;
2093 					break;
2094 				case ICE_PROFID_IPV4_UDP:
2095 					tun_type = ICE_SW_IPV4_UDP;
2096 					break;
2097 				case ICE_PROFID_IPV6_TCP:
2098 					tun_type = ICE_SW_IPV6_TCP;
2099 					break;
2100 				case ICE_PROFID_IPV6_UDP:
2101 					tun_type = ICE_SW_IPV6_UDP;
2102 					break;
2103 				case ICE_PROFID_PPPOE_PAY:
2104 					tun_type = ICE_SW_TUN_PPPOE_PAY;
2105 					break;
2106 				case ICE_PROFID_PPPOE_IPV4_TCP:
2107 					tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
2108 					break;
2109 				case ICE_PROFID_PPPOE_IPV4_UDP:
2110 					tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
2111 					break;
2112 				case ICE_PROFID_PPPOE_IPV4_OTHER:
2113 					tun_type = ICE_SW_TUN_PPPOE_IPV4;
2114 					break;
2115 				case ICE_PROFID_PPPOE_IPV6_TCP:
2116 					tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
2117 					break;
2118 				case ICE_PROFID_PPPOE_IPV6_UDP:
2119 					tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
2120 					break;
2121 				case ICE_PROFID_PPPOE_IPV6_OTHER:
2122 					tun_type = ICE_SW_TUN_PPPOE_IPV6;
2123 					break;
2124 				case ICE_PROFID_IPV4_ESP:
2125 					tun_type = ICE_SW_TUN_IPV4_ESP;
2126 					break;
2127 				case ICE_PROFID_IPV6_ESP:
2128 					tun_type = ICE_SW_TUN_IPV6_ESP;
2129 					break;
2130 				case ICE_PROFID_IPV4_AH:
2131 					tun_type = ICE_SW_TUN_IPV4_AH;
2132 					break;
2133 				case ICE_PROFID_IPV6_AH:
2134 					tun_type = ICE_SW_TUN_IPV6_AH;
2135 					break;
2136 				case ICE_PROFID_IPV4_NAT_T:
2137 					tun_type = ICE_SW_TUN_IPV4_NAT_T;
2138 					break;
2139 				case ICE_PROFID_IPV6_NAT_T:
2140 					tun_type = ICE_SW_TUN_IPV6_NAT_T;
2141 					break;
2142 				case ICE_PROFID_IPV4_PFCP_NODE:
2143 					tun_type =
2144 					ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
2145 					break;
2146 				case ICE_PROFID_IPV6_PFCP_NODE:
2147 					tun_type =
2148 					ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
2149 					break;
2150 				case ICE_PROFID_IPV4_PFCP_SESSION:
2151 					tun_type =
2152 					ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
2153 					break;
2154 				case ICE_PROFID_IPV6_PFCP_SESSION:
2155 					tun_type =
2156 					ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
2157 					break;
2158 				case ICE_PROFID_MAC_IPV4_L2TPV3:
2159 					tun_type = ICE_SW_TUN_IPV4_L2TPV3;
2160 					break;
2161 				case ICE_PROFID_MAC_IPV6_L2TPV3:
2162 					tun_type = ICE_SW_TUN_IPV6_L2TPV3;
2163 					break;
2164 				case ICE_PROFID_IPV4_GTPU_TEID:
2165 					tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
2166 					break;
2167 				case ICE_PROFID_IPV6_GTPU_TEID:
2168 					tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
2169 					break;
2170 				default:
2171 					break;
2172 				}
2173 
2174 				return tun_type;
2175 			}
2176 		}
2177 	}
2178 
2179 	if (vlan && tun_type == ICE_SW_TUN_PPPOE)
2180 		tun_type = ICE_SW_TUN_PPPOE_QINQ;
2181 	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
2182 		tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
2183 	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
2184 		tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
2185 	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
2186 		tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
2187 	else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
2188 		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
2189 	else if (vlan && tun_type == ICE_NON_TUN)
2190 		tun_type = ICE_NON_TUN_QINQ;
2191 
2192 	return tun_type;
2193 }
2194 
2195 /**
2196  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2197  * @hw: pointer to hardware structure
2198  * @recps: struct that we need to populate
2199  * @rid: recipe ID that we are populating
2200  * @refresh_required: true if we should get recipe to profile mapping from FW
2201  *
2202  * This function is used to populate all the necessary entries into our
2203  * bookkeeping so that we have a current list of all the recipes that are
2204  * programmed in the firmware.
2205  */
2206 static enum ice_status
ice_get_recp_frm_fw(struct ice_hw * hw,struct ice_sw_recipe * recps,u8 rid,bool * refresh_required)2207 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2208 		    bool *refresh_required)
2209 {
2210 	ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
2211 	struct ice_aqc_recipe_data_elem *tmp;
2212 	u16 num_recps = ICE_MAX_NUM_RECIPES;
2213 	struct ice_prot_lkup_ext *lkup_exts;
2214 	enum ice_status status;
2215 	u8 fv_word_idx = 0;
2216 	bool vlan = false;
2217 	u16 sub_recps;
2218 
2219 	ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
2220 
2221 	/* we need a buffer big enough to accommodate all the recipes */
2222 	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
2223 		ICE_MAX_NUM_RECIPES, sizeof(*tmp));
2224 	if (!tmp)
2225 		return ICE_ERR_NO_MEMORY;
2226 
2227 	tmp[0].recipe_indx = rid;
2228 	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2229 	/* non-zero status meaning recipe doesn't exist */
2230 	if (status)
2231 		goto err_unroll;
2232 
2233 	/* Get recipe to profile map so that we can get the fv from lkups that
2234 	 * we read for a recipe from FW. Since we want to minimize the number of
2235 	 * times we make this FW call, just make one call and cache the copy
2236 	 * until a new recipe is added. This operation is only required the
2237 	 * first time to get the changes from FW. Then to search existing
2238 	 * entries we don't need to update the cache again until another recipe
2239 	 * gets added.
2240 	 */
2241 	if (*refresh_required) {
2242 		ice_get_recp_to_prof_map(hw);
2243 		*refresh_required = false;
2244 	}
2245 
2246 	/* Start populating all the entries for recps[rid] based on lkups from
2247 	 * firmware. Note that we are only creating the root recipe in our
2248 	 * database.
2249 	 */
2250 	lkup_exts = &recps[rid].lkup_exts;
2251 
2252 	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2253 		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2254 		struct ice_recp_grp_entry *rg_entry;
2255 		u8 i, prof, idx, prot = 0;
2256 		bool is_root;
2257 		u16 off = 0;
2258 
2259 		rg_entry = (struct ice_recp_grp_entry *)
2260 			ice_malloc(hw, sizeof(*rg_entry));
2261 		if (!rg_entry) {
2262 			status = ICE_ERR_NO_MEMORY;
2263 			goto err_unroll;
2264 		}
2265 
2266 		idx = root_bufs.recipe_indx;
2267 		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2268 
2269 		/* Mark all result indices in this chain */
2270 		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2271 			ice_set_bit(root_bufs.content.result_indx &
2272 				    ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
2273 
2274 		/* get the first profile that is associated with rid */
2275 		prof = ice_find_first_bit(recipe_to_profile[idx],
2276 					  ICE_MAX_NUM_PROFILES);
2277 		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2278 			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2279 
2280 			rg_entry->fv_idx[i] = lkup_indx;
2281 			rg_entry->fv_mask[i] =
2282 				LE16_TO_CPU(root_bufs.content.mask[i + 1]);
2283 
2284 			/* If the recipe is a chained recipe then all its
2285 			 * child recipe's result will have a result index.
2286 			 * To fill fv_words we should not use those result
2287 			 * index, we only need the protocol ids and offsets.
2288 			 * We will skip all the fv_idx which stores result
2289 			 * index in them. We also need to skip any fv_idx which
2290 			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2291 			 * valid offset value.
2292 			 */
2293 			if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
2294 					   rg_entry->fv_idx[i]) ||
2295 			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2296 			    rg_entry->fv_idx[i] == 0)
2297 				continue;
2298 
2299 			ice_find_prot_off(hw, ICE_BLK_SW, prof,
2300 					  rg_entry->fv_idx[i], &prot, &off);
2301 			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2302 			lkup_exts->fv_words[fv_word_idx].off = off;
2303 			lkup_exts->field_mask[fv_word_idx] =
2304 				rg_entry->fv_mask[i];
2305 			if (prot == ICE_META_DATA_ID_HW &&
2306 			    off == ICE_TUN_FLAG_MDID_OFF)
2307 				vlan = true;
2308 			fv_word_idx++;
2309 		}
2310 		/* populate rg_list with the data from the child entry of this
2311 		 * recipe
2312 		 */
2313 		LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
2314 
2315 		/* Propagate some data to the recipe database */
2316 		recps[idx].is_root = !!is_root;
2317 		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2318 		ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2319 		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2320 			recps[idx].chain_idx = root_bufs.content.result_indx &
2321 				~ICE_AQ_RECIPE_RESULT_EN;
2322 			ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2323 		} else {
2324 			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2325 		}
2326 
2327 		if (!is_root)
2328 			continue;
2329 
2330 		/* Only do the following for root recipes entries */
2331 		ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2332 			   sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
2333 		recps[idx].root_rid = root_bufs.content.rid &
2334 			~ICE_AQ_RECIPE_ID_IS_ROOT;
2335 		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2336 	}
2337 
2338 	/* Complete initialization of the root recipe entry */
2339 	lkup_exts->n_val_words = fv_word_idx;
2340 	recps[rid].big_recp = (num_recps > 1);
2341 	recps[rid].n_grp_count = (u8)num_recps;
2342 	recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
2343 	recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
2344 		ice_memdup(hw, tmp, recps[rid].n_grp_count *
2345 			   sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
2346 	if (!recps[rid].root_buf)
2347 		goto err_unroll;
2348 
2349 	/* Copy result indexes */
2350 	ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2351 	recps[rid].recp_created = true;
2352 
2353 err_unroll:
2354 	ice_free(hw, tmp);
2355 	return status;
2356 }
2357 
2358 /**
2359  * ice_get_recp_to_prof_map - updates recipe to profile mapping
2360  * @hw: pointer to hardware structure
2361  *
2362  * This function is used to populate recipe_to_profile matrix where index to
2363  * this array is the recipe ID and the element is the mapping of which profiles
2364  * is this recipe mapped to.
2365  */
ice_get_recp_to_prof_map(struct ice_hw * hw)2366 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2367 {
2368 	ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2369 	u16 i;
2370 
2371 	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2372 		u16 j;
2373 
2374 		ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2375 		ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
2376 		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2377 			continue;
2378 		ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
2379 			      ICE_MAX_NUM_RECIPES);
2380 		ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2381 			ice_set_bit(i, recipe_to_profile[j]);
2382 	}
2383 }
2384 
2385 /**
2386  * ice_init_def_sw_recp - initialize the recipe book keeping tables
2387  * @hw: pointer to the HW struct
2388  * @recp_list: pointer to sw recipe list
2389  *
2390  * Allocate memory for the entire recipe table and initialize the structures/
2391  * entries corresponding to basic recipes.
2392  */
2393 enum ice_status
ice_init_def_sw_recp(struct ice_hw * hw,struct ice_sw_recipe ** recp_list)2394 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
2395 {
2396 	struct ice_sw_recipe *recps;
2397 	u8 i;
2398 
2399 	recps = (struct ice_sw_recipe *)
2400 		ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
2401 	if (!recps)
2402 		return ICE_ERR_NO_MEMORY;
2403 
2404 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
2405 		recps[i].root_rid = i;
2406 		INIT_LIST_HEAD(&recps[i].filt_rules);
2407 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
2408 		INIT_LIST_HEAD(&recps[i].rg_list);
2409 		ice_init_lock(&recps[i].filt_rule_lock);
2410 	}
2411 
2412 	*recp_list = recps;
2413 
2414 	return ICE_SUCCESS;
2415 }
2416 
2417 /**
2418  * ice_aq_get_sw_cfg - get switch configuration
2419  * @hw: pointer to the hardware structure
2420  * @buf: pointer to the result buffer
2421  * @buf_size: length of the buffer available for response
2422  * @req_desc: pointer to requested descriptor
2423  * @num_elems: pointer to number of elements
2424  * @cd: pointer to command details structure or NULL
2425  *
2426  * Get switch configuration (0x0200) to be placed in buf.
2427  * This admin command returns information such as initial VSI/port number
2428  * and switch ID it belongs to.
2429  *
2430  * NOTE: *req_desc is both an input/output parameter.
2431  * The caller of this function first calls this function with *request_desc set
2432  * to 0. If the response from f/w has *req_desc set to 0, all the switch
2433  * configuration information has been returned; if non-zero (meaning not all
2434  * the information was returned), the caller should call this function again
2435  * with *req_desc set to the previous value returned by f/w to get the
2436  * next block of switch configuration information.
2437  *
2438  * *num_elems is output only parameter. This reflects the number of elements
2439  * in response buffer. The caller of this function to use *num_elems while
2440  * parsing the response buffer.
2441  */
2442 static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw * hw,struct ice_aqc_get_sw_cfg_resp_elem * buf,u16 buf_size,u16 * req_desc,u16 * num_elems,struct ice_sq_cd * cd)2443 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
2444 		  u16 buf_size, u16 *req_desc, u16 *num_elems,
2445 		  struct ice_sq_cd *cd)
2446 {
2447 	struct ice_aqc_get_sw_cfg *cmd;
2448 	struct ice_aq_desc desc;
2449 	enum ice_status status;
2450 
2451 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
2452 	cmd = &desc.params.get_sw_conf;
2453 	cmd->element = CPU_TO_LE16(*req_desc);
2454 
2455 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2456 	if (!status) {
2457 		*req_desc = LE16_TO_CPU(cmd->element);
2458 		*num_elems = LE16_TO_CPU(cmd->num_elems);
2459 	}
2460 
2461 	return status;
2462 }
2463 
2464 /**
2465  * ice_alloc_rss_global_lut - allocate a RSS global LUT
2466  * @hw: pointer to the HW struct
2467  * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
2468  * @global_lut_id: output parameter for the RSS global LUT's ID
2469  */
ice_alloc_rss_global_lut(struct ice_hw * hw,bool shared_res,u16 * global_lut_id)2470 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
2471 {
2472 	struct ice_aqc_alloc_free_res_elem *sw_buf;
2473 	enum ice_status status;
2474 	u16 buf_len;
2475 
2476 	buf_len = ice_struct_size(sw_buf, elem, 1);
2477 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2478 	if (!sw_buf)
2479 		return ICE_ERR_NO_MEMORY;
2480 
2481 	sw_buf->num_elems = CPU_TO_LE16(1);
2482 	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
2483 				       (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2484 				       ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2485 
2486 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
2487 	if (status) {
2488 		ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
2489 			  shared_res ? "shared" : "dedicated", status);
2490 		goto ice_alloc_global_lut_exit;
2491 	}
2492 
2493 	*global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2494 
2495 ice_alloc_global_lut_exit:
2496 	ice_free(hw, sw_buf);
2497 	return status;
2498 }
2499 
2500 /**
2501  * ice_free_rss_global_lut - free a RSS global LUT
2502  * @hw: pointer to the HW struct
2503  * @global_lut_id: ID of the RSS global LUT to free
2504  */
ice_free_rss_global_lut(struct ice_hw * hw,u16 global_lut_id)2505 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
2506 {
2507 	struct ice_aqc_alloc_free_res_elem *sw_buf;
2508 	u16 buf_len, num_elems = 1;
2509 	enum ice_status status;
2510 
2511 	buf_len = ice_struct_size(sw_buf, elem, num_elems);
2512 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2513 	if (!sw_buf)
2514 		return ICE_ERR_NO_MEMORY;
2515 
2516 	sw_buf->num_elems = CPU_TO_LE16(num_elems);
2517 	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
2518 	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
2519 
2520 	status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
2521 	if (status)
2522 		ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
2523 			  global_lut_id, status);
2524 
2525 	ice_free(hw, sw_buf);
2526 	return status;
2527 }
2528 
2529 /**
2530  * ice_alloc_sw - allocate resources specific to switch
2531  * @hw: pointer to the HW struct
2532  * @ena_stats: true to turn on VEB stats
2533  * @shared_res: true for shared resource, false for dedicated resource
2534  * @sw_id: switch ID returned
2535  * @counter_id: VEB counter ID returned
2536  *
2537  * allocates switch resources (SWID and VEB counter) (0x0208)
2538  */
2539 enum ice_status
ice_alloc_sw(struct ice_hw * hw,bool ena_stats,bool shared_res,u16 * sw_id,u16 * counter_id)2540 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
2541 	     u16 *counter_id)
2542 {
2543 	struct ice_aqc_alloc_free_res_elem *sw_buf;
2544 	struct ice_aqc_res_elem *sw_ele;
2545 	enum ice_status status;
2546 	u16 buf_len;
2547 
2548 	buf_len = ice_struct_size(sw_buf, elem, 1);
2549 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2550 	if (!sw_buf)
2551 		return ICE_ERR_NO_MEMORY;
2552 
2553 	/* Prepare buffer for switch ID.
2554 	 * The number of resource entries in buffer is passed as 1 since only a
2555 	 * single switch/VEB instance is allocated, and hence a single sw_id
2556 	 * is requested.
2557 	 */
2558 	sw_buf->num_elems = CPU_TO_LE16(1);
2559 	sw_buf->res_type =
2560 		CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
2561 			    (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
2562 			    ICE_AQC_RES_TYPE_FLAG_DEDICATED));
2563 
2564 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2565 				       ice_aqc_opc_alloc_res, NULL);
2566 
2567 	if (status)
2568 		goto ice_alloc_sw_exit;
2569 
2570 	sw_ele = &sw_buf->elem[0];
2571 	*sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
2572 
2573 	if (ena_stats) {
2574 		/* Prepare buffer for VEB Counter */
2575 		enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
2576 		struct ice_aqc_alloc_free_res_elem *counter_buf;
2577 		struct ice_aqc_res_elem *counter_ele;
2578 
2579 		counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2580 				ice_malloc(hw, buf_len);
2581 		if (!counter_buf) {
2582 			status = ICE_ERR_NO_MEMORY;
2583 			goto ice_alloc_sw_exit;
2584 		}
2585 
2586 		/* The number of resource entries in buffer is passed as 1 since
2587 		 * only a single switch/VEB instance is allocated, and hence a
2588 		 * single VEB counter is requested.
2589 		 */
2590 		counter_buf->num_elems = CPU_TO_LE16(1);
2591 		counter_buf->res_type =
2592 			CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
2593 				    ICE_AQC_RES_TYPE_FLAG_DEDICATED);
2594 		status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2595 					       opc, NULL);
2596 
2597 		if (status) {
2598 			ice_free(hw, counter_buf);
2599 			goto ice_alloc_sw_exit;
2600 		}
2601 		counter_ele = &counter_buf->elem[0];
2602 		*counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
2603 		ice_free(hw, counter_buf);
2604 	}
2605 
2606 ice_alloc_sw_exit:
2607 	ice_free(hw, sw_buf);
2608 	return status;
2609 }
2610 
2611 /**
2612  * ice_free_sw - free resources specific to switch
2613  * @hw: pointer to the HW struct
2614  * @sw_id: switch ID returned
2615  * @counter_id: VEB counter ID returned
2616  *
2617  * free switch resources (SWID and VEB counter) (0x0209)
2618  *
2619  * NOTE: This function frees multiple resources. It continues
2620  * releasing other resources even after it encounters error.
2621  * The error code returned is the last error it encountered.
2622  */
ice_free_sw(struct ice_hw * hw,u16 sw_id,u16 counter_id)2623 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2624 {
2625 	struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2626 	enum ice_status status, ret_status;
2627 	u16 buf_len;
2628 
2629 	buf_len = ice_struct_size(sw_buf, elem, 1);
2630 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2631 	if (!sw_buf)
2632 		return ICE_ERR_NO_MEMORY;
2633 
2634 	/* Prepare buffer to free for switch ID res.
2635 	 * The number of resource entries in buffer is passed as 1 since only a
2636 	 * single switch/VEB instance is freed, and hence a single sw_id
2637 	 * is released.
2638 	 */
2639 	sw_buf->num_elems = CPU_TO_LE16(1);
2640 	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2641 	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2642 
2643 	ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2644 					   ice_aqc_opc_free_res, NULL);
2645 
2646 	if (ret_status)
2647 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2648 
2649 	/* Prepare buffer to free for VEB Counter resource */
2650 	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2651 			ice_malloc(hw, buf_len);
2652 	if (!counter_buf) {
2653 		ice_free(hw, sw_buf);
2654 		return ICE_ERR_NO_MEMORY;
2655 	}
2656 
2657 	/* The number of resource entries in buffer is passed as 1 since only a
2658 	 * single switch/VEB instance is freed, and hence a single VEB counter
2659 	 * is released
2660 	 */
2661 	counter_buf->num_elems = CPU_TO_LE16(1);
2662 	counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2663 	counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2664 
2665 	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2666 				       ice_aqc_opc_free_res, NULL);
2667 	if (status) {
2668 		ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2669 		ret_status = status;
2670 	}
2671 
2672 	ice_free(hw, counter_buf);
2673 	ice_free(hw, sw_buf);
2674 	return ret_status;
2675 }
2676 
2677 /**
2678  * ice_aq_add_vsi
2679  * @hw: pointer to the HW struct
2680  * @vsi_ctx: pointer to a VSI context struct
2681  * @cd: pointer to command details structure or NULL
2682  *
2683  * Add a VSI context to the hardware (0x0210)
2684  */
2685 enum ice_status
ice_aq_add_vsi(struct ice_hw * hw,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)2686 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2687 	       struct ice_sq_cd *cd)
2688 {
2689 	struct ice_aqc_add_update_free_vsi_resp *res;
2690 	struct ice_aqc_add_get_update_free_vsi *cmd;
2691 	struct ice_aq_desc desc;
2692 	enum ice_status status;
2693 
2694 	cmd = &desc.params.vsi_cmd;
2695 	res = &desc.params.add_update_free_vsi_res;
2696 
2697 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2698 
2699 	if (!vsi_ctx->alloc_from_pool)
2700 		cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2701 					   ICE_AQ_VSI_IS_VALID);
2702 
2703 	cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2704 
2705 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2706 
2707 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2708 				 sizeof(vsi_ctx->info), cd);
2709 
2710 	if (!status) {
2711 		vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2712 		vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2713 		vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2714 	}
2715 
2716 	return status;
2717 }
2718 
2719 /**
2720  * ice_aq_free_vsi
2721  * @hw: pointer to the HW struct
2722  * @vsi_ctx: pointer to a VSI context struct
2723  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2724  * @cd: pointer to command details structure or NULL
2725  *
2726  * Free VSI context info from hardware (0x0213)
2727  */
2728 enum ice_status
ice_aq_free_vsi(struct ice_hw * hw,struct ice_vsi_ctx * vsi_ctx,bool keep_vsi_alloc,struct ice_sq_cd * cd)2729 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2730 		bool keep_vsi_alloc, struct ice_sq_cd *cd)
2731 {
2732 	struct ice_aqc_add_update_free_vsi_resp *resp;
2733 	struct ice_aqc_add_get_update_free_vsi *cmd;
2734 	struct ice_aq_desc desc;
2735 	enum ice_status status;
2736 
2737 	cmd = &desc.params.vsi_cmd;
2738 	resp = &desc.params.add_update_free_vsi_res;
2739 
2740 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2741 
2742 	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2743 	if (keep_vsi_alloc)
2744 		cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2745 
2746 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2747 	if (!status) {
2748 		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2749 		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2750 	}
2751 
2752 	return status;
2753 }
2754 
2755 /**
2756  * ice_aq_update_vsi
2757  * @hw: pointer to the HW struct
2758  * @vsi_ctx: pointer to a VSI context struct
2759  * @cd: pointer to command details structure or NULL
2760  *
2761  * Update VSI context in the hardware (0x0211)
2762  */
2763 enum ice_status
ice_aq_update_vsi(struct ice_hw * hw,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)2764 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2765 		  struct ice_sq_cd *cd)
2766 {
2767 	struct ice_aqc_add_update_free_vsi_resp *resp;
2768 	struct ice_aqc_add_get_update_free_vsi *cmd;
2769 	struct ice_aq_desc desc;
2770 	enum ice_status status;
2771 
2772 	cmd = &desc.params.vsi_cmd;
2773 	resp = &desc.params.add_update_free_vsi_res;
2774 
2775 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2776 
2777 	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2778 
2779 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2780 
2781 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2782 				 sizeof(vsi_ctx->info), cd);
2783 
2784 	if (!status) {
2785 		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2786 		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2787 	}
2788 
2789 	return status;
2790 }
2791 
2792 /**
2793  * ice_is_vsi_valid - check whether the VSI is valid or not
2794  * @hw: pointer to the HW struct
2795  * @vsi_handle: VSI handle
2796  *
2797  * check whether the VSI is valid or not
2798  */
ice_is_vsi_valid(struct ice_hw * hw,u16 vsi_handle)2799 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2800 {
2801 	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2802 }
2803 
2804 /**
2805  * ice_get_hw_vsi_num - return the HW VSI number
2806  * @hw: pointer to the HW struct
2807  * @vsi_handle: VSI handle
2808  *
2809  * return the HW VSI number
2810  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2811  */
ice_get_hw_vsi_num(struct ice_hw * hw,u16 vsi_handle)2812 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2813 {
2814 	return hw->vsi_ctx[vsi_handle]->vsi_num;
2815 }
2816 
2817 /**
2818  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2819  * @hw: pointer to the HW struct
2820  * @vsi_handle: VSI handle
2821  *
2822  * return the VSI context entry for a given VSI handle
2823  */
ice_get_vsi_ctx(struct ice_hw * hw,u16 vsi_handle)2824 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2825 {
2826 	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2827 }
2828 
2829 /**
2830  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2831  * @hw: pointer to the HW struct
2832  * @vsi_handle: VSI handle
2833  * @vsi: VSI context pointer
2834  *
2835  * save the VSI context entry for a given VSI handle
2836  */
2837 static void
ice_save_vsi_ctx(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi)2838 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2839 {
2840 	hw->vsi_ctx[vsi_handle] = vsi;
2841 }
2842 
2843 /**
2844  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2845  * @hw: pointer to the HW struct
2846  * @vsi_handle: VSI handle
2847  */
ice_clear_vsi_q_ctx(struct ice_hw * hw,u16 vsi_handle)2848 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2849 {
2850 	struct ice_vsi_ctx *vsi;
2851 	u8 i;
2852 
2853 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
2854 	if (!vsi)
2855 		return;
2856 	ice_for_each_traffic_class(i) {
2857 		if (vsi->lan_q_ctx[i]) {
2858 			ice_free(hw, vsi->lan_q_ctx[i]);
2859 			vsi->lan_q_ctx[i] = NULL;
2860 		}
2861 	}
2862 }
2863 
2864 /**
2865  * ice_clear_vsi_ctx - clear the VSI context entry
2866  * @hw: pointer to the HW struct
2867  * @vsi_handle: VSI handle
2868  *
2869  * clear the VSI context entry
2870  */
ice_clear_vsi_ctx(struct ice_hw * hw,u16 vsi_handle)2871 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2872 {
2873 	struct ice_vsi_ctx *vsi;
2874 
2875 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
2876 	if (vsi) {
2877 		ice_clear_vsi_q_ctx(hw, vsi_handle);
2878 		ice_free(hw, vsi);
2879 		hw->vsi_ctx[vsi_handle] = NULL;
2880 	}
2881 }
2882 
2883 /**
2884  * ice_clear_all_vsi_ctx - clear all the VSI context entries
2885  * @hw: pointer to the HW struct
2886  */
ice_clear_all_vsi_ctx(struct ice_hw * hw)2887 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2888 {
2889 	u16 i;
2890 
2891 	for (i = 0; i < ICE_MAX_VSI; i++)
2892 		ice_clear_vsi_ctx(hw, i);
2893 }
2894 
2895 /**
2896  * ice_add_vsi - add VSI context to the hardware and VSI handle list
2897  * @hw: pointer to the HW struct
2898  * @vsi_handle: unique VSI handle provided by drivers
2899  * @vsi_ctx: pointer to a VSI context struct
2900  * @cd: pointer to command details structure or NULL
2901  *
2902  * Add a VSI context to the hardware also add it into the VSI handle list.
2903  * If this function gets called after reset for existing VSIs then update
2904  * with the new HW VSI number in the corresponding VSI handle list entry.
2905  */
2906 enum ice_status
ice_add_vsi(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)2907 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2908 	    struct ice_sq_cd *cd)
2909 {
2910 	struct ice_vsi_ctx *tmp_vsi_ctx;
2911 	enum ice_status status;
2912 
2913 	if (vsi_handle >= ICE_MAX_VSI)
2914 		return ICE_ERR_PARAM;
2915 	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2916 	if (status)
2917 		return status;
2918 	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2919 	if (!tmp_vsi_ctx) {
2920 		/* Create a new VSI context */
2921 		tmp_vsi_ctx = (struct ice_vsi_ctx *)
2922 			ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2923 		if (!tmp_vsi_ctx) {
2924 			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2925 			return ICE_ERR_NO_MEMORY;
2926 		}
2927 		*tmp_vsi_ctx = *vsi_ctx;
2928 
2929 		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2930 	} else {
2931 		/* update with new HW VSI num */
2932 		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2933 	}
2934 
2935 	return ICE_SUCCESS;
2936 }
2937 
2938 /**
2939  * ice_free_vsi- free VSI context from hardware and VSI handle list
2940  * @hw: pointer to the HW struct
2941  * @vsi_handle: unique VSI handle
2942  * @vsi_ctx: pointer to a VSI context struct
2943  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2944  * @cd: pointer to command details structure or NULL
2945  *
2946  * Free VSI context info from hardware as well as from VSI handle list
2947  */
2948 enum ice_status
ice_free_vsi(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi_ctx,bool keep_vsi_alloc,struct ice_sq_cd * cd)2949 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2950 	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
2951 {
2952 	enum ice_status status;
2953 
2954 	if (!ice_is_vsi_valid(hw, vsi_handle))
2955 		return ICE_ERR_PARAM;
2956 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2957 	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2958 	if (!status)
2959 		ice_clear_vsi_ctx(hw, vsi_handle);
2960 	return status;
2961 }
2962 
2963 /**
2964  * ice_update_vsi
2965  * @hw: pointer to the HW struct
2966  * @vsi_handle: unique VSI handle
2967  * @vsi_ctx: pointer to a VSI context struct
2968  * @cd: pointer to command details structure or NULL
2969  *
2970  * Update VSI context in the hardware
2971  */
2972 enum ice_status
ice_update_vsi(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)2973 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2974 	       struct ice_sq_cd *cd)
2975 {
2976 	if (!ice_is_vsi_valid(hw, vsi_handle))
2977 		return ICE_ERR_PARAM;
2978 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2979 	return ice_aq_update_vsi(hw, vsi_ctx, cd);
2980 }
2981 
2982 /**
2983  * ice_aq_get_vsi_params
2984  * @hw: pointer to the HW struct
2985  * @vsi_ctx: pointer to a VSI context struct
2986  * @cd: pointer to command details structure or NULL
2987  *
2988  * Get VSI context info from hardware (0x0212)
2989  */
2990 enum ice_status
ice_aq_get_vsi_params(struct ice_hw * hw,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)2991 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2992 		      struct ice_sq_cd *cd)
2993 {
2994 	struct ice_aqc_add_get_update_free_vsi *cmd;
2995 	struct ice_aqc_get_vsi_resp *resp;
2996 	struct ice_aq_desc desc;
2997 	enum ice_status status;
2998 
2999 	cmd = &desc.params.vsi_cmd;
3000 	resp = &desc.params.get_vsi_resp;
3001 
3002 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
3003 
3004 	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
3005 
3006 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
3007 				 sizeof(vsi_ctx->info), cd);
3008 	if (!status) {
3009 		vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
3010 					ICE_AQ_VSI_NUM_M;
3011 		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
3012 		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
3013 	}
3014 
3015 	return status;
3016 }
3017 
3018 /**
3019  * ice_aq_add_update_mir_rule - add/update a mirror rule
3020  * @hw: pointer to the HW struct
3021  * @rule_type: Rule Type
3022  * @dest_vsi: VSI number to which packets will be mirrored
3023  * @count: length of the list
3024  * @mr_buf: buffer for list of mirrored VSI numbers
3025  * @cd: pointer to command details structure or NULL
3026  * @rule_id: Rule ID
3027  *
3028  * Add/Update Mirror Rule (0x260).
3029  */
3030 enum ice_status
ice_aq_add_update_mir_rule(struct ice_hw * hw,u16 rule_type,u16 dest_vsi,u16 count,struct ice_mir_rule_buf * mr_buf,struct ice_sq_cd * cd,u16 * rule_id)3031 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
3032 			   u16 count, struct ice_mir_rule_buf *mr_buf,
3033 			   struct ice_sq_cd *cd, u16 *rule_id)
3034 {
3035 	struct ice_aqc_add_update_mir_rule *cmd;
3036 	struct ice_aq_desc desc;
3037 	enum ice_status status;
3038 	__le16 *mr_list = NULL;
3039 	u16 buf_size = 0;
3040 
3041 	switch (rule_type) {
3042 	case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
3043 	case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
3044 		/* Make sure count and mr_buf are set for these rule_types */
3045 		if (!(count && mr_buf))
3046 			return ICE_ERR_PARAM;
3047 
3048 		buf_size = count * sizeof(__le16);
3049 		mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
3050 		if (!mr_list)
3051 			return ICE_ERR_NO_MEMORY;
3052 		break;
3053 	case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
3054 	case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
3055 		/* Make sure count and mr_buf are not set for these
3056 		 * rule_types
3057 		 */
3058 		if (count || mr_buf)
3059 			return ICE_ERR_PARAM;
3060 		break;
3061 	default:
3062 		ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
3063 		return ICE_ERR_OUT_OF_RANGE;
3064 	}
3065 
3066 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
3067 
3068 	/* Pre-process 'mr_buf' items for add/update of virtual port
3069 	 * ingress/egress mirroring (but not physical port ingress/egress
3070 	 * mirroring)
3071 	 */
3072 	if (mr_buf) {
3073 		int i;
3074 
3075 		for (i = 0; i < count; i++) {
3076 			u16 id;
3077 
3078 			id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
3079 
3080 			/* Validate specified VSI number, make sure it is less
3081 			 * than ICE_MAX_VSI, if not return with error.
3082 			 */
3083 			if (id >= ICE_MAX_VSI) {
3084 				ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
3085 					  id);
3086 				ice_free(hw, mr_list);
3087 				return ICE_ERR_OUT_OF_RANGE;
3088 			}
3089 
3090 			/* add VSI to mirror rule */
3091 			if (mr_buf[i].add)
3092 				mr_list[i] =
3093 					CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
3094 			else /* remove VSI from mirror rule */
3095 				mr_list[i] = CPU_TO_LE16(id);
3096 		}
3097 	}
3098 
3099 	cmd = &desc.params.add_update_rule;
3100 	if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
3101 		cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
3102 					   ICE_AQC_RULE_ID_VALID_M);
3103 	cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
3104 	cmd->num_entries = CPU_TO_LE16(count);
3105 	cmd->dest = CPU_TO_LE16(dest_vsi);
3106 
3107 	status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
3108 	if (!status)
3109 		*rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
3110 
3111 	ice_free(hw, mr_list);
3112 
3113 	return status;
3114 }
3115 
3116 /**
3117  * ice_aq_delete_mir_rule - delete a mirror rule
3118  * @hw: pointer to the HW struct
3119  * @rule_id: Mirror rule ID (to be deleted)
3120  * @keep_allocd: if set, the VSI stays part of the PF allocated res,
3121  *		 otherwise it is returned to the shared pool
3122  * @cd: pointer to command details structure or NULL
3123  *
3124  * Delete Mirror Rule (0x261).
3125  */
3126 enum ice_status
ice_aq_delete_mir_rule(struct ice_hw * hw,u16 rule_id,bool keep_allocd,struct ice_sq_cd * cd)3127 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
3128 		       struct ice_sq_cd *cd)
3129 {
3130 	struct ice_aqc_delete_mir_rule *cmd;
3131 	struct ice_aq_desc desc;
3132 
3133 	/* rule_id should be in the range 0...63 */
3134 	if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
3135 		return ICE_ERR_OUT_OF_RANGE;
3136 
3137 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
3138 
3139 	cmd = &desc.params.del_rule;
3140 	rule_id |= ICE_AQC_RULE_ID_VALID_M;
3141 	cmd->rule_id = CPU_TO_LE16(rule_id);
3142 
3143 	if (keep_allocd)
3144 		cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
3145 
3146 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3147 }
3148 
3149 /**
3150  * ice_aq_alloc_free_vsi_list
3151  * @hw: pointer to the HW struct
3152  * @vsi_list_id: VSI list ID returned or used for lookup
3153  * @lkup_type: switch rule filter lookup type
3154  * @opc: switch rules population command type - pass in the command opcode
3155  *
3156  * allocates or free a VSI list resource
3157  */
3158 static enum ice_status
ice_aq_alloc_free_vsi_list(struct ice_hw * hw,u16 * vsi_list_id,enum ice_sw_lkup_type lkup_type,enum ice_adminq_opc opc)3159 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
3160 			   enum ice_sw_lkup_type lkup_type,
3161 			   enum ice_adminq_opc opc)
3162 {
3163 	struct ice_aqc_alloc_free_res_elem *sw_buf;
3164 	struct ice_aqc_res_elem *vsi_ele;
3165 	enum ice_status status;
3166 	u16 buf_len;
3167 
3168 	buf_len = ice_struct_size(sw_buf, elem, 1);
3169 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3170 	if (!sw_buf)
3171 		return ICE_ERR_NO_MEMORY;
3172 	sw_buf->num_elems = CPU_TO_LE16(1);
3173 
3174 	if (lkup_type == ICE_SW_LKUP_MAC ||
3175 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3176 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3177 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3178 	    lkup_type == ICE_SW_LKUP_PROMISC ||
3179 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3180 	    lkup_type == ICE_SW_LKUP_LAST) {
3181 		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
3182 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
3183 		sw_buf->res_type =
3184 			CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
3185 	} else {
3186 		status = ICE_ERR_PARAM;
3187 		goto ice_aq_alloc_free_vsi_list_exit;
3188 	}
3189 
3190 	if (opc == ice_aqc_opc_free_res)
3191 		sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
3192 
3193 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
3194 	if (status)
3195 		goto ice_aq_alloc_free_vsi_list_exit;
3196 
3197 	if (opc == ice_aqc_opc_alloc_res) {
3198 		vsi_ele = &sw_buf->elem[0];
3199 		*vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
3200 	}
3201 
3202 ice_aq_alloc_free_vsi_list_exit:
3203 	ice_free(hw, sw_buf);
3204 	return status;
3205 }
3206 
3207 /**
3208  * ice_aq_set_storm_ctrl - Sets storm control configuration
3209  * @hw: pointer to the HW struct
3210  * @bcast_thresh: represents the upper threshold for broadcast storm control
3211  * @mcast_thresh: represents the upper threshold for multicast storm control
3212  * @ctl_bitmask: storm control knobs
3213  *
3214  * Sets the storm control configuration (0x0280)
3215  */
3216 enum ice_status
ice_aq_set_storm_ctrl(struct ice_hw * hw,u32 bcast_thresh,u32 mcast_thresh,u32 ctl_bitmask)3217 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
3218 		      u32 ctl_bitmask)
3219 {
3220 	struct ice_aqc_storm_cfg *cmd;
3221 	struct ice_aq_desc desc;
3222 
3223 	cmd = &desc.params.storm_conf;
3224 
3225 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
3226 
3227 	cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
3228 	cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
3229 	cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
3230 
3231 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3232 }
3233 
3234 /**
3235  * ice_aq_get_storm_ctrl - gets storm control configuration
3236  * @hw: pointer to the HW struct
3237  * @bcast_thresh: represents the upper threshold for broadcast storm control
3238  * @mcast_thresh: represents the upper threshold for multicast storm control
3239  * @ctl_bitmask: storm control knobs
3240  *
3241  * Gets the storm control configuration (0x0281)
3242  */
3243 enum ice_status
ice_aq_get_storm_ctrl(struct ice_hw * hw,u32 * bcast_thresh,u32 * mcast_thresh,u32 * ctl_bitmask)3244 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
3245 		      u32 *ctl_bitmask)
3246 {
3247 	enum ice_status status;
3248 	struct ice_aq_desc desc;
3249 
3250 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
3251 
3252 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3253 	if (!status) {
3254 		struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
3255 
3256 		if (bcast_thresh)
3257 			*bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
3258 				ICE_AQ_THRESHOLD_M;
3259 		if (mcast_thresh)
3260 			*mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
3261 				ICE_AQ_THRESHOLD_M;
3262 		if (ctl_bitmask)
3263 			*ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
3264 	}
3265 
3266 	return status;
3267 }
3268 
3269 /**
3270  * ice_aq_sw_rules - add/update/remove switch rules
3271  * @hw: pointer to the HW struct
3272  * @rule_list: pointer to switch rule population list
3273  * @rule_list_sz: total size of the rule list in bytes
3274  * @num_rules: number of switch rules in the rule_list
3275  * @opc: switch rules population command type - pass in the command opcode
3276  * @cd: pointer to command details structure or NULL
3277  *
3278  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
3279  */
3280 static enum ice_status
ice_aq_sw_rules(struct ice_hw * hw,void * rule_list,u16 rule_list_sz,u8 num_rules,enum ice_adminq_opc opc,struct ice_sq_cd * cd)3281 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
3282 		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3283 {
3284 	struct ice_aq_desc desc;
3285 	enum ice_status status;
3286 
3287 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3288 
3289 	if (opc != ice_aqc_opc_add_sw_rules &&
3290 	    opc != ice_aqc_opc_update_sw_rules &&
3291 	    opc != ice_aqc_opc_remove_sw_rules)
3292 		return ICE_ERR_PARAM;
3293 
3294 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
3295 
3296 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3297 	desc.params.sw_rules.num_rules_fltr_entry_index =
3298 		CPU_TO_LE16(num_rules);
3299 	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
3300 	if (opc != ice_aqc_opc_add_sw_rules &&
3301 	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
3302 		status = ICE_ERR_DOES_NOT_EXIST;
3303 
3304 	return status;
3305 }
3306 
3307 /**
3308  * ice_aq_add_recipe - add switch recipe
3309  * @hw: pointer to the HW struct
3310  * @s_recipe_list: pointer to switch rule population list
3311  * @num_recipes: number of switch recipes in the list
3312  * @cd: pointer to command details structure or NULL
3313  *
3314  * Add(0x0290)
3315  */
3316 enum ice_status
ice_aq_add_recipe(struct ice_hw * hw,struct ice_aqc_recipe_data_elem * s_recipe_list,u16 num_recipes,struct ice_sq_cd * cd)3317 ice_aq_add_recipe(struct ice_hw *hw,
3318 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
3319 		  u16 num_recipes, struct ice_sq_cd *cd)
3320 {
3321 	struct ice_aqc_add_get_recipe *cmd;
3322 	struct ice_aq_desc desc;
3323 	u16 buf_size;
3324 
3325 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3326 	cmd = &desc.params.add_get_recipe;
3327 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
3328 
3329 	cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
3330 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3331 
3332 	buf_size = num_recipes * sizeof(*s_recipe_list);
3333 
3334 	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3335 }
3336 
3337 /**
3338  * ice_aq_get_recipe - get switch recipe
3339  * @hw: pointer to the HW struct
3340  * @s_recipe_list: pointer to switch rule population list
3341  * @num_recipes: pointer to the number of recipes (input and output)
3342  * @recipe_root: root recipe number of recipe(s) to retrieve
3343  * @cd: pointer to command details structure or NULL
3344  *
3345  * Get(0x0292)
3346  *
3347  * On input, *num_recipes should equal the number of entries in s_recipe_list.
3348  * On output, *num_recipes will equal the number of entries returned in
3349  * s_recipe_list.
3350  *
3351  * The caller must supply enough space in s_recipe_list to hold all possible
3352  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
3353  */
3354 enum ice_status
ice_aq_get_recipe(struct ice_hw * hw,struct ice_aqc_recipe_data_elem * s_recipe_list,u16 * num_recipes,u16 recipe_root,struct ice_sq_cd * cd)3355 ice_aq_get_recipe(struct ice_hw *hw,
3356 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
3357 		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
3358 {
3359 	struct ice_aqc_add_get_recipe *cmd;
3360 	struct ice_aq_desc desc;
3361 	enum ice_status status;
3362 	u16 buf_size;
3363 
3364 	if (*num_recipes != ICE_MAX_NUM_RECIPES)
3365 		return ICE_ERR_PARAM;
3366 
3367 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3368 	cmd = &desc.params.add_get_recipe;
3369 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
3370 
3371 	cmd->return_index = CPU_TO_LE16(recipe_root);
3372 	cmd->num_sub_recipes = 0;
3373 
3374 	buf_size = *num_recipes * sizeof(*s_recipe_list);
3375 
3376 	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
3377 	*num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
3378 
3379 	return status;
3380 }
3381 
3382 /**
3383  * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
3384  * @hw: pointer to the HW struct
3385  * @params: parameters used to update the default recipe
3386  *
3387  * This function only supports updating default recipes and it only supports
3388  * updating a single recipe based on the lkup_idx at a time.
3389  *
3390  * This is done as a read-modify-write operation. First, get the current recipe
3391  * contents based on the recipe's ID. Then modify the field vector index and
3392  * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
3393  * the pre-existing recipe with the modifications.
3394  */
3395 enum ice_status
ice_update_recipe_lkup_idx(struct ice_hw * hw,struct ice_update_recipe_lkup_idx_params * params)3396 ice_update_recipe_lkup_idx(struct ice_hw *hw,
3397 			   struct ice_update_recipe_lkup_idx_params *params)
3398 {
3399 	struct ice_aqc_recipe_data_elem *rcp_list;
3400 	u16 num_recps = ICE_MAX_NUM_RECIPES;
3401 	enum ice_status status;
3402 
3403 	rcp_list = (struct ice_aqc_recipe_data_elem *)ice_malloc(hw, num_recps * sizeof(*rcp_list));
3404 	if (!rcp_list)
3405 		return ICE_ERR_NO_MEMORY;
3406 
3407 	/* read current recipe list from firmware */
3408 	rcp_list->recipe_indx = params->rid;
3409 	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
3410 	if (status) {
3411 		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
3412 			  params->rid, status);
3413 		goto error_out;
3414 	}
3415 
3416 	/* only modify existing recipe's lkup_idx and mask if valid, while
3417 	 * leaving all other fields the same, then update the recipe firmware
3418 	 */
3419 	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
3420 	if (params->mask_valid)
3421 		rcp_list->content.mask[params->lkup_idx] =
3422 			CPU_TO_LE16(params->mask);
3423 
3424 	if (params->ignore_valid)
3425 		rcp_list->content.lkup_indx[params->lkup_idx] |=
3426 			ICE_AQ_RECIPE_LKUP_IGNORE;
3427 
3428 	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
3429 	if (status)
3430 		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
3431 			  params->rid, params->lkup_idx, params->fv_idx,
3432 			  params->mask, params->mask_valid ? "true" : "false",
3433 			  status);
3434 
3435 error_out:
3436 	ice_free(hw, rcp_list);
3437 	return status;
3438 }
3439 
3440 /**
3441  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
3442  * @hw: pointer to the HW struct
3443  * @profile_id: package profile ID to associate the recipe with
3444  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3445  * @cd: pointer to command details structure or NULL
3446  * Recipe to profile association (0x0291)
3447  */
3448 enum ice_status
ice_aq_map_recipe_to_profile(struct ice_hw * hw,u32 profile_id,u8 * r_bitmap,struct ice_sq_cd * cd)3449 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3450 			     struct ice_sq_cd *cd)
3451 {
3452 	struct ice_aqc_recipe_to_profile *cmd;
3453 	struct ice_aq_desc desc;
3454 
3455 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3456 	cmd = &desc.params.recipe_to_profile;
3457 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
3458 	cmd->profile_id = CPU_TO_LE16(profile_id);
3459 	/* Set the recipe ID bit in the bitmask to let the device know which
3460 	 * profile we are associating the recipe to
3461 	 */
3462 	ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
3463 		   ICE_NONDMA_TO_NONDMA);
3464 
3465 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3466 }
3467 
3468 /**
3469  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
3470  * @hw: pointer to the HW struct
3471  * @profile_id: package profile ID to associate the recipe with
3472  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
3473  * @cd: pointer to command details structure or NULL
3474  * Associate profile ID with given recipe (0x0293)
3475  */
3476 enum ice_status
ice_aq_get_recipe_to_profile(struct ice_hw * hw,u32 profile_id,u8 * r_bitmap,struct ice_sq_cd * cd)3477 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
3478 			     struct ice_sq_cd *cd)
3479 {
3480 	struct ice_aqc_recipe_to_profile *cmd;
3481 	struct ice_aq_desc desc;
3482 	enum ice_status status;
3483 
3484 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3485 	cmd = &desc.params.recipe_to_profile;
3486 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
3487 	cmd->profile_id = CPU_TO_LE16(profile_id);
3488 
3489 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3490 	if (!status)
3491 		ice_memcpy(r_bitmap, cmd->recipe_assoc,
3492 			   sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
3493 
3494 	return status;
3495 }
3496 
3497 /**
3498  * ice_alloc_recipe - add recipe resource
3499  * @hw: pointer to the hardware structure
3500  * @rid: recipe ID returned as response to AQ call
3501  */
ice_alloc_recipe(struct ice_hw * hw,u16 * rid)3502 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
3503 {
3504 	struct ice_aqc_alloc_free_res_elem *sw_buf;
3505 	enum ice_status status;
3506 	u16 buf_len;
3507 
3508 	buf_len = ice_struct_size(sw_buf, elem, 1);
3509 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
3510 	if (!sw_buf)
3511 		return ICE_ERR_NO_MEMORY;
3512 
3513 	sw_buf->num_elems = CPU_TO_LE16(1);
3514 	sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
3515 					ICE_AQC_RES_TYPE_S) |
3516 					ICE_AQC_RES_TYPE_FLAG_SHARED);
3517 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
3518 				       ice_aqc_opc_alloc_res, NULL);
3519 	if (!status)
3520 		*rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
3521 	ice_free(hw, sw_buf);
3522 
3523 	return status;
3524 }
3525 
3526 /* ice_init_port_info - Initialize port_info with switch configuration data
3527  * @pi: pointer to port_info
3528  * @vsi_port_num: VSI number or port number
3529  * @type: Type of switch element (port or VSI)
3530  * @swid: switch ID of the switch the element is attached to
3531  * @pf_vf_num: PF or VF number
3532  * @is_vf: true if the element is a VF, false otherwise
3533  */
3534 static void
ice_init_port_info(struct ice_port_info * pi,u16 vsi_port_num,u8 type,u16 swid,u16 pf_vf_num,bool is_vf)3535 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
3536 		   u16 swid, u16 pf_vf_num, bool is_vf)
3537 {
3538 	switch (type) {
3539 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3540 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
3541 		pi->sw_id = swid;
3542 		pi->pf_vf_num = pf_vf_num;
3543 		pi->is_vf = is_vf;
3544 		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3545 		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3546 		break;
3547 	default:
3548 		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
3549 		break;
3550 	}
3551 }
3552 
3553 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
3554  * @hw: pointer to the hardware structure
3555  */
ice_get_initial_sw_cfg(struct ice_hw * hw)3556 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
3557 {
3558 	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
3559 	enum ice_status status;
3560 	u8 num_total_ports;
3561 	u16 req_desc = 0;
3562 	u16 num_elems;
3563 	u8 j = 0;
3564 	u16 i;
3565 
3566 	num_total_ports = 1;
3567 
3568 	rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
3569 		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
3570 
3571 	if (!rbuf)
3572 		return ICE_ERR_NO_MEMORY;
3573 
3574 	/* Multiple calls to ice_aq_get_sw_cfg may be required
3575 	 * to get all the switch configuration information. The need
3576 	 * for additional calls is indicated by ice_aq_get_sw_cfg
3577 	 * writing a non-zero value in req_desc
3578 	 */
3579 	do {
3580 		struct ice_aqc_get_sw_cfg_resp_elem *ele;
3581 
3582 		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
3583 					   &req_desc, &num_elems, NULL);
3584 
3585 		if (status)
3586 			break;
3587 
3588 		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
3589 			u16 pf_vf_num, swid, vsi_port_num;
3590 			bool is_vf = false;
3591 			u8 res_type;
3592 
3593 			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
3594 				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
3595 
3596 			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
3597 				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
3598 
3599 			swid = LE16_TO_CPU(ele->swid);
3600 
3601 			if (LE16_TO_CPU(ele->pf_vf_num) &
3602 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
3603 				is_vf = true;
3604 
3605 			res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
3606 					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
3607 
3608 			switch (res_type) {
3609 			case ICE_AQC_GET_SW_CONF_RESP_VSI:
3610 				if (hw->dcf_enabled && !is_vf)
3611 					hw->pf_id = pf_vf_num;
3612 				break;
3613 			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
3614 			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
3615 				if (j == num_total_ports) {
3616 					ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
3617 					status = ICE_ERR_CFG;
3618 					goto out;
3619 				}
3620 				ice_init_port_info(hw->port_info,
3621 						   vsi_port_num, res_type, swid,
3622 						   pf_vf_num, is_vf);
3623 				j++;
3624 				break;
3625 			default:
3626 				break;
3627 			}
3628 		}
3629 	} while (req_desc && !status);
3630 
3631 out:
3632 	ice_free(hw, rbuf);
3633 	return status;
3634 }
3635 
3636 /**
3637  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
3638  * @hw: pointer to the hardware structure
3639  * @fi: filter info structure to fill/update
3640  *
3641  * This helper function populates the lb_en and lan_en elements of the provided
3642  * ice_fltr_info struct using the switch's type and characteristics of the
3643  * switch rule being configured.
3644  */
ice_fill_sw_info(struct ice_hw * hw,struct ice_fltr_info * fi)3645 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
3646 {
3647 	if ((fi->flag & ICE_FLTR_RX) &&
3648 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
3649 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
3650 	    fi->lkup_type == ICE_SW_LKUP_LAST)
3651 		fi->lan_en = true;
3652 	fi->lb_en = false;
3653 	fi->lan_en = false;
3654 	if ((fi->flag & ICE_FLTR_TX) &&
3655 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
3656 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3657 	     fi->fltr_act == ICE_FWD_TO_Q ||
3658 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
3659 		/* Setting LB for prune actions will result in replicated
3660 		 * packets to the internal switch that will be dropped.
3661 		 */
3662 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
3663 			fi->lb_en = true;
3664 
3665 		/* Set lan_en to TRUE if
3666 		 * 1. The switch is a VEB AND
3667 		 * 2
3668 		 * 2.1 The lookup is a directional lookup like ethertype,
3669 		 * promiscuous, ethertype-MAC, promiscuous-VLAN
3670 		 * and default-port OR
3671 		 * 2.2 The lookup is VLAN, OR
3672 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
3673 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3674 		 *
3675 		 * OR
3676 		 *
3677 		 * The switch is a VEPA.
3678 		 *
3679 		 * In all other cases, the LAN enable has to be set to false.
3680 		 */
3681 		if (hw->evb_veb) {
3682 			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3683 			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3684 			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3685 			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3686 			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
3687 			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
3688 			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
3689 			     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3690 			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3691 			     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3692 				fi->lan_en = true;
3693 		} else {
3694 			fi->lan_en = true;
3695 		}
3696 	}
3697 }
3698 
3699 /**
3700  * ice_fill_sw_rule - Helper function to fill switch rule structure
3701  * @hw: pointer to the hardware structure
3702  * @f_info: entry containing packet forwarding information
3703  * @s_rule: switch rule structure to be filled in based on mac_entry
3704  * @opc: switch rules population command type - pass in the command opcode
3705  */
3706 static void
ice_fill_sw_rule(struct ice_hw * hw,struct ice_fltr_info * f_info,struct ice_aqc_sw_rules_elem * s_rule,enum ice_adminq_opc opc)3707 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3708 		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
3709 {
3710 	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
3711 	u16 vlan_tpid = ICE_ETH_P_8021Q;
3712 	void *daddr = NULL;
3713 	u16 eth_hdr_sz;
3714 	u8 *eth_hdr;
3715 	u32 act = 0;
3716 	__be16 *off;
3717 	u8 q_rgn;
3718 
3719 	if (opc == ice_aqc_opc_remove_sw_rules) {
3720 		s_rule->pdata.lkup_tx_rx.act = 0;
3721 		s_rule->pdata.lkup_tx_rx.index =
3722 			CPU_TO_LE16(f_info->fltr_rule_id);
3723 		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3724 		return;
3725 	}
3726 
3727 	eth_hdr_sz = sizeof(dummy_eth_header);
3728 	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3729 
3730 	/* initialize the ether header with a dummy header */
3731 	ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3732 	ice_fill_sw_info(hw, f_info);
3733 
3734 	switch (f_info->fltr_act) {
3735 	case ICE_FWD_TO_VSI:
3736 		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3737 			ICE_SINGLE_ACT_VSI_ID_M;
3738 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3739 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3740 				ICE_SINGLE_ACT_VALID_BIT;
3741 		break;
3742 	case ICE_FWD_TO_VSI_LIST:
3743 		act |= ICE_SINGLE_ACT_VSI_LIST;
3744 		act |= (f_info->fwd_id.vsi_list_id <<
3745 			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3746 			ICE_SINGLE_ACT_VSI_LIST_ID_M;
3747 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3748 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3749 				ICE_SINGLE_ACT_VALID_BIT;
3750 		break;
3751 	case ICE_FWD_TO_Q:
3752 		act |= ICE_SINGLE_ACT_TO_Q;
3753 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3754 			ICE_SINGLE_ACT_Q_INDEX_M;
3755 		break;
3756 	case ICE_DROP_PACKET:
3757 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3758 			ICE_SINGLE_ACT_VALID_BIT;
3759 		break;
3760 	case ICE_FWD_TO_QGRP:
3761 		q_rgn = f_info->qgrp_size > 0 ?
3762 			(u8)ice_ilog2(f_info->qgrp_size) : 0;
3763 		act |= ICE_SINGLE_ACT_TO_Q;
3764 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3765 			ICE_SINGLE_ACT_Q_INDEX_M;
3766 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3767 			ICE_SINGLE_ACT_Q_REGION_M;
3768 		break;
3769 	default:
3770 		return;
3771 	}
3772 
3773 	if (f_info->lb_en)
3774 		act |= ICE_SINGLE_ACT_LB_ENABLE;
3775 	if (f_info->lan_en)
3776 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
3777 
3778 	switch (f_info->lkup_type) {
3779 	case ICE_SW_LKUP_MAC:
3780 		daddr = f_info->l_data.mac.mac_addr;
3781 		break;
3782 	case ICE_SW_LKUP_VLAN:
3783 		vlan_id = f_info->l_data.vlan.vlan_id;
3784 		if (f_info->l_data.vlan.tpid_valid)
3785 			vlan_tpid = f_info->l_data.vlan.tpid;
3786 		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3787 		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3788 			act |= ICE_SINGLE_ACT_PRUNE;
3789 			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3790 		}
3791 		break;
3792 	case ICE_SW_LKUP_ETHERTYPE_MAC:
3793 		daddr = f_info->l_data.ethertype_mac.mac_addr;
3794 		/* fall-through */
3795 	case ICE_SW_LKUP_ETHERTYPE:
3796 		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3797 		*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3798 		break;
3799 	case ICE_SW_LKUP_MAC_VLAN:
3800 		daddr = f_info->l_data.mac_vlan.mac_addr;
3801 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
3802 		break;
3803 	case ICE_SW_LKUP_PROMISC_VLAN:
3804 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
3805 		/* fall-through */
3806 	case ICE_SW_LKUP_PROMISC:
3807 		daddr = f_info->l_data.mac_vlan.mac_addr;
3808 		break;
3809 	default:
3810 		break;
3811 	}
3812 
3813 	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3814 		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3815 		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3816 
3817 	/* Recipe set depending on lookup type */
3818 	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3819 	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3820 	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3821 
3822 	if (daddr)
3823 		ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3824 			   ICE_NONDMA_TO_NONDMA);
3825 
3826 	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3827 		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3828 		*off = CPU_TO_BE16(vlan_id);
3829 		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3830 		*off = CPU_TO_BE16(vlan_tpid);
3831 	}
3832 
3833 	/* Create the switch rule with the final dummy Ethernet header */
3834 	if (opc != ice_aqc_opc_update_sw_rules)
3835 		s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3836 }
3837 
3838 /**
3839  * ice_add_marker_act
3840  * @hw: pointer to the hardware structure
3841  * @m_ent: the management entry for which sw marker needs to be added
3842  * @sw_marker: sw marker to tag the Rx descriptor with
3843  * @l_id: large action resource ID
3844  *
3845  * Create a large action to hold software marker and update the switch rule
3846  * entry pointed by m_ent with newly created large action
3847  */
3848 static enum ice_status
ice_add_marker_act(struct ice_hw * hw,struct ice_fltr_mgmt_list_entry * m_ent,u16 sw_marker,u16 l_id)3849 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3850 		   u16 sw_marker, u16 l_id)
3851 {
3852 	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3853 	/* For software marker we need 3 large actions
3854 	 * 1. FWD action: FWD TO VSI or VSI LIST
3855 	 * 2. GENERIC VALUE action to hold the profile ID
3856 	 * 3. GENERIC VALUE action to hold the software marker ID
3857 	 */
3858 	const u16 num_lg_acts = 3;
3859 	enum ice_status status;
3860 	u16 lg_act_size;
3861 	u16 rules_size;
3862 	u32 act;
3863 	u16 id;
3864 
3865 	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3866 		return ICE_ERR_PARAM;
3867 
3868 	/* Create two back-to-back switch rules and submit them to the HW using
3869 	 * one memory buffer:
3870 	 *    1. Large Action
3871 	 *    2. Look up Tx Rx
3872 	 */
3873 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3874 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3875 	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3876 	if (!lg_act)
3877 		return ICE_ERR_NO_MEMORY;
3878 
3879 	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3880 
3881 	/* Fill in the first switch rule i.e. large action */
3882 	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3883 	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3884 	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3885 
3886 	/* First action VSI forwarding or VSI list forwarding depending on how
3887 	 * many VSIs
3888 	 */
3889 	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3890 		m_ent->fltr_info.fwd_id.hw_vsi_id;
3891 
3892 	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3893 	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3894 	if (m_ent->vsi_count > 1)
3895 		act |= ICE_LG_ACT_VSI_LIST;
3896 	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3897 
3898 	/* Second action descriptor type */
3899 	act = ICE_LG_ACT_GENERIC;
3900 
3901 	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3902 	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3903 
3904 	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3905 	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3906 
3907 	/* Third action Marker value */
3908 	act |= ICE_LG_ACT_GENERIC;
3909 	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3910 		ICE_LG_ACT_GENERIC_VALUE_M;
3911 
3912 	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3913 
3914 	/* call the fill switch rule to fill the lookup Tx Rx structure */
3915 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3916 			 ice_aqc_opc_update_sw_rules);
3917 
3918 	/* Update the action to point to the large action ID */
3919 	rx_tx->pdata.lkup_tx_rx.act =
3920 		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3921 			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3922 			     ICE_SINGLE_ACT_PTR_VAL_M));
3923 
3924 	/* Use the filter rule ID of the previously created rule with single
3925 	 * act. Once the update happens, hardware will treat this as large
3926 	 * action
3927 	 */
3928 	rx_tx->pdata.lkup_tx_rx.index =
3929 		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3930 
3931 	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3932 				 ice_aqc_opc_update_sw_rules, NULL);
3933 	if (!status) {
3934 		m_ent->lg_act_idx = l_id;
3935 		m_ent->sw_marker_id = sw_marker;
3936 	}
3937 
3938 	ice_free(hw, lg_act);
3939 	return status;
3940 }
3941 
3942 /**
3943  * ice_add_counter_act - add/update filter rule with counter action
3944  * @hw: pointer to the hardware structure
3945  * @m_ent: the management entry for which counter needs to be added
3946  * @counter_id: VLAN counter ID returned as part of allocate resource
3947  * @l_id: large action resource ID
3948  */
3949 static enum ice_status
ice_add_counter_act(struct ice_hw * hw,struct ice_fltr_mgmt_list_entry * m_ent,u16 counter_id,u16 l_id)3950 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3951 		    u16 counter_id, u16 l_id)
3952 {
3953 	struct ice_aqc_sw_rules_elem *lg_act;
3954 	struct ice_aqc_sw_rules_elem *rx_tx;
3955 	enum ice_status status;
3956 	/* 2 actions will be added while adding a large action counter */
3957 	const int num_acts = 2;
3958 	u16 lg_act_size;
3959 	u16 rules_size;
3960 	u16 f_rule_id;
3961 	u32 act;
3962 	u16 id;
3963 
3964 	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3965 		return ICE_ERR_PARAM;
3966 
3967 	/* Create two back-to-back switch rules and submit them to the HW using
3968 	 * one memory buffer:
3969 	 * 1. Large Action
3970 	 * 2. Look up Tx Rx
3971 	 */
3972 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3973 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3974 	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3975 	if (!lg_act)
3976 		return ICE_ERR_NO_MEMORY;
3977 
3978 	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3979 
3980 	/* Fill in the first switch rule i.e. large action */
3981 	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3982 	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3983 	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3984 
3985 	/* First action VSI forwarding or VSI list forwarding depending on how
3986 	 * many VSIs
3987 	 */
3988 	id = (m_ent->vsi_count > 1) ?  m_ent->fltr_info.fwd_id.vsi_list_id :
3989 		m_ent->fltr_info.fwd_id.hw_vsi_id;
3990 
3991 	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3992 	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3993 		ICE_LG_ACT_VSI_LIST_ID_M;
3994 	if (m_ent->vsi_count > 1)
3995 		act |= ICE_LG_ACT_VSI_LIST;
3996 	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3997 
3998 	/* Second action counter ID */
3999 	act = ICE_LG_ACT_STAT_COUNT;
4000 	act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
4001 		ICE_LG_ACT_STAT_COUNT_M;
4002 	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
4003 
4004 	/* call the fill switch rule to fill the lookup Tx Rx structure */
4005 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
4006 			 ice_aqc_opc_update_sw_rules);
4007 
4008 	act = ICE_SINGLE_ACT_PTR;
4009 	act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
4010 	rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
4011 
4012 	/* Use the filter rule ID of the previously created rule with single
4013 	 * act. Once the update happens, hardware will treat this as large
4014 	 * action
4015 	 */
4016 	f_rule_id = m_ent->fltr_info.fltr_rule_id;
4017 	rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
4018 
4019 	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
4020 				 ice_aqc_opc_update_sw_rules, NULL);
4021 	if (!status) {
4022 		m_ent->lg_act_idx = l_id;
4023 		m_ent->counter_index = counter_id;
4024 	}
4025 
4026 	ice_free(hw, lg_act);
4027 	return status;
4028 }
4029 
4030 /**
4031  * ice_create_vsi_list_map
4032  * @hw: pointer to the hardware structure
4033  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
4034  * @num_vsi: number of VSI handles in the array
4035  * @vsi_list_id: VSI list ID generated as part of allocate resource
4036  *
4037  * Helper function to create a new entry of VSI list ID to VSI mapping
4038  * using the given VSI list ID
4039  */
4040 static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 vsi_list_id)4041 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
4042 			u16 vsi_list_id)
4043 {
4044 	struct ice_switch_info *sw = hw->switch_info;
4045 	struct ice_vsi_list_map_info *v_map;
4046 	int i;
4047 
4048 	v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
4049 	if (!v_map)
4050 		return NULL;
4051 
4052 	v_map->vsi_list_id = vsi_list_id;
4053 	v_map->ref_cnt = 1;
4054 	for (i = 0; i < num_vsi; i++)
4055 		ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
4056 
4057 	LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
4058 	return v_map;
4059 }
4060 
4061 /**
4062  * ice_update_vsi_list_rule
4063  * @hw: pointer to the hardware structure
4064  * @vsi_handle_arr: array of VSI handles to form a VSI list
4065  * @num_vsi: number of VSI handles in the array
4066  * @vsi_list_id: VSI list ID generated as part of allocate resource
4067  * @remove: Boolean value to indicate if this is a remove action
4068  * @opc: switch rules population command type - pass in the command opcode
4069  * @lkup_type: lookup type of the filter
4070  *
4071  * Call AQ command to add a new switch rule or update existing switch rule
4072  * using the given VSI list ID
4073  */
4074 static enum ice_status
ice_update_vsi_list_rule(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 vsi_list_id,bool remove,enum ice_adminq_opc opc,enum ice_sw_lkup_type lkup_type)4075 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
4076 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
4077 			 enum ice_sw_lkup_type lkup_type)
4078 {
4079 	struct ice_aqc_sw_rules_elem *s_rule;
4080 	enum ice_status status;
4081 	u16 s_rule_size;
4082 	u16 rule_type;
4083 	int i;
4084 
4085 	if (!num_vsi)
4086 		return ICE_ERR_PARAM;
4087 
4088 	if (lkup_type == ICE_SW_LKUP_MAC ||
4089 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
4090 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
4091 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
4092 	    lkup_type == ICE_SW_LKUP_PROMISC ||
4093 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
4094 	    lkup_type == ICE_SW_LKUP_LAST)
4095 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
4096 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
4097 	else if (lkup_type == ICE_SW_LKUP_VLAN)
4098 		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
4099 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
4100 	else
4101 		return ICE_ERR_PARAM;
4102 
4103 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
4104 	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4105 	if (!s_rule)
4106 		return ICE_ERR_NO_MEMORY;
4107 	for (i = 0; i < num_vsi; i++) {
4108 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
4109 			status = ICE_ERR_PARAM;
4110 			goto exit;
4111 		}
4112 		/* AQ call requires hw_vsi_id(s) */
4113 		s_rule->pdata.vsi_list.vsi[i] =
4114 			CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
4115 	}
4116 
4117 	s_rule->type = CPU_TO_LE16(rule_type);
4118 	s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
4119 	s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
4120 
4121 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
4122 
4123 exit:
4124 	ice_free(hw, s_rule);
4125 	return status;
4126 }
4127 
4128 /**
4129  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
4130  * @hw: pointer to the HW struct
4131  * @vsi_handle_arr: array of VSI handles to form a VSI list
4132  * @num_vsi: number of VSI handles in the array
4133  * @vsi_list_id: stores the ID of the VSI list to be created
4134  * @lkup_type: switch rule filter's lookup type
4135  */
4136 static enum ice_status
ice_create_vsi_list_rule(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 * vsi_list_id,enum ice_sw_lkup_type lkup_type)4137 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
4138 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
4139 {
4140 	enum ice_status status;
4141 
4142 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
4143 					    ice_aqc_opc_alloc_res);
4144 	if (status)
4145 		return status;
4146 
4147 	/* Update the newly created VSI list to include the specified VSIs */
4148 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
4149 					*vsi_list_id, false,
4150 					ice_aqc_opc_add_sw_rules, lkup_type);
4151 }
4152 
4153 /**
4154  * ice_create_pkt_fwd_rule
4155  * @hw: pointer to the hardware structure
4156  * @recp_list: corresponding filter management list
4157  * @f_entry: entry containing packet forwarding information
4158  *
4159  * Create switch rule with given filter information and add an entry
4160  * to the corresponding filter management list to track this switch rule
4161  * and VSI mapping
4162  */
4163 static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw * hw,struct ice_sw_recipe * recp_list,struct ice_fltr_list_entry * f_entry)4164 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4165 			struct ice_fltr_list_entry *f_entry)
4166 {
4167 	struct ice_fltr_mgmt_list_entry *fm_entry;
4168 	struct ice_aqc_sw_rules_elem *s_rule;
4169 	enum ice_status status;
4170 
4171 	s_rule = (struct ice_aqc_sw_rules_elem *)
4172 		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4173 	if (!s_rule)
4174 		return ICE_ERR_NO_MEMORY;
4175 	fm_entry = (struct ice_fltr_mgmt_list_entry *)
4176 		   ice_malloc(hw, sizeof(*fm_entry));
4177 	if (!fm_entry) {
4178 		status = ICE_ERR_NO_MEMORY;
4179 		goto ice_create_pkt_fwd_rule_exit;
4180 	}
4181 
4182 	fm_entry->fltr_info = f_entry->fltr_info;
4183 
4184 	/* Initialize all the fields for the management entry */
4185 	fm_entry->vsi_count = 1;
4186 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
4187 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
4188 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
4189 
4190 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
4191 			 ice_aqc_opc_add_sw_rules);
4192 
4193 	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4194 				 ice_aqc_opc_add_sw_rules, NULL);
4195 	if (status) {
4196 		ice_free(hw, fm_entry);
4197 		goto ice_create_pkt_fwd_rule_exit;
4198 	}
4199 
4200 	f_entry->fltr_info.fltr_rule_id =
4201 		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4202 	fm_entry->fltr_info.fltr_rule_id =
4203 		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4204 
4205 	/* The book keeping entries will get removed when base driver
4206 	 * calls remove filter AQ command
4207 	 */
4208 	LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
4209 
4210 ice_create_pkt_fwd_rule_exit:
4211 	ice_free(hw, s_rule);
4212 	return status;
4213 }
4214 
4215 /**
4216  * ice_update_pkt_fwd_rule
4217  * @hw: pointer to the hardware structure
4218  * @f_info: filter information for switch rule
4219  *
4220  * Call AQ command to update a previously created switch rule with a
4221  * VSI list ID
4222  */
4223 static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw * hw,struct ice_fltr_info * f_info)4224 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
4225 {
4226 	struct ice_aqc_sw_rules_elem *s_rule;
4227 	enum ice_status status;
4228 
4229 	s_rule = (struct ice_aqc_sw_rules_elem *)
4230 		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
4231 	if (!s_rule)
4232 		return ICE_ERR_NO_MEMORY;
4233 
4234 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
4235 
4236 	s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
4237 
4238 	/* Update switch rule with new rule set to forward VSI list */
4239 	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
4240 				 ice_aqc_opc_update_sw_rules, NULL);
4241 
4242 	ice_free(hw, s_rule);
4243 	return status;
4244 }
4245 
4246 /**
4247  * ice_update_sw_rule_bridge_mode
4248  * @hw: pointer to the HW struct
4249  *
4250  * Updates unicast switch filter rules based on VEB/VEPA mode
4251  */
ice_update_sw_rule_bridge_mode(struct ice_hw * hw)4252 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
4253 {
4254 	struct ice_switch_info *sw = hw->switch_info;
4255 	struct ice_fltr_mgmt_list_entry *fm_entry;
4256 	enum ice_status status = ICE_SUCCESS;
4257 	struct LIST_HEAD_TYPE *rule_head;
4258 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4259 
4260 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4261 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
4262 
4263 	ice_acquire_lock(rule_lock);
4264 	LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
4265 			    list_entry) {
4266 		struct ice_fltr_info *fi = &fm_entry->fltr_info;
4267 		u8 *addr = fi->l_data.mac.mac_addr;
4268 
4269 		/* Update unicast Tx rules to reflect the selected
4270 		 * VEB/VEPA mode
4271 		 */
4272 		if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
4273 		    (fi->fltr_act == ICE_FWD_TO_VSI ||
4274 		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
4275 		     fi->fltr_act == ICE_FWD_TO_Q ||
4276 		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
4277 			status = ice_update_pkt_fwd_rule(hw, fi);
4278 			if (status)
4279 				break;
4280 		}
4281 	}
4282 
4283 	ice_release_lock(rule_lock);
4284 
4285 	return status;
4286 }
4287 
4288 /**
4289  * ice_add_update_vsi_list
4290  * @hw: pointer to the hardware structure
4291  * @m_entry: pointer to current filter management list entry
4292  * @cur_fltr: filter information from the book keeping entry
4293  * @new_fltr: filter information with the new VSI to be added
4294  *
4295  * Call AQ command to add or update previously created VSI list with new VSI.
4296  *
4297  * Helper function to do book keeping associated with adding filter information
4298  * The algorithm to do the book keeping is described below :
4299  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
4300  *	if only one VSI has been added till now
4301  *		Allocate a new VSI list and add two VSIs
4302  *		to this list using switch rule command
4303  *		Update the previously created switch rule with the
4304  *		newly created VSI list ID
4305  *	if a VSI list was previously created
4306  *		Add the new VSI to the previously created VSI list set
4307  *		using the update switch rule command
4308  */
4309 static enum ice_status
ice_add_update_vsi_list(struct ice_hw * hw,struct ice_fltr_mgmt_list_entry * m_entry,struct ice_fltr_info * cur_fltr,struct ice_fltr_info * new_fltr)4310 ice_add_update_vsi_list(struct ice_hw *hw,
4311 			struct ice_fltr_mgmt_list_entry *m_entry,
4312 			struct ice_fltr_info *cur_fltr,
4313 			struct ice_fltr_info *new_fltr)
4314 {
4315 	enum ice_status status = ICE_SUCCESS;
4316 	u16 vsi_list_id = 0;
4317 
4318 	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
4319 	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
4320 		return ICE_ERR_NOT_IMPL;
4321 
4322 	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
4323 	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
4324 	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
4325 	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
4326 		return ICE_ERR_NOT_IMPL;
4327 
4328 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
4329 		/* Only one entry existed in the mapping and it was not already
4330 		 * a part of a VSI list. So, create a VSI list with the old and
4331 		 * new VSIs.
4332 		 */
4333 		struct ice_fltr_info tmp_fltr;
4334 		u16 vsi_handle_arr[2];
4335 
4336 		/* A rule already exists with the new VSI being added */
4337 		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
4338 			return ICE_ERR_ALREADY_EXISTS;
4339 
4340 		vsi_handle_arr[0] = cur_fltr->vsi_handle;
4341 		vsi_handle_arr[1] = new_fltr->vsi_handle;
4342 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4343 						  &vsi_list_id,
4344 						  new_fltr->lkup_type);
4345 		if (status)
4346 			return status;
4347 
4348 		tmp_fltr = *new_fltr;
4349 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
4350 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4351 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4352 		/* Update the previous switch rule of "MAC forward to VSI" to
4353 		 * "MAC fwd to VSI list"
4354 		 */
4355 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4356 		if (status)
4357 			return status;
4358 
4359 		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
4360 		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4361 		m_entry->vsi_list_info =
4362 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4363 						vsi_list_id);
4364 
4365 		if (!m_entry->vsi_list_info)
4366 			return ICE_ERR_NO_MEMORY;
4367 
4368 		/* If this entry was large action then the large action needs
4369 		 * to be updated to point to FWD to VSI list
4370 		 */
4371 		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
4372 			status =
4373 			    ice_add_marker_act(hw, m_entry,
4374 					       m_entry->sw_marker_id,
4375 					       m_entry->lg_act_idx);
4376 	} else {
4377 		u16 vsi_handle = new_fltr->vsi_handle;
4378 		enum ice_adminq_opc opcode;
4379 
4380 		if (!m_entry->vsi_list_info)
4381 			return ICE_ERR_CFG;
4382 
4383 		/* A rule already exists with the new VSI being added */
4384 		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
4385 			return ICE_SUCCESS;
4386 
4387 		/* Update the previously created VSI list set with
4388 		 * the new VSI ID passed in
4389 		 */
4390 		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
4391 		opcode = ice_aqc_opc_update_sw_rules;
4392 
4393 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
4394 						  vsi_list_id, false, opcode,
4395 						  new_fltr->lkup_type);
4396 		/* update VSI list mapping info with new VSI ID */
4397 		if (!status)
4398 			ice_set_bit(vsi_handle,
4399 				    m_entry->vsi_list_info->vsi_map);
4400 	}
4401 	if (!status)
4402 		m_entry->vsi_count++;
4403 	return status;
4404 }
4405 
4406 /**
4407  * ice_find_rule_entry - Search a rule entry
4408  * @list_head: head of rule list
4409  * @f_info: rule information
4410  *
4411  * Helper function to search for a given rule entry
4412  * Returns pointer to entry storing the rule if found
4413  */
4414 static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct LIST_HEAD_TYPE * list_head,struct ice_fltr_info * f_info)4415 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
4416 		    struct ice_fltr_info *f_info)
4417 {
4418 	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
4419 
4420 	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4421 			    list_entry) {
4422 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4423 			    sizeof(f_info->l_data)) &&
4424 		    f_info->flag == list_itr->fltr_info.flag) {
4425 			ret = list_itr;
4426 			break;
4427 		}
4428 	}
4429 	return ret;
4430 }
4431 
4432 /**
4433  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
4434  * @recp_list: VSI lists needs to be searched
4435  * @vsi_handle: VSI handle to be found in VSI list
4436  * @vsi_list_id: VSI list ID found containing vsi_handle
4437  *
4438  * Helper function to search a VSI list with single entry containing given VSI
4439  * handle element. This can be extended further to search VSI list with more
4440  * than 1 vsi_count. Returns pointer to VSI list entry if found.
4441  */
4442 static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_sw_recipe * recp_list,u16 vsi_handle,u16 * vsi_list_id)4443 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
4444 			u16 *vsi_list_id)
4445 {
4446 	struct ice_vsi_list_map_info *map_info = NULL;
4447 	struct LIST_HEAD_TYPE *list_head;
4448 
4449 	list_head = &recp_list->filt_rules;
4450 	if (recp_list->adv_rule) {
4451 		struct ice_adv_fltr_mgmt_list_entry *list_itr;
4452 
4453 		LIST_FOR_EACH_ENTRY(list_itr, list_head,
4454 				    ice_adv_fltr_mgmt_list_entry,
4455 				    list_entry) {
4456 			if (list_itr->vsi_list_info) {
4457 				map_info = list_itr->vsi_list_info;
4458 				if (ice_is_bit_set(map_info->vsi_map,
4459 						   vsi_handle)) {
4460 					*vsi_list_id = map_info->vsi_list_id;
4461 					return map_info;
4462 				}
4463 			}
4464 		}
4465 	} else {
4466 		struct ice_fltr_mgmt_list_entry *list_itr;
4467 
4468 		LIST_FOR_EACH_ENTRY(list_itr, list_head,
4469 				    ice_fltr_mgmt_list_entry,
4470 				    list_entry) {
4471 			if (list_itr->vsi_count == 1 &&
4472 			    list_itr->vsi_list_info) {
4473 				map_info = list_itr->vsi_list_info;
4474 				if (ice_is_bit_set(map_info->vsi_map,
4475 						   vsi_handle)) {
4476 					*vsi_list_id = map_info->vsi_list_id;
4477 					return map_info;
4478 				}
4479 			}
4480 		}
4481 	}
4482 	return NULL;
4483 }
4484 
4485 /**
4486  * ice_add_rule_internal - add rule for a given lookup type
4487  * @hw: pointer to the hardware structure
4488  * @recp_list: recipe list for which rule has to be added
4489  * @lport: logic port number on which function add rule
4490  * @f_entry: structure containing MAC forwarding information
4491  *
4492  * Adds or updates the rule lists for a given recipe
4493  */
4494 static enum ice_status
ice_add_rule_internal(struct ice_hw * hw,struct ice_sw_recipe * recp_list,u8 lport,struct ice_fltr_list_entry * f_entry)4495 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4496 		      u8 lport, struct ice_fltr_list_entry *f_entry)
4497 {
4498 	struct ice_fltr_info *new_fltr, *cur_fltr;
4499 	struct ice_fltr_mgmt_list_entry *m_entry;
4500 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4501 	enum ice_status status = ICE_SUCCESS;
4502 
4503 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4504 		return ICE_ERR_PARAM;
4505 
4506 	/* Load the hw_vsi_id only if the fwd action is fwd to VSI */
4507 	if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
4508 		f_entry->fltr_info.fwd_id.hw_vsi_id =
4509 			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4510 
4511 	rule_lock = &recp_list->filt_rule_lock;
4512 
4513 	ice_acquire_lock(rule_lock);
4514 	new_fltr = &f_entry->fltr_info;
4515 	if (new_fltr->flag & ICE_FLTR_RX)
4516 		new_fltr->src = lport;
4517 	else if (new_fltr->flag & ICE_FLTR_TX)
4518 		new_fltr->src =
4519 			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4520 
4521 	m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4522 	if (!m_entry) {
4523 		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4524 		goto exit_add_rule_internal;
4525 	}
4526 
4527 	cur_fltr = &m_entry->fltr_info;
4528 	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
4529 
4530 exit_add_rule_internal:
4531 	ice_release_lock(rule_lock);
4532 	return status;
4533 }
4534 
4535 /**
4536  * ice_remove_vsi_list_rule
4537  * @hw: pointer to the hardware structure
4538  * @vsi_list_id: VSI list ID generated as part of allocate resource
4539  * @lkup_type: switch rule filter lookup type
4540  *
4541  * The VSI list should be emptied before this function is called to remove the
4542  * VSI list.
4543  */
4544 static enum ice_status
ice_remove_vsi_list_rule(struct ice_hw * hw,u16 vsi_list_id,enum ice_sw_lkup_type lkup_type)4545 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
4546 			 enum ice_sw_lkup_type lkup_type)
4547 {
4548 	/* Free the vsi_list resource that we allocated. It is assumed that the
4549 	 * list is empty at this point.
4550 	 */
4551 	return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
4552 					    ice_aqc_opc_free_res);
4553 }
4554 
4555 /**
4556  * ice_rem_update_vsi_list
4557  * @hw: pointer to the hardware structure
4558  * @vsi_handle: VSI handle of the VSI to remove
4559  * @fm_list: filter management entry for which the VSI list management needs to
4560  *	     be done
4561  */
4562 static enum ice_status
ice_rem_update_vsi_list(struct ice_hw * hw,u16 vsi_handle,struct ice_fltr_mgmt_list_entry * fm_list)4563 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
4564 			struct ice_fltr_mgmt_list_entry *fm_list)
4565 {
4566 	enum ice_sw_lkup_type lkup_type;
4567 	enum ice_status status = ICE_SUCCESS;
4568 	u16 vsi_list_id;
4569 
4570 	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
4571 	    fm_list->vsi_count == 0)
4572 		return ICE_ERR_PARAM;
4573 
4574 	/* A rule with the VSI being removed does not exist */
4575 	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
4576 		return ICE_ERR_DOES_NOT_EXIST;
4577 
4578 	lkup_type = fm_list->fltr_info.lkup_type;
4579 	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
4580 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
4581 					  ice_aqc_opc_update_sw_rules,
4582 					  lkup_type);
4583 	if (status)
4584 		return status;
4585 
4586 	fm_list->vsi_count--;
4587 	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
4588 
4589 	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
4590 		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
4591 		struct ice_vsi_list_map_info *vsi_list_info =
4592 			fm_list->vsi_list_info;
4593 		u16 rem_vsi_handle;
4594 
4595 		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
4596 						    ICE_MAX_VSI);
4597 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
4598 			return ICE_ERR_OUT_OF_RANGE;
4599 
4600 		/* Make sure VSI list is empty before removing it below */
4601 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
4602 						  vsi_list_id, true,
4603 						  ice_aqc_opc_update_sw_rules,
4604 						  lkup_type);
4605 		if (status)
4606 			return status;
4607 
4608 		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
4609 		tmp_fltr_info.fwd_id.hw_vsi_id =
4610 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
4611 		tmp_fltr_info.vsi_handle = rem_vsi_handle;
4612 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
4613 		if (status) {
4614 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
4615 				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
4616 			return status;
4617 		}
4618 
4619 		fm_list->fltr_info = tmp_fltr_info;
4620 	}
4621 
4622 	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
4623 	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
4624 		struct ice_vsi_list_map_info *vsi_list_info =
4625 			fm_list->vsi_list_info;
4626 
4627 		/* Remove the VSI list since it is no longer used */
4628 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
4629 		if (status) {
4630 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
4631 				  vsi_list_id, status);
4632 			return status;
4633 		}
4634 
4635 		LIST_DEL(&vsi_list_info->list_entry);
4636 		ice_free(hw, vsi_list_info);
4637 		fm_list->vsi_list_info = NULL;
4638 	}
4639 
4640 	return status;
4641 }
4642 
4643 /**
4644  * ice_remove_rule_internal - Remove a filter rule of a given type
4645  *
4646  * @hw: pointer to the hardware structure
4647  * @recp_list: recipe list for which the rule needs to removed
4648  * @f_entry: rule entry containing filter information
4649  */
4650 static enum ice_status
ice_remove_rule_internal(struct ice_hw * hw,struct ice_sw_recipe * recp_list,struct ice_fltr_list_entry * f_entry)4651 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4652 			 struct ice_fltr_list_entry *f_entry)
4653 {
4654 	struct ice_fltr_mgmt_list_entry *list_elem;
4655 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4656 	enum ice_status status = ICE_SUCCESS;
4657 	bool remove_rule = false;
4658 	u16 vsi_handle;
4659 
4660 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4661 		return ICE_ERR_PARAM;
4662 	f_entry->fltr_info.fwd_id.hw_vsi_id =
4663 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4664 
4665 	rule_lock = &recp_list->filt_rule_lock;
4666 	ice_acquire_lock(rule_lock);
4667 	list_elem = ice_find_rule_entry(&recp_list->filt_rules,
4668 					&f_entry->fltr_info);
4669 	if (!list_elem) {
4670 		status = ICE_ERR_DOES_NOT_EXIST;
4671 		goto exit;
4672 	}
4673 
4674 	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
4675 		remove_rule = true;
4676 	} else if (!list_elem->vsi_list_info) {
4677 		status = ICE_ERR_DOES_NOT_EXIST;
4678 		goto exit;
4679 	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
4680 		/* a ref_cnt > 1 indicates that the vsi_list is being
4681 		 * shared by multiple rules. Decrement the ref_cnt and
4682 		 * remove this rule, but do not modify the list, as it
4683 		 * is in-use by other rules.
4684 		 */
4685 		list_elem->vsi_list_info->ref_cnt--;
4686 		remove_rule = true;
4687 	} else {
4688 		/* a ref_cnt of 1 indicates the vsi_list is only used
4689 		 * by one rule. However, the original removal request is only
4690 		 * for a single VSI. Update the vsi_list first, and only
4691 		 * remove the rule if there are no further VSIs in this list.
4692 		 */
4693 		vsi_handle = f_entry->fltr_info.vsi_handle;
4694 		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4695 		if (status)
4696 			goto exit;
4697 		/* if VSI count goes to zero after updating the VSI list */
4698 		if (list_elem->vsi_count == 0)
4699 			remove_rule = true;
4700 	}
4701 
4702 	if (remove_rule) {
4703 		/* Remove the lookup rule */
4704 		struct ice_aqc_sw_rules_elem *s_rule;
4705 
4706 		s_rule = (struct ice_aqc_sw_rules_elem *)
4707 			ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4708 		if (!s_rule) {
4709 			status = ICE_ERR_NO_MEMORY;
4710 			goto exit;
4711 		}
4712 
4713 		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4714 				 ice_aqc_opc_remove_sw_rules);
4715 
4716 		status = ice_aq_sw_rules(hw, s_rule,
4717 					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4718 					 ice_aqc_opc_remove_sw_rules, NULL);
4719 
4720 		/* Remove a book keeping from the list */
4721 		ice_free(hw, s_rule);
4722 
4723 		if (status)
4724 			goto exit;
4725 
4726 		LIST_DEL(&list_elem->list_entry);
4727 		ice_free(hw, list_elem);
4728 	}
4729 exit:
4730 	ice_release_lock(rule_lock);
4731 	return status;
4732 }
4733 
4734 /**
4735  * ice_aq_get_res_alloc - get allocated resources
4736  * @hw: pointer to the HW struct
4737  * @num_entries: pointer to u16 to store the number of resource entries returned
4738  * @buf: pointer to buffer
4739  * @buf_size: size of buf
4740  * @cd: pointer to command details structure or NULL
4741  *
4742  * The caller-supplied buffer must be large enough to store the resource
4743  * information for all resource types. Each resource type is an
4744  * ice_aqc_get_res_resp_elem structure.
4745  */
4746 enum ice_status
ice_aq_get_res_alloc(struct ice_hw * hw,u16 * num_entries,struct ice_aqc_get_res_resp_elem * buf,u16 buf_size,struct ice_sq_cd * cd)4747 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4748 		     struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4749 		     struct ice_sq_cd *cd)
4750 {
4751 	struct ice_aqc_get_res_alloc *resp;
4752 	enum ice_status status;
4753 	struct ice_aq_desc desc;
4754 
4755 	if (!buf)
4756 		return ICE_ERR_BAD_PTR;
4757 
4758 	if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4759 		return ICE_ERR_INVAL_SIZE;
4760 
4761 	resp = &desc.params.get_res;
4762 
4763 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4764 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4765 
4766 	if (!status && num_entries)
4767 		*num_entries = LE16_TO_CPU(resp->resp_elem_num);
4768 
4769 	return status;
4770 }
4771 
4772 /**
4773  * ice_aq_get_res_descs - get allocated resource descriptors
4774  * @hw: pointer to the hardware structure
4775  * @num_entries: number of resource entries in buffer
4776  * @buf: structure to hold response data buffer
4777  * @buf_size: size of buffer
4778  * @res_type: resource type
4779  * @res_shared: is resource shared
4780  * @desc_id: input - first desc ID to start; output - next desc ID
4781  * @cd: pointer to command details structure or NULL
4782  */
4783 enum ice_status
ice_aq_get_res_descs(struct ice_hw * hw,u16 num_entries,struct ice_aqc_res_elem * buf,u16 buf_size,u16 res_type,bool res_shared,u16 * desc_id,struct ice_sq_cd * cd)4784 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4785 		     struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4786 		     bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4787 {
4788 	struct ice_aqc_get_allocd_res_desc *cmd;
4789 	struct ice_aq_desc desc;
4790 	enum ice_status status;
4791 
4792 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4793 
4794 	cmd = &desc.params.get_res_desc;
4795 
4796 	if (!buf)
4797 		return ICE_ERR_PARAM;
4798 
4799 	if (buf_size != (num_entries * sizeof(*buf)))
4800 		return ICE_ERR_PARAM;
4801 
4802 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4803 
4804 	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4805 					 ICE_AQC_RES_TYPE_M) | (res_shared ?
4806 					ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4807 	cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4808 
4809 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4810 	if (!status)
4811 		*desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4812 
4813 	return status;
4814 }
4815 
4816 /**
4817  * ice_add_mac_rule - Add a MAC address based filter rule
4818  * @hw: pointer to the hardware structure
4819  * @m_list: list of MAC addresses and forwarding information
4820  * @sw: pointer to switch info struct for which function add rule
4821  * @lport: logic port number on which function add rule
4822  *
4823  * IMPORTANT: When the umac_shared flag is set to false and m_list has
4824  * multiple unicast addresses, the function assumes that all the
4825  * addresses are unique in a given add_mac call. It doesn't
4826  * check for duplicates in this case, removing duplicates from a given
4827  * list should be taken care of in the caller of this function.
4828  */
4829 static enum ice_status
ice_add_mac_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * m_list,struct ice_switch_info * sw,u8 lport)4830 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4831 		 struct ice_switch_info *sw, u8 lport)
4832 {
4833 	struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4834 	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4835 	struct ice_fltr_list_entry *m_list_itr;
4836 	struct LIST_HEAD_TYPE *rule_head;
4837 	u16 total_elem_left, s_rule_size;
4838 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4839 	enum ice_status status = ICE_SUCCESS;
4840 	u16 num_unicast = 0;
4841 	u8 elem_sent;
4842 
4843 	s_rule = NULL;
4844 	rule_lock = &recp_list->filt_rule_lock;
4845 	rule_head = &recp_list->filt_rules;
4846 
4847 	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4848 			    list_entry) {
4849 		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4850 		u16 vsi_handle;
4851 		u16 hw_vsi_id;
4852 
4853 		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4854 		vsi_handle = m_list_itr->fltr_info.vsi_handle;
4855 		if (!ice_is_vsi_valid(hw, vsi_handle))
4856 			return ICE_ERR_PARAM;
4857 		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4858 		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4859 		/* update the src in case it is VSI num */
4860 		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4861 			return ICE_ERR_PARAM;
4862 		m_list_itr->fltr_info.src = hw_vsi_id;
4863 		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4864 		    IS_ZERO_ETHER_ADDR(add))
4865 			return ICE_ERR_PARAM;
4866 		if (IS_UNICAST_ETHER_ADDR(add) && !hw->umac_shared) {
4867 			/* Don't overwrite the unicast address */
4868 			ice_acquire_lock(rule_lock);
4869 			if (ice_find_rule_entry(rule_head,
4870 						&m_list_itr->fltr_info)) {
4871 				ice_release_lock(rule_lock);
4872 				continue;
4873 			}
4874 			ice_release_lock(rule_lock);
4875 			num_unicast++;
4876 		} else if (IS_MULTICAST_ETHER_ADDR(add) ||
4877 			   (IS_UNICAST_ETHER_ADDR(add) && hw->umac_shared)) {
4878 			m_list_itr->status =
4879 				ice_add_rule_internal(hw, recp_list, lport,
4880 						      m_list_itr);
4881 			if (m_list_itr->status)
4882 				return m_list_itr->status;
4883 		}
4884 	}
4885 
4886 	ice_acquire_lock(rule_lock);
4887 	/* Exit if no suitable entries were found for adding bulk switch rule */
4888 	if (!num_unicast) {
4889 		status = ICE_SUCCESS;
4890 		goto ice_add_mac_exit;
4891 	}
4892 
4893 	/* Allocate switch rule buffer for the bulk update for unicast */
4894 	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4895 	s_rule = (struct ice_aqc_sw_rules_elem *)
4896 		ice_calloc(hw, num_unicast, s_rule_size);
4897 	if (!s_rule) {
4898 		status = ICE_ERR_NO_MEMORY;
4899 		goto ice_add_mac_exit;
4900 	}
4901 
4902 	r_iter = s_rule;
4903 	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4904 			    list_entry) {
4905 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4906 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4907 
4908 		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4909 			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4910 					 ice_aqc_opc_add_sw_rules);
4911 			r_iter = (struct ice_aqc_sw_rules_elem *)
4912 				((u8 *)r_iter + s_rule_size);
4913 		}
4914 	}
4915 
4916 	/* Call AQ bulk switch rule update for all unicast addresses */
4917 	r_iter = s_rule;
4918 	/* Call AQ switch rule in AQ_MAX chunk */
4919 	for (total_elem_left = num_unicast; total_elem_left > 0;
4920 	     total_elem_left -= elem_sent) {
4921 		struct ice_aqc_sw_rules_elem *entry = r_iter;
4922 
4923 		elem_sent = MIN_T(u8, total_elem_left,
4924 				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4925 		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4926 					 elem_sent, ice_aqc_opc_add_sw_rules,
4927 					 NULL);
4928 		if (status)
4929 			goto ice_add_mac_exit;
4930 		r_iter = (struct ice_aqc_sw_rules_elem *)
4931 			((u8 *)r_iter + (elem_sent * s_rule_size));
4932 	}
4933 
4934 	/* Fill up rule ID based on the value returned from FW */
4935 	r_iter = s_rule;
4936 	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4937 			    list_entry) {
4938 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4939 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4940 		struct ice_fltr_mgmt_list_entry *fm_entry;
4941 
4942 		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4943 			f_info->fltr_rule_id =
4944 				LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4945 			f_info->fltr_act = ICE_FWD_TO_VSI;
4946 			/* Create an entry to track this MAC address */
4947 			fm_entry = (struct ice_fltr_mgmt_list_entry *)
4948 				ice_malloc(hw, sizeof(*fm_entry));
4949 			if (!fm_entry) {
4950 				status = ICE_ERR_NO_MEMORY;
4951 				goto ice_add_mac_exit;
4952 			}
4953 			fm_entry->fltr_info = *f_info;
4954 			fm_entry->vsi_count = 1;
4955 			/* The book keeping entries will get removed when
4956 			 * base driver calls remove filter AQ command
4957 			 */
4958 
4959 			LIST_ADD(&fm_entry->list_entry, rule_head);
4960 			r_iter = (struct ice_aqc_sw_rules_elem *)
4961 				((u8 *)r_iter + s_rule_size);
4962 		}
4963 	}
4964 
4965 ice_add_mac_exit:
4966 	ice_release_lock(rule_lock);
4967 	if (s_rule)
4968 		ice_free(hw, s_rule);
4969 	return status;
4970 }
4971 
4972 /**
4973  * ice_add_mac - Add a MAC address based filter rule
4974  * @hw: pointer to the hardware structure
4975  * @m_list: list of MAC addresses and forwarding information
4976  *
4977  * Function add MAC rule for logical port from HW struct
4978  */
ice_add_mac(struct ice_hw * hw,struct LIST_HEAD_TYPE * m_list)4979 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4980 {
4981 	if (!m_list || !hw)
4982 		return ICE_ERR_PARAM;
4983 
4984 	return ice_add_mac_rule(hw, m_list, hw->switch_info,
4985 				hw->port_info->lport);
4986 }
4987 
4988 /**
4989  * ice_add_vlan_internal - Add one VLAN based filter rule
4990  * @hw: pointer to the hardware structure
4991  * @recp_list: recipe list for which rule has to be added
4992  * @f_entry: filter entry containing one VLAN information
4993  */
4994 static enum ice_status
ice_add_vlan_internal(struct ice_hw * hw,struct ice_sw_recipe * recp_list,struct ice_fltr_list_entry * f_entry)4995 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4996 		      struct ice_fltr_list_entry *f_entry)
4997 {
4998 	struct ice_fltr_mgmt_list_entry *v_list_itr;
4999 	struct ice_fltr_info *new_fltr, *cur_fltr;
5000 	enum ice_sw_lkup_type lkup_type;
5001 	u16 vsi_list_id = 0, vsi_handle;
5002 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5003 	enum ice_status status = ICE_SUCCESS;
5004 
5005 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
5006 		return ICE_ERR_PARAM;
5007 
5008 	f_entry->fltr_info.fwd_id.hw_vsi_id =
5009 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
5010 	new_fltr = &f_entry->fltr_info;
5011 
5012 	/* VLAN ID should only be 12 bits */
5013 	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
5014 		return ICE_ERR_PARAM;
5015 
5016 	if (new_fltr->src_id != ICE_SRC_ID_VSI)
5017 		return ICE_ERR_PARAM;
5018 
5019 	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
5020 	lkup_type = new_fltr->lkup_type;
5021 	vsi_handle = new_fltr->vsi_handle;
5022 	rule_lock = &recp_list->filt_rule_lock;
5023 	ice_acquire_lock(rule_lock);
5024 	v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
5025 	if (!v_list_itr) {
5026 		struct ice_vsi_list_map_info *map_info = NULL;
5027 
5028 		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
5029 			/* All VLAN pruning rules use a VSI list. Check if
5030 			 * there is already a VSI list containing VSI that we
5031 			 * want to add. If found, use the same vsi_list_id for
5032 			 * this new VLAN rule or else create a new list.
5033 			 */
5034 			map_info = ice_find_vsi_list_entry(recp_list,
5035 							   vsi_handle,
5036 							   &vsi_list_id);
5037 			if (!map_info) {
5038 				status = ice_create_vsi_list_rule(hw,
5039 								  &vsi_handle,
5040 								  1,
5041 								  &vsi_list_id,
5042 								  lkup_type);
5043 				if (status)
5044 					goto exit;
5045 			}
5046 			/* Convert the action to forwarding to a VSI list. */
5047 			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
5048 			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
5049 		}
5050 
5051 		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
5052 		if (!status) {
5053 			v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
5054 							 new_fltr);
5055 			if (!v_list_itr) {
5056 				status = ICE_ERR_DOES_NOT_EXIST;
5057 				goto exit;
5058 			}
5059 			/* reuse VSI list for new rule and increment ref_cnt */
5060 			if (map_info) {
5061 				v_list_itr->vsi_list_info = map_info;
5062 				map_info->ref_cnt++;
5063 			} else {
5064 				v_list_itr->vsi_list_info =
5065 					ice_create_vsi_list_map(hw, &vsi_handle,
5066 								1, vsi_list_id);
5067 			}
5068 		}
5069 	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
5070 		/* Update existing VSI list to add new VSI ID only if it used
5071 		 * by one VLAN rule.
5072 		 */
5073 		cur_fltr = &v_list_itr->fltr_info;
5074 		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
5075 						 new_fltr);
5076 	} else {
5077 		/* If VLAN rule exists and VSI list being used by this rule is
5078 		 * referenced by more than 1 VLAN rule. Then create a new VSI
5079 		 * list appending previous VSI with new VSI and update existing
5080 		 * VLAN rule to point to new VSI list ID
5081 		 */
5082 		struct ice_fltr_info tmp_fltr;
5083 		u16 vsi_handle_arr[2];
5084 		u16 cur_handle;
5085 
5086 		/* Current implementation only supports reusing VSI list with
5087 		 * one VSI count. We should never hit below condition
5088 		 */
5089 		if (v_list_itr->vsi_count > 1 &&
5090 		    v_list_itr->vsi_list_info->ref_cnt > 1) {
5091 			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
5092 			status = ICE_ERR_CFG;
5093 			goto exit;
5094 		}
5095 
5096 		cur_handle =
5097 			ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
5098 					   ICE_MAX_VSI);
5099 
5100 		/* A rule already exists with the new VSI being added */
5101 		if (cur_handle == vsi_handle) {
5102 			status = ICE_ERR_ALREADY_EXISTS;
5103 			goto exit;
5104 		}
5105 
5106 		vsi_handle_arr[0] = cur_handle;
5107 		vsi_handle_arr[1] = vsi_handle;
5108 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5109 						  &vsi_list_id, lkup_type);
5110 		if (status)
5111 			goto exit;
5112 
5113 		tmp_fltr = v_list_itr->fltr_info;
5114 		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
5115 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5116 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5117 		/* Update the previous switch rule to a new VSI list which
5118 		 * includes current VSI that is requested
5119 		 */
5120 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5121 		if (status)
5122 			goto exit;
5123 
5124 		/* before overriding VSI list map info. decrement ref_cnt of
5125 		 * previous VSI list
5126 		 */
5127 		v_list_itr->vsi_list_info->ref_cnt--;
5128 
5129 		/* now update to newly created list */
5130 		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
5131 		v_list_itr->vsi_list_info =
5132 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5133 						vsi_list_id);
5134 		v_list_itr->vsi_count++;
5135 	}
5136 
5137 exit:
5138 	ice_release_lock(rule_lock);
5139 	return status;
5140 }
5141 
5142 /**
5143  * ice_add_vlan_rule - Add VLAN based filter rule
5144  * @hw: pointer to the hardware structure
5145  * @v_list: list of VLAN entries and forwarding information
5146  * @sw: pointer to switch info struct for which function add rule
5147  */
5148 static enum ice_status
ice_add_vlan_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * v_list,struct ice_switch_info * sw)5149 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5150 		  struct ice_switch_info *sw)
5151 {
5152 	struct ice_fltr_list_entry *v_list_itr;
5153 	struct ice_sw_recipe *recp_list;
5154 
5155 	recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
5156 	LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
5157 			    list_entry) {
5158 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
5159 			return ICE_ERR_PARAM;
5160 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
5161 		v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
5162 							   v_list_itr);
5163 		if (v_list_itr->status)
5164 			return v_list_itr->status;
5165 	}
5166 	return ICE_SUCCESS;
5167 }
5168 
5169 /**
5170  * ice_add_vlan - Add a VLAN based filter rule
5171  * @hw: pointer to the hardware structure
5172  * @v_list: list of VLAN and forwarding information
5173  *
5174  * Function add VLAN rule for logical port from HW struct
5175  */
ice_add_vlan(struct ice_hw * hw,struct LIST_HEAD_TYPE * v_list)5176 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5177 {
5178 	if (!v_list || !hw)
5179 		return ICE_ERR_PARAM;
5180 
5181 	return ice_add_vlan_rule(hw, v_list, hw->switch_info);
5182 }
5183 
5184 /**
5185  * ice_add_mac_vlan_rule - Add MAC and VLAN pair based filter rule
5186  * @hw: pointer to the hardware structure
5187  * @mv_list: list of MAC and VLAN filters
5188  * @sw: pointer to switch info struct for which function add rule
5189  * @lport: logic port number on which function add rule
5190  *
5191  * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
5192  * pruning bits enabled, then it is the responsibility of the caller to make
5193  * sure to add a VLAN only filter on the same VSI. Packets belonging to that
5194  * VLAN won't be received on that VSI otherwise.
5195  */
5196 static enum ice_status
ice_add_mac_vlan_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * mv_list,struct ice_switch_info * sw,u8 lport)5197 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
5198 		      struct ice_switch_info *sw, u8 lport)
5199 {
5200 	struct ice_fltr_list_entry *mv_list_itr;
5201 	struct ice_sw_recipe *recp_list;
5202 
5203 	if (!mv_list || !hw)
5204 		return ICE_ERR_PARAM;
5205 
5206 	recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
5207 	LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
5208 			    list_entry) {
5209 		enum ice_sw_lkup_type l_type =
5210 			mv_list_itr->fltr_info.lkup_type;
5211 
5212 		if (l_type != ICE_SW_LKUP_MAC_VLAN)
5213 			return ICE_ERR_PARAM;
5214 		mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
5215 		mv_list_itr->status =
5216 			ice_add_rule_internal(hw, recp_list, lport,
5217 					      mv_list_itr);
5218 		if (mv_list_itr->status)
5219 			return mv_list_itr->status;
5220 	}
5221 	return ICE_SUCCESS;
5222 }
5223 
5224 /**
5225  * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
5226  * @hw: pointer to the hardware structure
5227  * @mv_list: list of MAC VLAN addresses and forwarding information
5228  *
5229  * Function add MAC VLAN rule for logical port from HW struct
5230  */
5231 enum ice_status
ice_add_mac_vlan(struct ice_hw * hw,struct LIST_HEAD_TYPE * mv_list)5232 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5233 {
5234 	if (!mv_list || !hw)
5235 		return ICE_ERR_PARAM;
5236 
5237 	return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
5238 				     hw->port_info->lport);
5239 }
5240 
5241 /**
5242  * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
5243  * @hw: pointer to the hardware structure
5244  * @em_list: list of ether type MAC filter, MAC is optional
5245  * @sw: pointer to switch info struct for which function add rule
5246  * @lport: logic port number on which function add rule
5247  *
5248  * This function requires the caller to populate the entries in
5249  * the filter list with the necessary fields (including flags to
5250  * indicate Tx or Rx rules).
5251  */
5252 static enum ice_status
ice_add_eth_mac_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * em_list,struct ice_switch_info * sw,u8 lport)5253 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5254 		     struct ice_switch_info *sw, u8 lport)
5255 {
5256 	struct ice_fltr_list_entry *em_list_itr;
5257 
5258 	LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
5259 			    list_entry) {
5260 		struct ice_sw_recipe *recp_list;
5261 		enum ice_sw_lkup_type l_type;
5262 
5263 		l_type = em_list_itr->fltr_info.lkup_type;
5264 		recp_list = &sw->recp_list[l_type];
5265 
5266 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5267 		    l_type != ICE_SW_LKUP_ETHERTYPE)
5268 			return ICE_ERR_PARAM;
5269 
5270 		em_list_itr->status = ice_add_rule_internal(hw, recp_list,
5271 							    lport,
5272 							    em_list_itr);
5273 		if (em_list_itr->status)
5274 			return em_list_itr->status;
5275 	}
5276 	return ICE_SUCCESS;
5277 }
5278 
5279 /**
5280  * ice_add_eth_mac - Add a ethertype based filter rule
5281  * @hw: pointer to the hardware structure
5282  * @em_list: list of ethertype and forwarding information
5283  *
5284  * Function add ethertype rule for logical port from HW struct
5285  */
5286 enum ice_status
ice_add_eth_mac(struct ice_hw * hw,struct LIST_HEAD_TYPE * em_list)5287 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5288 {
5289 	if (!em_list || !hw)
5290 		return ICE_ERR_PARAM;
5291 
5292 	return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
5293 				    hw->port_info->lport);
5294 }
5295 
5296 /**
5297  * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
5298  * @hw: pointer to the hardware structure
5299  * @em_list: list of ethertype or ethertype MAC entries
5300  * @sw: pointer to switch info struct for which function add rule
5301  */
5302 static enum ice_status
ice_remove_eth_mac_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * em_list,struct ice_switch_info * sw)5303 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
5304 			struct ice_switch_info *sw)
5305 {
5306 	struct ice_fltr_list_entry *em_list_itr, *tmp;
5307 
5308 	LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
5309 				 list_entry) {
5310 		struct ice_sw_recipe *recp_list;
5311 		enum ice_sw_lkup_type l_type;
5312 
5313 		l_type = em_list_itr->fltr_info.lkup_type;
5314 
5315 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
5316 		    l_type != ICE_SW_LKUP_ETHERTYPE)
5317 			return ICE_ERR_PARAM;
5318 
5319 		recp_list = &sw->recp_list[l_type];
5320 		em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5321 							       em_list_itr);
5322 		if (em_list_itr->status)
5323 			return em_list_itr->status;
5324 	}
5325 	return ICE_SUCCESS;
5326 }
5327 
5328 /**
5329  * ice_remove_eth_mac - remove a ethertype based filter rule
5330  * @hw: pointer to the hardware structure
5331  * @em_list: list of ethertype and forwarding information
5332  *
5333  */
5334 enum ice_status
ice_remove_eth_mac(struct ice_hw * hw,struct LIST_HEAD_TYPE * em_list)5335 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
5336 {
5337 	if (!em_list || !hw)
5338 		return ICE_ERR_PARAM;
5339 
5340 	return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
5341 }
5342 
5343 /**
5344  * ice_rem_sw_rule_info
5345  * @hw: pointer to the hardware structure
5346  * @rule_head: pointer to the switch list structure that we want to delete
5347  */
5348 static void
ice_rem_sw_rule_info(struct ice_hw * hw,struct LIST_HEAD_TYPE * rule_head)5349 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5350 {
5351 	if (!LIST_EMPTY(rule_head)) {
5352 		struct ice_fltr_mgmt_list_entry *entry;
5353 		struct ice_fltr_mgmt_list_entry *tmp;
5354 
5355 		LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
5356 					 ice_fltr_mgmt_list_entry, list_entry) {
5357 			LIST_DEL(&entry->list_entry);
5358 			ice_free(hw, entry);
5359 		}
5360 	}
5361 }
5362 
5363 /**
5364  * ice_rem_adv_rule_info
5365  * @hw: pointer to the hardware structure
5366  * @rule_head: pointer to the switch list structure that we want to delete
5367  */
5368 static void
ice_rem_adv_rule_info(struct ice_hw * hw,struct LIST_HEAD_TYPE * rule_head)5369 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
5370 {
5371 	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
5372 	struct ice_adv_fltr_mgmt_list_entry *lst_itr;
5373 
5374 	if (LIST_EMPTY(rule_head))
5375 		return;
5376 
5377 	LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
5378 				 ice_adv_fltr_mgmt_list_entry, list_entry) {
5379 		LIST_DEL(&lst_itr->list_entry);
5380 		ice_free(hw, lst_itr->lkups);
5381 		ice_free(hw, lst_itr);
5382 	}
5383 }
5384 
5385 /**
5386  * ice_rem_all_sw_rules_info
5387  * @hw: pointer to the hardware structure
5388  */
ice_rem_all_sw_rules_info(struct ice_hw * hw)5389 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
5390 {
5391 	struct ice_switch_info *sw = hw->switch_info;
5392 	u8 i;
5393 
5394 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5395 		struct LIST_HEAD_TYPE *rule_head;
5396 
5397 		rule_head = &sw->recp_list[i].filt_rules;
5398 		if (!sw->recp_list[i].adv_rule)
5399 			ice_rem_sw_rule_info(hw, rule_head);
5400 		else
5401 			ice_rem_adv_rule_info(hw, rule_head);
5402 		if (sw->recp_list[i].adv_rule &&
5403 		    LIST_EMPTY(&sw->recp_list[i].filt_rules))
5404 			sw->recp_list[i].adv_rule = false;
5405 	}
5406 }
5407 
5408 /**
5409  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
5410  * @pi: pointer to the port_info structure
5411  * @vsi_handle: VSI handle to set as default
5412  * @set: true to add the above mentioned switch rule, false to remove it
5413  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
5414  *
5415  * add filter rule to set/unset given VSI as default VSI for the switch
5416  * (represented by swid)
5417  */
5418 enum ice_status
ice_cfg_dflt_vsi(struct ice_port_info * pi,u16 vsi_handle,bool set,u8 direction)5419 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
5420 		 u8 direction)
5421 {
5422 	struct ice_aqc_sw_rules_elem *s_rule;
5423 	struct ice_fltr_info f_info;
5424 	struct ice_hw *hw = pi->hw;
5425 	enum ice_adminq_opc opcode;
5426 	enum ice_status status;
5427 	u16 s_rule_size;
5428 	u16 hw_vsi_id;
5429 
5430 	if (!ice_is_vsi_valid(hw, vsi_handle))
5431 		return ICE_ERR_PARAM;
5432 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5433 
5434 	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
5435 		ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
5436 
5437 	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
5438 	if (!s_rule)
5439 		return ICE_ERR_NO_MEMORY;
5440 
5441 	ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
5442 
5443 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
5444 	f_info.flag = direction;
5445 	f_info.fltr_act = ICE_FWD_TO_VSI;
5446 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
5447 
5448 	if (f_info.flag & ICE_FLTR_RX) {
5449 		f_info.src = pi->lport;
5450 		f_info.src_id = ICE_SRC_ID_LPORT;
5451 		if (!set)
5452 			f_info.fltr_rule_id =
5453 				pi->dflt_rx_vsi_rule_id;
5454 	} else if (f_info.flag & ICE_FLTR_TX) {
5455 		f_info.src_id = ICE_SRC_ID_VSI;
5456 		f_info.src = hw_vsi_id;
5457 		if (!set)
5458 			f_info.fltr_rule_id =
5459 				pi->dflt_tx_vsi_rule_id;
5460 	}
5461 
5462 	if (set)
5463 		opcode = ice_aqc_opc_add_sw_rules;
5464 	else
5465 		opcode = ice_aqc_opc_remove_sw_rules;
5466 
5467 	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
5468 
5469 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
5470 	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
5471 		goto out;
5472 	if (set) {
5473 		u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5474 
5475 		if (f_info.flag & ICE_FLTR_TX) {
5476 			pi->dflt_tx_vsi_num = hw_vsi_id;
5477 			pi->dflt_tx_vsi_rule_id = index;
5478 		} else if (f_info.flag & ICE_FLTR_RX) {
5479 			pi->dflt_rx_vsi_num = hw_vsi_id;
5480 			pi->dflt_rx_vsi_rule_id = index;
5481 		}
5482 	} else {
5483 		if (f_info.flag & ICE_FLTR_TX) {
5484 			pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
5485 			pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
5486 		} else if (f_info.flag & ICE_FLTR_RX) {
5487 			pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
5488 			pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
5489 		}
5490 	}
5491 
5492 out:
5493 	ice_free(hw, s_rule);
5494 	return status;
5495 }
5496 
5497 /**
5498  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
5499  * @list_head: head of rule list
5500  * @f_info: rule information
5501  *
5502  * Helper function to search for a unicast rule entry - this is to be used
5503  * to remove unicast MAC filter that is not shared with other VSIs on the
5504  * PF switch.
5505  *
5506  * Returns pointer to entry storing the rule if found
5507  */
5508 static struct ice_fltr_mgmt_list_entry *
ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE * list_head,struct ice_fltr_info * f_info)5509 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
5510 			  struct ice_fltr_info *f_info)
5511 {
5512 	struct ice_fltr_mgmt_list_entry *list_itr;
5513 
5514 	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
5515 			    list_entry) {
5516 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
5517 			    sizeof(f_info->l_data)) &&
5518 		    f_info->fwd_id.hw_vsi_id ==
5519 		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
5520 		    f_info->flag == list_itr->fltr_info.flag)
5521 			return list_itr;
5522 	}
5523 	return NULL;
5524 }
5525 
5526 /**
5527  * ice_remove_mac_rule - remove a MAC based filter rule
5528  * @hw: pointer to the hardware structure
5529  * @m_list: list of MAC addresses and forwarding information
5530  * @recp_list: list from which function remove MAC address
5531  *
5532  * This function removes either a MAC filter rule or a specific VSI from a
5533  * VSI list for a multicast MAC address.
5534  *
5535  * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
5536  * ice_add_mac. Caller should be aware that this call will only work if all
5537  * the entries passed into m_list were added previously. It will not attempt to
5538  * do a partial remove of entries that were found.
5539  */
5540 static enum ice_status
ice_remove_mac_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * m_list,struct ice_sw_recipe * recp_list)5541 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
5542 		    struct ice_sw_recipe *recp_list)
5543 {
5544 	struct ice_fltr_list_entry *list_itr, *tmp;
5545 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5546 
5547 	if (!m_list)
5548 		return ICE_ERR_PARAM;
5549 
5550 	rule_lock = &recp_list->filt_rule_lock;
5551 	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
5552 				 list_entry) {
5553 		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
5554 		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
5555 		u16 vsi_handle;
5556 
5557 		if (l_type != ICE_SW_LKUP_MAC)
5558 			return ICE_ERR_PARAM;
5559 
5560 		vsi_handle = list_itr->fltr_info.vsi_handle;
5561 		if (!ice_is_vsi_valid(hw, vsi_handle))
5562 			return ICE_ERR_PARAM;
5563 
5564 		list_itr->fltr_info.fwd_id.hw_vsi_id =
5565 					ice_get_hw_vsi_num(hw, vsi_handle);
5566 		if (IS_UNICAST_ETHER_ADDR(add) && !hw->umac_shared) {
5567 			/* Don't remove the unicast address that belongs to
5568 			 * another VSI on the switch, since it is not being
5569 			 * shared...
5570 			 */
5571 			ice_acquire_lock(rule_lock);
5572 			if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
5573 						       &list_itr->fltr_info)) {
5574 				ice_release_lock(rule_lock);
5575 				return ICE_ERR_DOES_NOT_EXIST;
5576 			}
5577 			ice_release_lock(rule_lock);
5578 		}
5579 		list_itr->status = ice_remove_rule_internal(hw, recp_list,
5580 							    list_itr);
5581 		if (list_itr->status)
5582 			return list_itr->status;
5583 	}
5584 	return ICE_SUCCESS;
5585 }
5586 
5587 /**
5588  * ice_remove_mac - remove a MAC address based filter rule
5589  * @hw: pointer to the hardware structure
5590  * @m_list: list of MAC addresses and forwarding information
5591  *
5592  */
ice_remove_mac(struct ice_hw * hw,struct LIST_HEAD_TYPE * m_list)5593 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
5594 {
5595 	struct ice_sw_recipe *recp_list;
5596 
5597 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5598 	return ice_remove_mac_rule(hw, m_list, recp_list);
5599 }
5600 
5601 /**
5602  * ice_remove_vlan_rule - Remove VLAN based filter rule
5603  * @hw: pointer to the hardware structure
5604  * @v_list: list of VLAN entries and forwarding information
5605  * @recp_list: list from which function remove VLAN
5606  */
5607 static enum ice_status
ice_remove_vlan_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * v_list,struct ice_sw_recipe * recp_list)5608 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5609 		     struct ice_sw_recipe *recp_list)
5610 {
5611 	struct ice_fltr_list_entry *v_list_itr, *tmp;
5612 
5613 	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5614 				 list_entry) {
5615 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5616 
5617 		if (l_type != ICE_SW_LKUP_VLAN)
5618 			return ICE_ERR_PARAM;
5619 		v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
5620 							      v_list_itr);
5621 		if (v_list_itr->status)
5622 			return v_list_itr->status;
5623 	}
5624 	return ICE_SUCCESS;
5625 }
5626 
5627 /**
5628  * ice_remove_vlan - remove a VLAN address based filter rule
5629  * @hw: pointer to the hardware structure
5630  * @v_list: list of VLAN and forwarding information
5631  *
5632  */
5633 enum ice_status
ice_remove_vlan(struct ice_hw * hw,struct LIST_HEAD_TYPE * v_list)5634 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
5635 {
5636 	struct ice_sw_recipe *recp_list;
5637 
5638 	if (!v_list || !hw)
5639 		return ICE_ERR_PARAM;
5640 
5641 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
5642 	return ice_remove_vlan_rule(hw, v_list, recp_list);
5643 }
5644 
5645 /**
5646  * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
5647  * @hw: pointer to the hardware structure
5648  * @v_list: list of MAC VLAN entries and forwarding information
5649  * @recp_list: list from which function remove MAC VLAN
5650  */
5651 static enum ice_status
ice_remove_mac_vlan_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * v_list,struct ice_sw_recipe * recp_list)5652 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
5653 			 struct ice_sw_recipe *recp_list)
5654 {
5655 	struct ice_fltr_list_entry *v_list_itr, *tmp;
5656 
5657 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5658 	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5659 				 list_entry) {
5660 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
5661 
5662 		if (l_type != ICE_SW_LKUP_MAC_VLAN)
5663 			return ICE_ERR_PARAM;
5664 		v_list_itr->status =
5665 			ice_remove_rule_internal(hw, recp_list,
5666 						 v_list_itr);
5667 		if (v_list_itr->status)
5668 			return v_list_itr->status;
5669 	}
5670 	return ICE_SUCCESS;
5671 }
5672 
5673 /**
5674  * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
5675  * @hw: pointer to the hardware structure
5676  * @mv_list: list of MAC VLAN and forwarding information
5677  */
5678 enum ice_status
ice_remove_mac_vlan(struct ice_hw * hw,struct LIST_HEAD_TYPE * mv_list)5679 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
5680 {
5681 	struct ice_sw_recipe *recp_list;
5682 
5683 	if (!mv_list || !hw)
5684 		return ICE_ERR_PARAM;
5685 
5686 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5687 	return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5688 }
5689 
5690 /**
5691  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5692  * @fm_entry: filter entry to inspect
5693  * @vsi_handle: VSI handle to compare with filter info
5694  */
5695 static bool
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry * fm_entry,u16 vsi_handle)5696 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5697 {
5698 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5699 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5700 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5701 		 fm_entry->vsi_list_info &&
5702 		 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5703 				 vsi_handle))));
5704 }
5705 
5706 /**
5707  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5708  * @hw: pointer to the hardware structure
5709  * @vsi_handle: VSI handle to remove filters from
5710  * @vsi_list_head: pointer to the list to add entry to
5711  * @fi: pointer to fltr_info of filter entry to copy & add
5712  *
5713  * Helper function, used when creating a list of filters to remove from
5714  * a specific VSI. The entry added to vsi_list_head is a COPY of the
5715  * original filter entry, with the exception of fltr_info.fltr_act and
5716  * fltr_info.fwd_id fields. These are set such that later logic can
5717  * extract which VSI to remove the fltr from, and pass on that information.
5718  */
5719 static enum ice_status
ice_add_entry_to_vsi_fltr_list(struct ice_hw * hw,u16 vsi_handle,struct LIST_HEAD_TYPE * vsi_list_head,struct ice_fltr_info * fi)5720 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5721 			       struct LIST_HEAD_TYPE *vsi_list_head,
5722 			       struct ice_fltr_info *fi)
5723 {
5724 	struct ice_fltr_list_entry *tmp;
5725 
5726 	/* this memory is freed up in the caller function
5727 	 * once filters for this VSI are removed
5728 	 */
5729 	tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5730 	if (!tmp)
5731 		return ICE_ERR_NO_MEMORY;
5732 
5733 	tmp->fltr_info = *fi;
5734 
5735 	/* Overwrite these fields to indicate which VSI to remove filter from,
5736 	 * so find and remove logic can extract the information from the
5737 	 * list entries. Note that original entries will still have proper
5738 	 * values.
5739 	 */
5740 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5741 	tmp->fltr_info.vsi_handle = vsi_handle;
5742 	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5743 
5744 	LIST_ADD(&tmp->list_entry, vsi_list_head);
5745 
5746 	return ICE_SUCCESS;
5747 }
5748 
5749 /**
5750  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5751  * @hw: pointer to the hardware structure
5752  * @vsi_handle: VSI handle to remove filters from
5753  * @lkup_list_head: pointer to the list that has certain lookup type filters
5754  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5755  *
5756  * Locates all filters in lkup_list_head that are used by the given VSI,
5757  * and adds COPIES of those entries to vsi_list_head (intended to be used
5758  * to remove the listed filters).
5759  * Note that this means all entries in vsi_list_head must be explicitly
5760  * deallocated by the caller when done with list.
5761  */
5762 static enum ice_status
ice_add_to_vsi_fltr_list(struct ice_hw * hw,u16 vsi_handle,struct LIST_HEAD_TYPE * lkup_list_head,struct LIST_HEAD_TYPE * vsi_list_head)5763 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5764 			 struct LIST_HEAD_TYPE *lkup_list_head,
5765 			 struct LIST_HEAD_TYPE *vsi_list_head)
5766 {
5767 	struct ice_fltr_mgmt_list_entry *fm_entry;
5768 	enum ice_status status = ICE_SUCCESS;
5769 
5770 	/* check to make sure VSI ID is valid and within boundary */
5771 	if (!ice_is_vsi_valid(hw, vsi_handle))
5772 		return ICE_ERR_PARAM;
5773 
5774 	LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5775 			    ice_fltr_mgmt_list_entry, list_entry) {
5776 		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
5777 			continue;
5778 
5779 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5780 							vsi_list_head,
5781 							&fm_entry->fltr_info);
5782 		if (status)
5783 			return status;
5784 	}
5785 	return status;
5786 }
5787 
5788 /**
5789  * ice_determine_promisc_mask
5790  * @fi: filter info to parse
5791  *
5792  * Helper function to determine which ICE_PROMISC_ mask corresponds
5793  * to given filter into.
5794  */
ice_determine_promisc_mask(struct ice_fltr_info * fi)5795 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5796 {
5797 	u16 vid = fi->l_data.mac_vlan.vlan_id;
5798 	u8 *macaddr = fi->l_data.mac.mac_addr;
5799 	bool is_tx_fltr = false;
5800 	u8 promisc_mask = 0;
5801 
5802 	if (fi->flag == ICE_FLTR_TX)
5803 		is_tx_fltr = true;
5804 
5805 	if (IS_BROADCAST_ETHER_ADDR(macaddr))
5806 		promisc_mask |= is_tx_fltr ?
5807 			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5808 	else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5809 		promisc_mask |= is_tx_fltr ?
5810 			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5811 	else if (IS_UNICAST_ETHER_ADDR(macaddr))
5812 		promisc_mask |= is_tx_fltr ?
5813 			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5814 	if (vid)
5815 		promisc_mask |= is_tx_fltr ?
5816 			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5817 
5818 	return promisc_mask;
5819 }
5820 
5821 /**
5822  * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5823  * @hw: pointer to the hardware structure
5824  * @vsi_handle: VSI handle to retrieve info from
5825  * @promisc_mask: pointer to mask to be filled in
5826  * @vid: VLAN ID of promisc VLAN VSI
5827  * @sw: pointer to switch info struct for which function add rule
5828  */
5829 static enum ice_status
_ice_get_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 * promisc_mask,u16 * vid,struct ice_switch_info * sw)5830 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5831 		     u16 *vid, struct ice_switch_info *sw)
5832 {
5833 	struct ice_fltr_mgmt_list_entry *itr;
5834 	struct LIST_HEAD_TYPE *rule_head;
5835 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
5836 
5837 	if (!ice_is_vsi_valid(hw, vsi_handle))
5838 		return ICE_ERR_PARAM;
5839 
5840 	*vid = 0;
5841 	*promisc_mask = 0;
5842 	rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5843 	rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5844 
5845 	ice_acquire_lock(rule_lock);
5846 	LIST_FOR_EACH_ENTRY(itr, rule_head,
5847 			    ice_fltr_mgmt_list_entry, list_entry) {
5848 		/* Continue if this filter doesn't apply to this VSI or the
5849 		 * VSI ID is not in the VSI map for this filter
5850 		 */
5851 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
5852 			continue;
5853 
5854 		*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5855 	}
5856 	ice_release_lock(rule_lock);
5857 
5858 	return ICE_SUCCESS;
5859 }
5860 
5861 /**
5862  * ice_get_vsi_promisc - get promiscuous mode of given VSI
5863  * @hw: pointer to the hardware structure
5864  * @vsi_handle: VSI handle to retrieve info from
5865  * @promisc_mask: pointer to mask to be filled in
5866  * @vid: VLAN ID of promisc VLAN VSI
5867  */
5868 enum ice_status
ice_get_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 * promisc_mask,u16 * vid)5869 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5870 		    u16 *vid)
5871 {
5872 	return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5873 				    vid, hw->switch_info);
5874 }
5875 
5876 /**
5877  * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5878  * @hw: pointer to the hardware structure
5879  * @vsi_handle: VSI handle to retrieve info from
5880  * @promisc_mask: pointer to mask to be filled in
5881  * @vid: VLAN ID of promisc VLAN VSI
5882  * @sw: pointer to switch info struct for which function add rule
5883  */
5884 static enum ice_status
_ice_get_vsi_vlan_promisc(struct ice_hw * hw,u16 vsi_handle,u8 * promisc_mask,u16 * vid,struct ice_switch_info * sw)5885 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5886 			  u16 *vid, struct ice_switch_info *sw)
5887 {
5888 	struct ice_fltr_mgmt_list_entry *itr;
5889 	struct LIST_HEAD_TYPE *rule_head;
5890 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
5891 
5892 	if (!ice_is_vsi_valid(hw, vsi_handle))
5893 		return ICE_ERR_PARAM;
5894 
5895 	*vid = 0;
5896 	*promisc_mask = 0;
5897 	rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5898 	rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5899 
5900 	ice_acquire_lock(rule_lock);
5901 	LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5902 			    list_entry) {
5903 		/* Continue if this filter doesn't apply to this VSI or the
5904 		 * VSI ID is not in the VSI map for this filter
5905 		 */
5906 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
5907 			continue;
5908 
5909 		*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5910 	}
5911 	ice_release_lock(rule_lock);
5912 
5913 	return ICE_SUCCESS;
5914 }
5915 
5916 /**
5917  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5918  * @hw: pointer to the hardware structure
5919  * @vsi_handle: VSI handle to retrieve info from
5920  * @promisc_mask: pointer to mask to be filled in
5921  * @vid: VLAN ID of promisc VLAN VSI
5922  */
5923 enum ice_status
ice_get_vsi_vlan_promisc(struct ice_hw * hw,u16 vsi_handle,u8 * promisc_mask,u16 * vid)5924 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5925 			 u16 *vid)
5926 {
5927 	return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5928 					 vid, hw->switch_info);
5929 }
5930 
5931 /**
5932  * ice_remove_promisc - Remove promisc based filter rules
5933  * @hw: pointer to the hardware structure
5934  * @recp_id: recipe ID for which the rule needs to removed
5935  * @v_list: list of promisc entries
5936  */
5937 static enum ice_status
ice_remove_promisc(struct ice_hw * hw,u8 recp_id,struct LIST_HEAD_TYPE * v_list)5938 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5939 		   struct LIST_HEAD_TYPE *v_list)
5940 {
5941 	struct ice_fltr_list_entry *v_list_itr, *tmp;
5942 	struct ice_sw_recipe *recp_list;
5943 
5944 	recp_list = &hw->switch_info->recp_list[recp_id];
5945 	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5946 				 list_entry) {
5947 		v_list_itr->status =
5948 			ice_remove_rule_internal(hw, recp_list, v_list_itr);
5949 		if (v_list_itr->status)
5950 			return v_list_itr->status;
5951 	}
5952 	return ICE_SUCCESS;
5953 }
5954 
5955 /**
5956  * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5957  * @hw: pointer to the hardware structure
5958  * @vsi_handle: VSI handle to clear mode
5959  * @promisc_mask: mask of promiscuous config bits to clear
5960  * @vid: VLAN ID to clear VLAN promiscuous
5961  * @sw: pointer to switch info struct for which function add rule
5962  */
5963 static enum ice_status
_ice_clear_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,u16 vid,struct ice_switch_info * sw)5964 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5965 		       u16 vid, struct ice_switch_info *sw)
5966 {
5967 	struct ice_fltr_list_entry *fm_entry, *tmp;
5968 	struct LIST_HEAD_TYPE remove_list_head;
5969 	struct ice_fltr_mgmt_list_entry *itr;
5970 	struct LIST_HEAD_TYPE *rule_head;
5971 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
5972 	enum ice_status status = ICE_SUCCESS;
5973 	u8 recipe_id;
5974 
5975 	if (!ice_is_vsi_valid(hw, vsi_handle))
5976 		return ICE_ERR_PARAM;
5977 
5978 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5979 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5980 	else
5981 		recipe_id = ICE_SW_LKUP_PROMISC;
5982 
5983 	rule_head = &sw->recp_list[recipe_id].filt_rules;
5984 	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5985 
5986 	INIT_LIST_HEAD(&remove_list_head);
5987 
5988 	ice_acquire_lock(rule_lock);
5989 	LIST_FOR_EACH_ENTRY(itr, rule_head,
5990 			    ice_fltr_mgmt_list_entry, list_entry) {
5991 		struct ice_fltr_info *fltr_info;
5992 		u8 fltr_promisc_mask = 0;
5993 
5994 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
5995 			continue;
5996 		fltr_info = &itr->fltr_info;
5997 
5998 		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5999 		    vid != fltr_info->l_data.mac_vlan.vlan_id)
6000 			continue;
6001 
6002 		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
6003 
6004 		/* Skip if filter is not completely specified by given mask */
6005 		if (fltr_promisc_mask & ~promisc_mask)
6006 			continue;
6007 
6008 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
6009 							&remove_list_head,
6010 							fltr_info);
6011 		if (status) {
6012 			ice_release_lock(rule_lock);
6013 			goto free_fltr_list;
6014 		}
6015 	}
6016 	ice_release_lock(rule_lock);
6017 
6018 	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
6019 
6020 free_fltr_list:
6021 	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6022 				 ice_fltr_list_entry, list_entry) {
6023 		LIST_DEL(&fm_entry->list_entry);
6024 		ice_free(hw, fm_entry);
6025 	}
6026 
6027 	return status;
6028 }
6029 
6030 /**
6031  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
6032  * @hw: pointer to the hardware structure
6033  * @vsi_handle: VSI handle to clear mode
6034  * @promisc_mask: mask of promiscuous config bits to clear
6035  * @vid: VLAN ID to clear VLAN promiscuous
6036  */
6037 enum ice_status
ice_clear_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,u16 vid)6038 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
6039 		      u8 promisc_mask, u16 vid)
6040 {
6041 	return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
6042 				      vid, hw->switch_info);
6043 }
6044 
6045 /**
6046  * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
6047  * @hw: pointer to the hardware structure
6048  * @vsi_handle: VSI handle to configure
6049  * @promisc_mask: mask of promiscuous config bits
6050  * @vid: VLAN ID to set VLAN promiscuous
6051  * @lport: logical port number to configure promisc mode
6052  * @sw: pointer to switch info struct for which function add rule
6053  */
6054 static enum ice_status
_ice_set_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,u16 vid,u8 lport,struct ice_switch_info * sw)6055 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6056 		     u16 vid, u8 lport, struct ice_switch_info *sw)
6057 {
6058 	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
6059 	struct ice_fltr_list_entry f_list_entry;
6060 	struct ice_fltr_info new_fltr;
6061 	enum ice_status status = ICE_SUCCESS;
6062 	bool is_tx_fltr;
6063 	u16 hw_vsi_id;
6064 	int pkt_type;
6065 	u8 recipe_id;
6066 
6067 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6068 
6069 	if (!ice_is_vsi_valid(hw, vsi_handle))
6070 		return ICE_ERR_PARAM;
6071 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6072 
6073 	ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
6074 
6075 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
6076 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
6077 		new_fltr.l_data.mac_vlan.vlan_id = vid;
6078 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
6079 	} else {
6080 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
6081 		recipe_id = ICE_SW_LKUP_PROMISC;
6082 	}
6083 
6084 	/* Separate filters must be set for each direction/packet type
6085 	 * combination, so we will loop over the mask value, store the
6086 	 * individual type, and clear it out in the input mask as it
6087 	 * is found.
6088 	 */
6089 	while (promisc_mask) {
6090 		struct ice_sw_recipe *recp_list;
6091 		u8 *mac_addr;
6092 
6093 		pkt_type = 0;
6094 		is_tx_fltr = false;
6095 
6096 		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
6097 			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
6098 			pkt_type = UCAST_FLTR;
6099 		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
6100 			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
6101 			pkt_type = UCAST_FLTR;
6102 			is_tx_fltr = true;
6103 		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
6104 			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
6105 			pkt_type = MCAST_FLTR;
6106 		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
6107 			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
6108 			pkt_type = MCAST_FLTR;
6109 			is_tx_fltr = true;
6110 		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
6111 			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
6112 			pkt_type = BCAST_FLTR;
6113 		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
6114 			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
6115 			pkt_type = BCAST_FLTR;
6116 			is_tx_fltr = true;
6117 		}
6118 
6119 		/* Check for VLAN promiscuous flag */
6120 		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
6121 			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
6122 		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
6123 			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
6124 			is_tx_fltr = true;
6125 		}
6126 
6127 		/* Set filter DA based on packet type */
6128 		mac_addr = new_fltr.l_data.mac.mac_addr;
6129 		if (pkt_type == BCAST_FLTR) {
6130 			ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
6131 		} else if (pkt_type == MCAST_FLTR ||
6132 			   pkt_type == UCAST_FLTR) {
6133 			/* Use the dummy ether header DA */
6134 			ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
6135 				   ICE_NONDMA_TO_NONDMA);
6136 			if (pkt_type == MCAST_FLTR)
6137 				mac_addr[0] |= 0x1;	/* Set multicast bit */
6138 		}
6139 
6140 		/* Need to reset this to zero for all iterations */
6141 		new_fltr.flag = 0;
6142 		if (is_tx_fltr) {
6143 			new_fltr.flag |= ICE_FLTR_TX;
6144 			new_fltr.src = hw_vsi_id;
6145 		} else {
6146 			new_fltr.flag |= ICE_FLTR_RX;
6147 			new_fltr.src = lport;
6148 		}
6149 
6150 		new_fltr.fltr_act = ICE_FWD_TO_VSI;
6151 		new_fltr.vsi_handle = vsi_handle;
6152 		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
6153 		f_list_entry.fltr_info = new_fltr;
6154 		recp_list = &sw->recp_list[recipe_id];
6155 
6156 		status = ice_add_rule_internal(hw, recp_list, lport,
6157 					       &f_list_entry);
6158 		if (status != ICE_SUCCESS)
6159 			goto set_promisc_exit;
6160 	}
6161 
6162 set_promisc_exit:
6163 	return status;
6164 }
6165 
6166 /**
6167  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
6168  * @hw: pointer to the hardware structure
6169  * @vsi_handle: VSI handle to configure
6170  * @promisc_mask: mask of promiscuous config bits
6171  * @vid: VLAN ID to set VLAN promiscuous
6172  */
6173 enum ice_status
ice_set_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,u16 vid)6174 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6175 		    u16 vid)
6176 {
6177 	return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
6178 				    hw->port_info->lport,
6179 				    hw->switch_info);
6180 }
6181 
6182 /**
6183  * _ice_set_vlan_vsi_promisc
6184  * @hw: pointer to the hardware structure
6185  * @vsi_handle: VSI handle to configure
6186  * @promisc_mask: mask of promiscuous config bits
6187  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6188  * @lport: logical port number to configure promisc mode
6189  * @sw: pointer to switch info struct for which function add rule
6190  *
6191  * Configure VSI with all associated VLANs to given promiscuous mode(s)
6192  */
6193 static enum ice_status
_ice_set_vlan_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,bool rm_vlan_promisc,u8 lport,struct ice_switch_info * sw)6194 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6195 			  bool rm_vlan_promisc, u8 lport,
6196 			  struct ice_switch_info *sw)
6197 {
6198 	struct ice_fltr_list_entry *list_itr, *tmp;
6199 	struct LIST_HEAD_TYPE vsi_list_head;
6200 	struct LIST_HEAD_TYPE *vlan_head;
6201 	struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
6202 	enum ice_status status;
6203 	u16 vlan_id;
6204 
6205 	INIT_LIST_HEAD(&vsi_list_head);
6206 	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
6207 	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
6208 	ice_acquire_lock(vlan_lock);
6209 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
6210 					  &vsi_list_head);
6211 	ice_release_lock(vlan_lock);
6212 	if (status)
6213 		goto free_fltr_list;
6214 
6215 	LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
6216 			    list_entry) {
6217 		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
6218 		if (rm_vlan_promisc)
6219 			status =  _ice_clear_vsi_promisc(hw, vsi_handle,
6220 							 promisc_mask,
6221 							 vlan_id, sw);
6222 		else
6223 			status =  _ice_set_vsi_promisc(hw, vsi_handle,
6224 						       promisc_mask, vlan_id,
6225 						       lport, sw);
6226 		if (status)
6227 			break;
6228 	}
6229 
6230 free_fltr_list:
6231 	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
6232 				 ice_fltr_list_entry, list_entry) {
6233 		LIST_DEL(&list_itr->list_entry);
6234 		ice_free(hw, list_itr);
6235 	}
6236 	return status;
6237 }
6238 
6239 /**
6240  * ice_set_vlan_vsi_promisc
6241  * @hw: pointer to the hardware structure
6242  * @vsi_handle: VSI handle to configure
6243  * @promisc_mask: mask of promiscuous config bits
6244  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
6245  *
6246  * Configure VSI with all associated VLANs to given promiscuous mode(s)
6247  */
6248 enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,bool rm_vlan_promisc)6249 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
6250 			 bool rm_vlan_promisc)
6251 {
6252 	return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
6253 					 rm_vlan_promisc, hw->port_info->lport,
6254 					 hw->switch_info);
6255 }
6256 
6257 /**
6258  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
6259  * @hw: pointer to the hardware structure
6260  * @vsi_handle: VSI handle to remove filters from
6261  * @recp_list: recipe list from which function remove fltr
6262  * @lkup: switch rule filter lookup type
6263  */
6264 static void
ice_remove_vsi_lkup_fltr(struct ice_hw * hw,u16 vsi_handle,struct ice_sw_recipe * recp_list,enum ice_sw_lkup_type lkup)6265 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
6266 			 struct ice_sw_recipe *recp_list,
6267 			 enum ice_sw_lkup_type lkup)
6268 {
6269 	struct ice_fltr_list_entry *fm_entry;
6270 	struct LIST_HEAD_TYPE remove_list_head;
6271 	struct LIST_HEAD_TYPE *rule_head;
6272 	struct ice_fltr_list_entry *tmp;
6273 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
6274 	enum ice_status status;
6275 
6276 	INIT_LIST_HEAD(&remove_list_head);
6277 	rule_lock = &recp_list[lkup].filt_rule_lock;
6278 	rule_head = &recp_list[lkup].filt_rules;
6279 	ice_acquire_lock(rule_lock);
6280 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
6281 					  &remove_list_head);
6282 	ice_release_lock(rule_lock);
6283 	if (status)
6284 		goto free_fltr_list;
6285 
6286 	switch (lkup) {
6287 	case ICE_SW_LKUP_MAC:
6288 		ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
6289 		break;
6290 	case ICE_SW_LKUP_VLAN:
6291 		ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
6292 		break;
6293 	case ICE_SW_LKUP_PROMISC:
6294 	case ICE_SW_LKUP_PROMISC_VLAN:
6295 		ice_remove_promisc(hw, lkup, &remove_list_head);
6296 		break;
6297 	case ICE_SW_LKUP_MAC_VLAN:
6298 		ice_remove_mac_vlan(hw, &remove_list_head);
6299 		break;
6300 	case ICE_SW_LKUP_ETHERTYPE:
6301 	case ICE_SW_LKUP_ETHERTYPE_MAC:
6302 		ice_remove_eth_mac(hw, &remove_list_head);
6303 		break;
6304 	case ICE_SW_LKUP_DFLT:
6305 		ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
6306 		break;
6307 	case ICE_SW_LKUP_LAST:
6308 		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
6309 		break;
6310 	}
6311 
6312 free_fltr_list:
6313 	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
6314 				 ice_fltr_list_entry, list_entry) {
6315 		LIST_DEL(&fm_entry->list_entry);
6316 		ice_free(hw, fm_entry);
6317 	}
6318 }
6319 
6320 /**
6321  * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
6322  * @hw: pointer to the hardware structure
6323  * @vsi_handle: VSI handle to remove filters from
6324  * @sw: pointer to switch info struct
6325  */
6326 static void
ice_remove_vsi_fltr_rule(struct ice_hw * hw,u16 vsi_handle,struct ice_switch_info * sw)6327 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
6328 			 struct ice_switch_info *sw)
6329 {
6330 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
6331 
6332 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6333 				 sw->recp_list, ICE_SW_LKUP_MAC);
6334 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6335 				 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
6336 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6337 				 sw->recp_list, ICE_SW_LKUP_PROMISC);
6338 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6339 				 sw->recp_list, ICE_SW_LKUP_VLAN);
6340 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6341 				 sw->recp_list, ICE_SW_LKUP_DFLT);
6342 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6343 				 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
6344 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6345 				 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
6346 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
6347 				 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
6348 }
6349 
6350 /**
6351  * ice_remove_vsi_fltr - Remove all filters for a VSI
6352  * @hw: pointer to the hardware structure
6353  * @vsi_handle: VSI handle to remove filters from
6354  */
ice_remove_vsi_fltr(struct ice_hw * hw,u16 vsi_handle)6355 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
6356 {
6357 	ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
6358 }
6359 
6360 /**
6361  * ice_alloc_res_cntr - allocating resource counter
6362  * @hw: pointer to the hardware structure
6363  * @type: type of resource
6364  * @alloc_shared: if set it is shared else dedicated
6365  * @num_items: number of entries requested for FD resource type
6366  * @counter_id: counter index returned by AQ call
6367  */
6368 enum ice_status
ice_alloc_res_cntr(struct ice_hw * hw,u8 type,u8 alloc_shared,u16 num_items,u16 * counter_id)6369 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6370 		   u16 *counter_id)
6371 {
6372 	struct ice_aqc_alloc_free_res_elem *buf;
6373 	enum ice_status status;
6374 	u16 buf_len;
6375 
6376 	/* Allocate resource */
6377 	buf_len = ice_struct_size(buf, elem, 1);
6378 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6379 	if (!buf)
6380 		return ICE_ERR_NO_MEMORY;
6381 
6382 	buf->num_elems = CPU_TO_LE16(num_items);
6383 	buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6384 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
6385 
6386 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6387 				       ice_aqc_opc_alloc_res, NULL);
6388 	if (status)
6389 		goto exit;
6390 
6391 	*counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
6392 
6393 exit:
6394 	ice_free(hw, buf);
6395 	return status;
6396 }
6397 
6398 /**
6399  * ice_free_res_cntr - free resource counter
6400  * @hw: pointer to the hardware structure
6401  * @type: type of resource
6402  * @alloc_shared: if set it is shared else dedicated
6403  * @num_items: number of entries to be freed for FD resource type
6404  * @counter_id: counter ID resource which needs to be freed
6405  */
6406 enum ice_status
ice_free_res_cntr(struct ice_hw * hw,u8 type,u8 alloc_shared,u16 num_items,u16 counter_id)6407 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
6408 		  u16 counter_id)
6409 {
6410 	struct ice_aqc_alloc_free_res_elem *buf;
6411 	enum ice_status status;
6412 	u16 buf_len;
6413 
6414 	/* Free resource */
6415 	buf_len = ice_struct_size(buf, elem, 1);
6416 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6417 	if (!buf)
6418 		return ICE_ERR_NO_MEMORY;
6419 
6420 	buf->num_elems = CPU_TO_LE16(num_items);
6421 	buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
6422 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
6423 	buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
6424 
6425 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
6426 				       ice_aqc_opc_free_res, NULL);
6427 	if (status)
6428 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
6429 
6430 	ice_free(hw, buf);
6431 	return status;
6432 }
6433 
6434 /**
6435  * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
6436  * @hw: pointer to the hardware structure
6437  * @counter_id: returns counter index
6438  */
ice_alloc_vlan_res_counter(struct ice_hw * hw,u16 * counter_id)6439 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
6440 {
6441 	return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6442 				  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6443 				  counter_id);
6444 }
6445 
6446 /**
6447  * ice_free_vlan_res_counter - Free counter resource for VLAN type
6448  * @hw: pointer to the hardware structure
6449  * @counter_id: counter index to be freed
6450  */
ice_free_vlan_res_counter(struct ice_hw * hw,u16 counter_id)6451 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
6452 {
6453 	return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
6454 				 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
6455 				 counter_id);
6456 }
6457 
6458 /**
6459  * ice_alloc_res_lg_act - add large action resource
6460  * @hw: pointer to the hardware structure
6461  * @l_id: large action ID to fill it in
6462  * @num_acts: number of actions to hold with a large action entry
6463  */
6464 static enum ice_status
ice_alloc_res_lg_act(struct ice_hw * hw,u16 * l_id,u16 num_acts)6465 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
6466 {
6467 	struct ice_aqc_alloc_free_res_elem *sw_buf;
6468 	enum ice_status status;
6469 	u16 buf_len;
6470 
6471 	if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
6472 		return ICE_ERR_PARAM;
6473 
6474 	/* Allocate resource for large action */
6475 	buf_len = ice_struct_size(sw_buf, elem, 1);
6476 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
6477 	if (!sw_buf)
6478 		return ICE_ERR_NO_MEMORY;
6479 
6480 	sw_buf->num_elems = CPU_TO_LE16(1);
6481 
6482 	/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
6483 	 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
6484 	 * If num_acts is greater than 2, then use
6485 	 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
6486 	 * The num_acts cannot exceed 4. This was ensured at the
6487 	 * beginning of the function.
6488 	 */
6489 	if (num_acts == 1)
6490 		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
6491 	else if (num_acts == 2)
6492 		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
6493 	else
6494 		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
6495 
6496 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
6497 				       ice_aqc_opc_alloc_res, NULL);
6498 	if (!status)
6499 		*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
6500 
6501 	ice_free(hw, sw_buf);
6502 	return status;
6503 }
6504 
6505 /**
6506  * ice_add_mac_with_sw_marker - add filter with sw marker
6507  * @hw: pointer to the hardware structure
6508  * @f_info: filter info structure containing the MAC filter information
6509  * @sw_marker: sw marker to tag the Rx descriptor with
6510  */
6511 enum ice_status
ice_add_mac_with_sw_marker(struct ice_hw * hw,struct ice_fltr_info * f_info,u16 sw_marker)6512 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
6513 			   u16 sw_marker)
6514 {
6515 	struct ice_fltr_mgmt_list_entry *m_entry;
6516 	struct ice_fltr_list_entry fl_info;
6517 	struct ice_sw_recipe *recp_list;
6518 	struct LIST_HEAD_TYPE l_head;
6519 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
6520 	enum ice_status ret;
6521 	bool entry_exists;
6522 	u16 lg_act_id;
6523 
6524 	if (f_info->fltr_act != ICE_FWD_TO_VSI)
6525 		return ICE_ERR_PARAM;
6526 
6527 	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6528 		return ICE_ERR_PARAM;
6529 
6530 	if (sw_marker == ICE_INVAL_SW_MARKER_ID)
6531 		return ICE_ERR_PARAM;
6532 
6533 	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6534 		return ICE_ERR_PARAM;
6535 	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6536 
6537 	/* Add filter if it doesn't exist so then the adding of large
6538 	 * action always results in update
6539 	 */
6540 
6541 	INIT_LIST_HEAD(&l_head);
6542 	fl_info.fltr_info = *f_info;
6543 	LIST_ADD(&fl_info.list_entry, &l_head);
6544 
6545 	entry_exists = false;
6546 	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6547 			       hw->port_info->lport);
6548 	if (ret == ICE_ERR_ALREADY_EXISTS)
6549 		entry_exists = true;
6550 	else if (ret)
6551 		return ret;
6552 
6553 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6554 	rule_lock = &recp_list->filt_rule_lock;
6555 	ice_acquire_lock(rule_lock);
6556 	/* Get the book keeping entry for the filter */
6557 	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6558 	if (!m_entry)
6559 		goto exit_error;
6560 
6561 	/* If counter action was enabled for this rule then don't enable
6562 	 * sw marker large action
6563 	 */
6564 	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6565 		ret = ICE_ERR_PARAM;
6566 		goto exit_error;
6567 	}
6568 
6569 	/* if same marker was added before */
6570 	if (m_entry->sw_marker_id == sw_marker) {
6571 		ret = ICE_ERR_ALREADY_EXISTS;
6572 		goto exit_error;
6573 	}
6574 
6575 	/* Allocate a hardware table entry to hold large act. Three actions
6576 	 * for marker based large action
6577 	 */
6578 	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
6579 	if (ret)
6580 		goto exit_error;
6581 
6582 	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6583 		goto exit_error;
6584 
6585 	/* Update the switch rule to add the marker action */
6586 	ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
6587 	if (!ret) {
6588 		ice_release_lock(rule_lock);
6589 		return ret;
6590 	}
6591 
6592 exit_error:
6593 	ice_release_lock(rule_lock);
6594 	/* only remove entry if it did not exist previously */
6595 	if (!entry_exists)
6596 		ret = ice_remove_mac(hw, &l_head);
6597 
6598 	return ret;
6599 }
6600 
6601 /**
6602  * ice_add_mac_with_counter - add filter with counter enabled
6603  * @hw: pointer to the hardware structure
6604  * @f_info: pointer to filter info structure containing the MAC filter
6605  *          information
6606  */
6607 enum ice_status
ice_add_mac_with_counter(struct ice_hw * hw,struct ice_fltr_info * f_info)6608 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
6609 {
6610 	struct ice_fltr_mgmt_list_entry *m_entry;
6611 	struct ice_fltr_list_entry fl_info;
6612 	struct ice_sw_recipe *recp_list;
6613 	struct LIST_HEAD_TYPE l_head;
6614 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
6615 	enum ice_status ret;
6616 	bool entry_exist;
6617 	u16 counter_id;
6618 	u16 lg_act_id;
6619 
6620 	if (f_info->fltr_act != ICE_FWD_TO_VSI)
6621 		return ICE_ERR_PARAM;
6622 
6623 	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
6624 		return ICE_ERR_PARAM;
6625 
6626 	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
6627 		return ICE_ERR_PARAM;
6628 	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
6629 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
6630 
6631 	entry_exist = false;
6632 
6633 	rule_lock = &recp_list->filt_rule_lock;
6634 
6635 	/* Add filter if it doesn't exist so then the adding of large
6636 	 * action always results in update
6637 	 */
6638 	INIT_LIST_HEAD(&l_head);
6639 
6640 	fl_info.fltr_info = *f_info;
6641 	LIST_ADD(&fl_info.list_entry, &l_head);
6642 
6643 	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
6644 			       hw->port_info->lport);
6645 	if (ret == ICE_ERR_ALREADY_EXISTS)
6646 		entry_exist = true;
6647 	else if (ret)
6648 		return ret;
6649 
6650 	ice_acquire_lock(rule_lock);
6651 	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
6652 	if (!m_entry) {
6653 		ret = ICE_ERR_BAD_PTR;
6654 		goto exit_error;
6655 	}
6656 
6657 	/* Don't enable counter for a filter for which sw marker was enabled */
6658 	if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
6659 		ret = ICE_ERR_PARAM;
6660 		goto exit_error;
6661 	}
6662 
6663 	/* If a counter was already enabled then don't need to add again */
6664 	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
6665 		ret = ICE_ERR_ALREADY_EXISTS;
6666 		goto exit_error;
6667 	}
6668 
6669 	/* Allocate a hardware table entry to VLAN counter */
6670 	ret = ice_alloc_vlan_res_counter(hw, &counter_id);
6671 	if (ret)
6672 		goto exit_error;
6673 
6674 	/* Allocate a hardware table entry to hold large act. Two actions for
6675 	 * counter based large action
6676 	 */
6677 	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
6678 	if (ret)
6679 		goto exit_error;
6680 
6681 	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6682 		goto exit_error;
6683 
6684 	/* Update the switch rule to add the counter action */
6685 	ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6686 	if (!ret) {
6687 		ice_release_lock(rule_lock);
6688 		return ret;
6689 	}
6690 
6691 exit_error:
6692 	ice_release_lock(rule_lock);
6693 	/* only remove entry if it did not exist previously */
6694 	if (!entry_exist)
6695 		ret = ice_remove_mac(hw, &l_head);
6696 
6697 	return ret;
6698 }
6699 
6700 /* This is mapping table entry that maps every word within a given protocol
6701  * structure to the real byte offset as per the specification of that
6702  * protocol header.
6703  * for example dst address is 3 words in ethertype header and corresponding
6704  * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6705  * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6706  * matching entry describing its field. This needs to be updated if new
6707  * structure is added to that union.
6708  */
6709 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6710 	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
6711 	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
6712 	{ ICE_ETYPE_OL,		{ 0 } },
6713 	{ ICE_VLAN_OFOS,	{ 2, 0 } },
6714 	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6715 	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6716 	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6717 				 26, 28, 30, 32, 34, 36, 38 } },
6718 	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6719 				 26, 28, 30, 32, 34, 36, 38 } },
6720 	{ ICE_TCP_IL,		{ 0, 2 } },
6721 	{ ICE_UDP_OF,		{ 0, 2 } },
6722 	{ ICE_UDP_ILOS,		{ 0, 2 } },
6723 	{ ICE_SCTP_IL,		{ 0, 2 } },
6724 	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
6725 	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
6726 	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
6727 	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
6728 	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
6729 	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
6730 	{ ICE_PFCP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
6731 	{ ICE_L2TPV3,		{ 0, 2, 4, 6, 8, 10 } },
6732 	{ ICE_ESP,		{ 0, 2, 4, 6 } },
6733 	{ ICE_AH,		{ 0, 2, 4, 6, 8, 10 } },
6734 	{ ICE_NAT_T,		{ 8, 10, 12, 14 } },
6735 	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
6736 	{ ICE_VLAN_EX,		{ 2, 0 } },
6737 	{ ICE_VLAN_IN,		{ 2, 0 } },
6738 };
6739 
6740 /* The following table describes preferred grouping of recipes.
6741  * If a recipe that needs to be programmed is a superset or matches one of the
6742  * following combinations, then the recipe needs to be chained as per the
6743  * following policy.
6744  */
6745 
6746 static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6747 	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
6748 	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
6749 	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
6750 	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
6751 	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
6752 	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
6753 	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
6754 	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
6755 	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
6756 	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
6757 	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
6758 	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
6759 	{ ICE_VXLAN,		ICE_UDP_OF_HW },
6760 	{ ICE_GENEVE,		ICE_UDP_OF_HW },
6761 	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
6762 	{ ICE_NVGRE,		ICE_GRE_OF_HW },
6763 	{ ICE_GTP,		ICE_UDP_OF_HW },
6764 	{ ICE_PPPOE,		ICE_PPPOE_HW },
6765 	{ ICE_PFCP,		ICE_UDP_ILOS_HW },
6766 	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
6767 	{ ICE_ESP,		ICE_ESP_HW },
6768 	{ ICE_AH,		ICE_AH_HW },
6769 	{ ICE_NAT_T,		ICE_UDP_ILOS_HW },
6770 	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
6771 	{ ICE_VLAN_EX,		ICE_VLAN_OF_HW },
6772 	{ ICE_VLAN_IN,		ICE_VLAN_OL_HW },
6773 };
6774 
6775 /**
6776  * ice_find_recp - find a recipe
6777  * @hw: pointer to the hardware structure
6778  * @lkup_exts: extension sequence to match
6779  *
6780  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6781  */
ice_find_recp(struct ice_hw * hw,struct ice_prot_lkup_ext * lkup_exts,enum ice_sw_tunnel_type tun_type,u32 priority)6782 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6783 			 enum ice_sw_tunnel_type tun_type, u32 priority)
6784 {
6785 	bool refresh_required = true;
6786 	struct ice_sw_recipe *recp;
6787 	u8 i;
6788 
6789 	/* Walk through existing recipes to find a match */
6790 	recp = hw->switch_info->recp_list;
6791 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6792 		/* If recipe was not created for this ID, in SW bookkeeping,
6793 		 * check if FW has an entry for this recipe. If the FW has an
6794 		 * entry update it in our SW bookkeeping and continue with the
6795 		 * matching.
6796 		 */
6797 		if (!recp[i].recp_created)
6798 			if (ice_get_recp_frm_fw(hw,
6799 						hw->switch_info->recp_list, i,
6800 						&refresh_required))
6801 				continue;
6802 
6803 		/* Skip inverse action recipes */
6804 		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6805 		    ICE_AQ_RECIPE_ACT_INV_ACT)
6806 			continue;
6807 
6808 		/* if number of words we are looking for match */
6809 		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6810 			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6811 			struct ice_fv_word *be = lkup_exts->fv_words;
6812 			u16 *cr = recp[i].lkup_exts.field_mask;
6813 			u16 *de = lkup_exts->field_mask;
6814 			bool found = true;
6815 			u8 pe, qr;
6816 
6817 			/* ar, cr, and qr are related to the recipe words, while
6818 			 * be, de, and pe are related to the lookup words
6819 			 */
6820 			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6821 				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6822 				     qr++) {
6823 					if (ar[qr].off == be[pe].off &&
6824 					    ar[qr].prot_id == be[pe].prot_id &&
6825 					    cr[qr] == de[pe])
6826 						/* Found the "pe"th word in the
6827 						 * given recipe
6828 						 */
6829 						break;
6830 				}
6831 				/* After walking through all the words in the
6832 				 * "i"th recipe if "p"th word was not found then
6833 				 * this recipe is not what we are looking for.
6834 				 * So break out from this loop and try the next
6835 				 * recipe
6836 				 */
6837 				if (qr >= recp[i].lkup_exts.n_val_words) {
6838 					found = false;
6839 					break;
6840 				}
6841 			}
6842 			/* If for "i"th recipe the found was never set to false
6843 			 * then it means we found our match
6844 			 */
6845 			if (tun_type == recp[i].tun_type && found &&
6846 			    priority == recp[i].priority)
6847 				return i; /* Return the recipe ID */
6848 		}
6849 	}
6850 	return ICE_MAX_NUM_RECIPES;
6851 }
6852 
6853 /**
6854  * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
6855  *
6856  * As protocol id for outer vlan is different in dvm and svm, if dvm is
6857  * supported protocol array record for outer vlan has to be modified to
6858  * reflect the value proper for DVM.
6859  */
ice_change_proto_id_to_dvm(void)6860 void ice_change_proto_id_to_dvm(void)
6861 {
6862 	u8 i;
6863 
6864 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6865 		if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
6866 		    ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
6867 			ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
6868 }
6869 
6870 /**
6871  * ice_prot_type_to_id - get protocol ID from protocol type
6872  * @type: protocol type
6873  * @id: pointer to variable that will receive the ID
6874  *
6875  * Returns true if found, false otherwise
6876  */
ice_prot_type_to_id(enum ice_protocol_type type,u8 * id)6877 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6878 {
6879 	u8 i;
6880 
6881 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6882 		if (ice_prot_id_tbl[i].type == type) {
6883 			*id = ice_prot_id_tbl[i].protocol_id;
6884 			return true;
6885 		}
6886 	return false;
6887 }
6888 
6889 /**
6890  * ice_fill_valid_words - count valid words
6891  * @rule: advanced rule with lookup information
6892  * @lkup_exts: byte offset extractions of the words that are valid
6893  *
6894  * calculate valid words in a lookup rule using mask value
6895  */
6896 static u8
ice_fill_valid_words(struct ice_adv_lkup_elem * rule,struct ice_prot_lkup_ext * lkup_exts)6897 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6898 		     struct ice_prot_lkup_ext *lkup_exts)
6899 {
6900 	u8 j, word, prot_id, ret_val;
6901 
6902 	if (!ice_prot_type_to_id(rule->type, &prot_id))
6903 		return 0;
6904 
6905 	word = lkup_exts->n_val_words;
6906 
6907 	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6908 		if (((u16 *)&rule->m_u)[j] &&
6909 		    (size_t)rule->type < ARRAY_SIZE(ice_prot_ext)) {
6910 			/* No more space to accommodate */
6911 			if (word >= ICE_MAX_CHAIN_WORDS)
6912 				return 0;
6913 			lkup_exts->fv_words[word].off =
6914 				ice_prot_ext[rule->type].offs[j];
6915 			lkup_exts->fv_words[word].prot_id =
6916 				ice_prot_id_tbl[rule->type].protocol_id;
6917 			lkup_exts->field_mask[word] =
6918 				BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6919 			word++;
6920 		}
6921 
6922 	ret_val = word - lkup_exts->n_val_words;
6923 	lkup_exts->n_val_words = word;
6924 
6925 	return ret_val;
6926 }
6927 
6928 /**
6929  * ice_create_first_fit_recp_def - Create a recipe grouping
6930  * @hw: pointer to the hardware structure
6931  * @lkup_exts: an array of protocol header extractions
6932  * @rg_list: pointer to a list that stores new recipe groups
6933  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6934  *
6935  * Using first fit algorithm, take all the words that are still not done
6936  * and start grouping them in 4-word groups. Each group makes up one
6937  * recipe.
6938  */
6939 static enum ice_status
ice_create_first_fit_recp_def(struct ice_hw * hw,struct ice_prot_lkup_ext * lkup_exts,struct LIST_HEAD_TYPE * rg_list,u8 * recp_cnt)6940 ice_create_first_fit_recp_def(struct ice_hw *hw,
6941 			      struct ice_prot_lkup_ext *lkup_exts,
6942 			      struct LIST_HEAD_TYPE *rg_list,
6943 			      u8 *recp_cnt)
6944 {
6945 	struct ice_pref_recipe_group *grp = NULL;
6946 	u8 j;
6947 
6948 	*recp_cnt = 0;
6949 
6950 	if (!lkup_exts->n_val_words) {
6951 		struct ice_recp_grp_entry *entry;
6952 
6953 		entry = (struct ice_recp_grp_entry *)
6954 			ice_malloc(hw, sizeof(*entry));
6955 		if (!entry)
6956 			return ICE_ERR_NO_MEMORY;
6957 		LIST_ADD(&entry->l_entry, rg_list);
6958 		grp = &entry->r_group;
6959 		(*recp_cnt)++;
6960 		grp->n_val_pairs = 0;
6961 	}
6962 
6963 	/* Walk through every word in the rule to check if it is not done. If so
6964 	 * then this word needs to be part of a new recipe.
6965 	 */
6966 	for (j = 0; j < lkup_exts->n_val_words; j++)
6967 		if (!ice_is_bit_set(lkup_exts->done, j)) {
6968 			if (!grp ||
6969 			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6970 				struct ice_recp_grp_entry *entry;
6971 
6972 				entry = (struct ice_recp_grp_entry *)
6973 					ice_malloc(hw, sizeof(*entry));
6974 				if (!entry)
6975 					return ICE_ERR_NO_MEMORY;
6976 				LIST_ADD(&entry->l_entry, rg_list);
6977 				grp = &entry->r_group;
6978 				(*recp_cnt)++;
6979 			}
6980 
6981 			grp->pairs[grp->n_val_pairs].prot_id =
6982 				lkup_exts->fv_words[j].prot_id;
6983 			grp->pairs[grp->n_val_pairs].off =
6984 				lkup_exts->fv_words[j].off;
6985 			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6986 			grp->n_val_pairs++;
6987 		}
6988 
6989 	return ICE_SUCCESS;
6990 }
6991 
6992 /**
6993  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6994  * @hw: pointer to the hardware structure
6995  * @fv_list: field vector with the extraction sequence information
6996  * @rg_list: recipe groupings with protocol-offset pairs
6997  *
6998  * Helper function to fill in the field vector indices for protocol-offset
6999  * pairs. These indexes are then ultimately programmed into a recipe.
7000  */
7001 static enum ice_status
ice_fill_fv_word_index(struct ice_hw * hw,struct LIST_HEAD_TYPE * fv_list,struct LIST_HEAD_TYPE * rg_list)7002 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
7003 		       struct LIST_HEAD_TYPE *rg_list)
7004 {
7005 	struct ice_sw_fv_list_entry *fv;
7006 	struct ice_recp_grp_entry *rg;
7007 	struct ice_fv_word *fv_ext;
7008 
7009 	if (LIST_EMPTY(fv_list))
7010 		return ICE_SUCCESS;
7011 
7012 	fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
7013 	fv_ext = fv->fv_ptr->ew;
7014 
7015 	LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
7016 		u8 i;
7017 
7018 		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
7019 			struct ice_fv_word *pr;
7020 			bool found = false;
7021 			u16 mask;
7022 			u8 j;
7023 
7024 			pr = &rg->r_group.pairs[i];
7025 			mask = rg->r_group.mask[i];
7026 
7027 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
7028 				if (fv_ext[j].prot_id == pr->prot_id &&
7029 				    fv_ext[j].off == pr->off) {
7030 					found = true;
7031 
7032 					/* Store index of field vector */
7033 					rg->fv_idx[i] = j;
7034 					rg->fv_mask[i] = mask;
7035 					break;
7036 				}
7037 
7038 			/* Protocol/offset could not be found, caller gave an
7039 			 * invalid pair
7040 			 */
7041 			if (!found)
7042 				return ICE_ERR_PARAM;
7043 		}
7044 	}
7045 
7046 	return ICE_SUCCESS;
7047 }
7048 
7049 /**
7050  * ice_find_free_recp_res_idx - find free result indexes for recipe
7051  * @hw: pointer to hardware structure
7052  * @profiles: bitmap of profiles that will be associated with the new recipe
7053  * @free_idx: pointer to variable to receive the free index bitmap
7054  *
7055  * The algorithm used here is:
7056  *	1. When creating a new recipe, create a set P which contains all
7057  *	   Profiles that will be associated with our new recipe
7058  *
7059  *	2. For each Profile p in set P:
7060  *	    a. Add all recipes associated with Profile p into set R
7061  *	    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
7062  *		[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
7063  *		i. Or just assume they all have the same possible indexes:
7064  *			44, 45, 46, 47
7065  *			i.e., PossibleIndexes = 0x0000F00000000000
7066  *
7067  *	3. For each Recipe r in set R:
7068  *	    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
7069  *	    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
7070  *
7071  *	FreeIndexes will contain the bits indicating the indexes free for use,
7072  *      then the code needs to update the recipe[r].used_result_idx_bits to
7073  *      indicate which indexes were selected for use by this recipe.
7074  */
7075 static u16
ice_find_free_recp_res_idx(struct ice_hw * hw,const ice_bitmap_t * profiles,ice_bitmap_t * free_idx)7076 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
7077 			   ice_bitmap_t *free_idx)
7078 {
7079 	ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
7080 	ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
7081 	ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
7082 	u16 bit;
7083 
7084 	ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
7085 	ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
7086 	ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
7087 	ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
7088 
7089 	ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
7090 
7091 	/* For each profile we are going to associate the recipe with, add the
7092 	 * recipes that are associated with that profile. This will give us
7093 	 * the set of recipes that our recipe may collide with. Also, determine
7094 	 * what possible result indexes are usable given this set of profiles.
7095 	 */
7096 	ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
7097 		ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
7098 			      ICE_MAX_NUM_RECIPES);
7099 		ice_and_bitmap(possible_idx, possible_idx,
7100 			       hw->switch_info->prof_res_bm[bit],
7101 			       ICE_MAX_FV_WORDS);
7102 	}
7103 
7104 	/* For each recipe that our new recipe may collide with, determine
7105 	 * which indexes have been used.
7106 	 */
7107 	ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
7108 		ice_or_bitmap(used_idx, used_idx,
7109 			      hw->switch_info->recp_list[bit].res_idxs,
7110 			      ICE_MAX_FV_WORDS);
7111 
7112 	ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
7113 
7114 	/* return number of free indexes */
7115 	return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
7116 }
7117 
7118 /**
7119  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
7120  * @hw: pointer to hardware structure
7121  * @rm: recipe management list entry
7122  * @profiles: bitmap of profiles that will be associated.
7123  */
7124 static enum ice_status
ice_add_sw_recipe(struct ice_hw * hw,struct ice_sw_recipe * rm,ice_bitmap_t * profiles)7125 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
7126 		  ice_bitmap_t *profiles)
7127 {
7128 	ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
7129 	struct ice_aqc_recipe_data_elem *tmp;
7130 	struct ice_aqc_recipe_data_elem *buf;
7131 	struct ice_recp_grp_entry *entry;
7132 	enum ice_status status;
7133 	u16 free_res_idx;
7134 	u16 recipe_count;
7135 	u8 chain_idx;
7136 	u8 recps = 0;
7137 
7138 	/* When more than one recipe are required, another recipe is needed to
7139 	 * chain them together. Matching a tunnel metadata ID takes up one of
7140 	 * the match fields in the chaining recipe reducing the number of
7141 	 * chained recipes by one.
7142 	 */
7143 	 /* check number of free result indices */
7144 	ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
7145 	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
7146 
7147 	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
7148 		  free_res_idx, rm->n_grp_count);
7149 
7150 	if (rm->n_grp_count > 1) {
7151 		if (rm->n_grp_count > free_res_idx)
7152 			return ICE_ERR_MAX_LIMIT;
7153 
7154 		rm->n_grp_count++;
7155 	}
7156 
7157 	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
7158 		return ICE_ERR_MAX_LIMIT;
7159 
7160 	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
7161 							    ICE_MAX_NUM_RECIPES,
7162 							    sizeof(*tmp));
7163 	if (!tmp)
7164 		return ICE_ERR_NO_MEMORY;
7165 
7166 	buf = (struct ice_aqc_recipe_data_elem *)
7167 		ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
7168 	if (!buf) {
7169 		status = ICE_ERR_NO_MEMORY;
7170 		goto err_mem;
7171 	}
7172 
7173 	ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
7174 	recipe_count = ICE_MAX_NUM_RECIPES;
7175 	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
7176 				   NULL);
7177 	if (status || recipe_count == 0)
7178 		goto err_unroll;
7179 
7180 	/* Allocate the recipe resources, and configure them according to the
7181 	 * match fields from protocol headers and extracted field vectors.
7182 	 */
7183 	chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
7184 	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7185 		u8 i;
7186 
7187 		status = ice_alloc_recipe(hw, &entry->rid);
7188 		if (status)
7189 			goto err_unroll;
7190 
7191 		/* Clear the result index of the located recipe, as this will be
7192 		 * updated, if needed, later in the recipe creation process.
7193 		 */
7194 		tmp[0].content.result_indx = 0;
7195 
7196 		buf[recps] = tmp[0];
7197 		buf[recps].recipe_indx = (u8)entry->rid;
7198 		/* if the recipe is a non-root recipe RID should be programmed
7199 		 * as 0 for the rules to be applied correctly.
7200 		 */
7201 		buf[recps].content.rid = 0;
7202 		ice_memset(&buf[recps].content.lkup_indx, 0,
7203 			   sizeof(buf[recps].content.lkup_indx),
7204 			   ICE_NONDMA_MEM);
7205 
7206 		/* All recipes use look-up index 0 to match switch ID. */
7207 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7208 		buf[recps].content.mask[0] =
7209 			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7210 		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
7211 		 * to be 0
7212 		 */
7213 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7214 			buf[recps].content.lkup_indx[i] = 0x80;
7215 			buf[recps].content.mask[i] = 0;
7216 		}
7217 
7218 		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
7219 			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
7220 			buf[recps].content.mask[i + 1] =
7221 				CPU_TO_LE16(entry->fv_mask[i]);
7222 		}
7223 
7224 		if (rm->n_grp_count > 1) {
7225 			/* Checks to see if there really is a valid result index
7226 			 * that can be used.
7227 			 */
7228 			if (chain_idx >= ICE_MAX_FV_WORDS) {
7229 				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
7230 				status = ICE_ERR_MAX_LIMIT;
7231 				goto err_unroll;
7232 			}
7233 
7234 			entry->chain_idx = chain_idx;
7235 			buf[recps].content.result_indx =
7236 				ICE_AQ_RECIPE_RESULT_EN |
7237 				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
7238 				 ICE_AQ_RECIPE_RESULT_DATA_M);
7239 			ice_clear_bit(chain_idx, result_idx_bm);
7240 			chain_idx = ice_find_first_bit(result_idx_bm,
7241 						       ICE_MAX_FV_WORDS);
7242 		}
7243 
7244 		/* fill recipe dependencies */
7245 		ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
7246 				ICE_MAX_NUM_RECIPES);
7247 		ice_set_bit(buf[recps].recipe_indx,
7248 			    (ice_bitmap_t *)buf[recps].recipe_bitmap);
7249 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7250 		recps++;
7251 	}
7252 
7253 	if (rm->n_grp_count == 1) {
7254 		rm->root_rid = buf[0].recipe_indx;
7255 		ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
7256 		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
7257 		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
7258 			ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
7259 				   sizeof(buf[0].recipe_bitmap),
7260 				   ICE_NONDMA_TO_NONDMA);
7261 		} else {
7262 			status = ICE_ERR_BAD_PTR;
7263 			goto err_unroll;
7264 		}
7265 		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
7266 		 * the recipe which is getting created if specified
7267 		 * by user. Usually any advanced switch filter, which results
7268 		 * into new extraction sequence, ended up creating a new recipe
7269 		 * of type ROOT and usually recipes are associated with profiles
7270 		 * Switch rule referreing newly created recipe, needs to have
7271 		 * either/or 'fwd' or 'join' priority, otherwise switch rule
7272 		 * evaluation will not happen correctly. In other words, if
7273 		 * switch rule to be evaluated on priority basis, then recipe
7274 		 * needs to have priority, otherwise it will be evaluated last.
7275 		 */
7276 		buf[0].content.act_ctrl_fwd_priority = rm->priority;
7277 	} else {
7278 		struct ice_recp_grp_entry *last_chain_entry;
7279 		u16 rid, i;
7280 
7281 		/* Allocate the last recipe that will chain the outcomes of the
7282 		 * other recipes together
7283 		 */
7284 		status = ice_alloc_recipe(hw, &rid);
7285 		if (status)
7286 			goto err_unroll;
7287 
7288 		buf[recps].recipe_indx = (u8)rid;
7289 		buf[recps].content.rid = (u8)rid;
7290 		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
7291 		/* the new entry created should also be part of rg_list to
7292 		 * make sure we have complete recipe
7293 		 */
7294 		last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
7295 			sizeof(*last_chain_entry));
7296 		if (!last_chain_entry) {
7297 			status = ICE_ERR_NO_MEMORY;
7298 			goto err_unroll;
7299 		}
7300 		last_chain_entry->rid = rid;
7301 		ice_memset(&buf[recps].content.lkup_indx, 0,
7302 			   sizeof(buf[recps].content.lkup_indx),
7303 			   ICE_NONDMA_MEM);
7304 		/* All recipes use look-up index 0 to match switch ID. */
7305 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
7306 		buf[recps].content.mask[0] =
7307 			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
7308 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
7309 			buf[recps].content.lkup_indx[i] =
7310 				ICE_AQ_RECIPE_LKUP_IGNORE;
7311 			buf[recps].content.mask[i] = 0;
7312 		}
7313 
7314 		i = 1;
7315 		/* update r_bitmap with the recp that is used for chaining */
7316 		ice_set_bit(rid, rm->r_bitmap);
7317 		/* this is the recipe that chains all the other recipes so it
7318 		 * should not have a chaining ID to indicate the same
7319 		 */
7320 		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
7321 		LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
7322 				    l_entry) {
7323 			last_chain_entry->fv_idx[i] = entry->chain_idx;
7324 			buf[recps].content.lkup_indx[i] = entry->chain_idx;
7325 			buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
7326 			ice_set_bit(entry->rid, rm->r_bitmap);
7327 		}
7328 		LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
7329 		if (sizeof(buf[recps].recipe_bitmap) >=
7330 		    sizeof(rm->r_bitmap)) {
7331 			ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
7332 				   sizeof(buf[recps].recipe_bitmap),
7333 				   ICE_NONDMA_TO_NONDMA);
7334 		} else {
7335 			status = ICE_ERR_BAD_PTR;
7336 			goto err_unroll;
7337 		}
7338 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
7339 
7340 		recps++;
7341 		rm->root_rid = (u8)rid;
7342 	}
7343 	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7344 	if (status)
7345 		goto err_unroll;
7346 
7347 	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
7348 	ice_release_change_lock(hw);
7349 	if (status)
7350 		goto err_unroll;
7351 
7352 	/* Every recipe that just got created add it to the recipe
7353 	 * book keeping list
7354 	 */
7355 	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
7356 		struct ice_switch_info *sw = hw->switch_info;
7357 		bool is_root, idx_found = false;
7358 		struct ice_sw_recipe *recp;
7359 		u16 idx, buf_idx = 0;
7360 
7361 		/* find buffer index for copying some data */
7362 		for (idx = 0; idx < rm->n_grp_count; idx++)
7363 			if (buf[idx].recipe_indx == entry->rid) {
7364 				buf_idx = idx;
7365 				idx_found = true;
7366 			}
7367 
7368 		if (!idx_found) {
7369 			status = ICE_ERR_OUT_OF_RANGE;
7370 			goto err_unroll;
7371 		}
7372 
7373 		recp = &sw->recp_list[entry->rid];
7374 		is_root = (rm->root_rid == entry->rid);
7375 		recp->is_root = is_root;
7376 
7377 		recp->root_rid = entry->rid;
7378 		recp->big_recp = (is_root && rm->n_grp_count > 1);
7379 
7380 		ice_memcpy(&recp->ext_words, entry->r_group.pairs,
7381 			   entry->r_group.n_val_pairs *
7382 			   sizeof(struct ice_fv_word),
7383 			   ICE_NONDMA_TO_NONDMA);
7384 
7385 		ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
7386 			   sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
7387 
7388 		/* Copy non-result fv index values and masks to recipe. This
7389 		 * call will also update the result recipe bitmask.
7390 		 */
7391 		ice_collect_result_idx(&buf[buf_idx], recp);
7392 
7393 		/* for non-root recipes, also copy to the root, this allows
7394 		 * easier matching of a complete chained recipe
7395 		 */
7396 		if (!is_root)
7397 			ice_collect_result_idx(&buf[buf_idx],
7398 					       &sw->recp_list[rm->root_rid]);
7399 
7400 		recp->n_ext_words = entry->r_group.n_val_pairs;
7401 		recp->chain_idx = entry->chain_idx;
7402 		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
7403 		recp->n_grp_count = rm->n_grp_count;
7404 		recp->tun_type = rm->tun_type;
7405 		recp->recp_created = true;
7406 	}
7407 	rm->root_buf = buf;
7408 	ice_free(hw, tmp);
7409 	return status;
7410 
7411 err_unroll:
7412 err_mem:
7413 	ice_free(hw, tmp);
7414 	ice_free(hw, buf);
7415 	return status;
7416 }
7417 
7418 /**
7419  * ice_create_recipe_group - creates recipe group
7420  * @hw: pointer to hardware structure
7421  * @rm: recipe management list entry
7422  * @lkup_exts: lookup elements
7423  */
7424 static enum ice_status
ice_create_recipe_group(struct ice_hw * hw,struct ice_sw_recipe * rm,struct ice_prot_lkup_ext * lkup_exts)7425 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
7426 			struct ice_prot_lkup_ext *lkup_exts)
7427 {
7428 	enum ice_status status;
7429 	u8 recp_count = 0;
7430 
7431 	rm->n_grp_count = 0;
7432 
7433 	/* Create recipes for words that are marked not done by packing them
7434 	 * as best fit.
7435 	 */
7436 	status = ice_create_first_fit_recp_def(hw, lkup_exts,
7437 					       &rm->rg_list, &recp_count);
7438 	if (!status) {
7439 		rm->n_grp_count += recp_count;
7440 		rm->n_ext_words = lkup_exts->n_val_words;
7441 		ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
7442 			   sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
7443 		ice_memcpy(rm->word_masks, lkup_exts->field_mask,
7444 			   sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
7445 	}
7446 
7447 	return status;
7448 }
7449 
7450 /**
7451  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
7452  * @hw: pointer to hardware structure
7453  * @lkups: lookup elements or match criteria for the advanced recipe, one
7454  *	   structure per protocol header
7455  * @lkups_cnt: number of protocols
7456  * @bm: bitmap of field vectors to consider
7457  * @fv_list: pointer to a list that holds the returned field vectors
7458  */
7459 static enum ice_status
ice_get_fv(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,ice_bitmap_t * bm,struct LIST_HEAD_TYPE * fv_list)7460 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7461 	   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
7462 {
7463 	enum ice_status status;
7464 	u8 *prot_ids;
7465 	u16 i;
7466 
7467 	if (!lkups_cnt)
7468 		return ICE_SUCCESS;
7469 
7470 	prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
7471 	if (!prot_ids)
7472 		return ICE_ERR_NO_MEMORY;
7473 
7474 	for (i = 0; i < lkups_cnt; i++)
7475 		if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
7476 			status = ICE_ERR_CFG;
7477 			goto free_mem;
7478 		}
7479 
7480 	/* Find field vectors that include all specified protocol types */
7481 	status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
7482 
7483 free_mem:
7484 	ice_free(hw, prot_ids);
7485 	return status;
7486 }
7487 
7488 /**
7489  * ice_tun_type_match_word - determine if tun type needs a match mask
7490  * @tun_type: tunnel type
7491  * @mask: mask to be used for the tunnel
7492  */
ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type,u16 * mask)7493 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
7494 {
7495 	switch (tun_type) {
7496 	case ICE_SW_TUN_VXLAN_GPE:
7497 	case ICE_SW_TUN_GENEVE:
7498 	case ICE_SW_TUN_VXLAN:
7499 	case ICE_SW_TUN_NVGRE:
7500 	case ICE_SW_TUN_UDP:
7501 	case ICE_ALL_TUNNELS:
7502 	case ICE_SW_TUN_AND_NON_TUN_QINQ:
7503 	case ICE_NON_TUN_QINQ:
7504 	case ICE_SW_TUN_PPPOE_QINQ:
7505 	case ICE_SW_TUN_PPPOE_PAY_QINQ:
7506 	case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7507 	case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7508 		*mask = ICE_TUN_FLAG_MASK;
7509 		return true;
7510 
7511 	case ICE_SW_TUN_GENEVE_VLAN:
7512 	case ICE_SW_TUN_VXLAN_VLAN:
7513 		*mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
7514 		return true;
7515 
7516 	default:
7517 		*mask = 0;
7518 		return false;
7519 	}
7520 }
7521 
7522 /**
7523  * ice_add_special_words - Add words that are not protocols, such as metadata
7524  * @rinfo: other information regarding the rule e.g. priority and action info
7525  * @lkup_exts: lookup word structure
7526  */
7527 static enum ice_status
ice_add_special_words(struct ice_adv_rule_info * rinfo,struct ice_prot_lkup_ext * lkup_exts)7528 ice_add_special_words(struct ice_adv_rule_info *rinfo,
7529 		      struct ice_prot_lkup_ext *lkup_exts)
7530 {
7531 	u16 mask;
7532 
7533 	/* If this is a tunneled packet, then add recipe index to match the
7534 	 * tunnel bit in the packet metadata flags.
7535 	 */
7536 	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
7537 		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
7538 			u8 word = lkup_exts->n_val_words++;
7539 
7540 			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
7541 			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
7542 			lkup_exts->field_mask[word] = mask;
7543 		} else {
7544 			return ICE_ERR_MAX_LIMIT;
7545 		}
7546 	}
7547 
7548 	return ICE_SUCCESS;
7549 }
7550 
7551 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
7552  * @hw: pointer to hardware structure
7553  * @rinfo: other information regarding the rule e.g. priority and action info
7554  * @bm: pointer to memory for returning the bitmap of field vectors
7555  */
7556 static void
ice_get_compat_fv_bitmap(struct ice_hw * hw,struct ice_adv_rule_info * rinfo,ice_bitmap_t * bm)7557 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
7558 			 ice_bitmap_t *bm)
7559 {
7560 	enum ice_prof_type prof_type;
7561 
7562 	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
7563 
7564 	switch (rinfo->tun_type) {
7565 	case ICE_NON_TUN:
7566 	case ICE_NON_TUN_QINQ:
7567 		prof_type = ICE_PROF_NON_TUN;
7568 		break;
7569 	case ICE_ALL_TUNNELS:
7570 		prof_type = ICE_PROF_TUN_ALL;
7571 		break;
7572 	case ICE_SW_TUN_VXLAN_GPE:
7573 	case ICE_SW_TUN_GENEVE:
7574 	case ICE_SW_TUN_GENEVE_VLAN:
7575 	case ICE_SW_TUN_VXLAN:
7576 	case ICE_SW_TUN_VXLAN_VLAN:
7577 	case ICE_SW_TUN_UDP:
7578 	case ICE_SW_TUN_GTP:
7579 		prof_type = ICE_PROF_TUN_UDP;
7580 		break;
7581 	case ICE_SW_TUN_NVGRE:
7582 		prof_type = ICE_PROF_TUN_GRE;
7583 		break;
7584 	case ICE_SW_TUN_PPPOE:
7585 	case ICE_SW_TUN_PPPOE_QINQ:
7586 		prof_type = ICE_PROF_TUN_PPPOE;
7587 		break;
7588 	case ICE_SW_TUN_PPPOE_PAY:
7589 	case ICE_SW_TUN_PPPOE_PAY_QINQ:
7590 		ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
7591 		return;
7592 	case ICE_SW_TUN_PPPOE_IPV4:
7593 	case ICE_SW_TUN_PPPOE_IPV4_QINQ:
7594 		ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
7595 		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7596 		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7597 		return;
7598 	case ICE_SW_TUN_PPPOE_IPV4_TCP:
7599 		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
7600 		return;
7601 	case ICE_SW_TUN_PPPOE_IPV4_UDP:
7602 		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
7603 		return;
7604 	case ICE_SW_TUN_PPPOE_IPV6:
7605 	case ICE_SW_TUN_PPPOE_IPV6_QINQ:
7606 		ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
7607 		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7608 		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7609 		return;
7610 	case ICE_SW_TUN_PPPOE_IPV6_TCP:
7611 		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
7612 		return;
7613 	case ICE_SW_TUN_PPPOE_IPV6_UDP:
7614 		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
7615 		return;
7616 	case ICE_SW_TUN_PROFID_IPV6_ESP:
7617 	case ICE_SW_TUN_IPV6_ESP:
7618 		ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
7619 		return;
7620 	case ICE_SW_TUN_PROFID_IPV6_AH:
7621 	case ICE_SW_TUN_IPV6_AH:
7622 		ice_set_bit(ICE_PROFID_IPV6_AH, bm);
7623 		return;
7624 	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7625 	case ICE_SW_TUN_IPV6_L2TPV3:
7626 		ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
7627 		return;
7628 	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7629 	case ICE_SW_TUN_IPV6_NAT_T:
7630 		ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
7631 		return;
7632 	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7633 		ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
7634 		return;
7635 	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7636 		ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
7637 		return;
7638 	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7639 		ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
7640 		return;
7641 	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7642 		ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
7643 		return;
7644 	case ICE_SW_TUN_IPV4_NAT_T:
7645 		ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
7646 		return;
7647 	case ICE_SW_TUN_IPV4_L2TPV3:
7648 		ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
7649 		return;
7650 	case ICE_SW_TUN_IPV4_ESP:
7651 		ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
7652 		return;
7653 	case ICE_SW_TUN_IPV4_AH:
7654 		ice_set_bit(ICE_PROFID_IPV4_AH, bm);
7655 		return;
7656 	case ICE_SW_IPV4_TCP:
7657 		ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
7658 		return;
7659 	case ICE_SW_IPV4_UDP:
7660 		ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
7661 		return;
7662 	case ICE_SW_IPV6_TCP:
7663 		ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
7664 		return;
7665 	case ICE_SW_IPV6_UDP:
7666 		ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
7667 		return;
7668 	case ICE_SW_TUN_IPV4_GTPU_NO_PAY:
7669 		ice_set_bit(ICE_PROFID_IPV4_GTPU_TEID, bm);
7670 		return;
7671 	case ICE_SW_TUN_IPV6_GTPU_NO_PAY:
7672 		ice_set_bit(ICE_PROFID_IPV6_GTPU_TEID, bm);
7673 		return;
7674 	case ICE_SW_TUN_IPV4_GTPU_IPV4:
7675 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
7676 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7677 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7678 		return;
7679 	case ICE_SW_TUN_IPV4_GTPU_IPV4_UDP:
7680 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
7681 		return;
7682 	case ICE_SW_TUN_IPV4_GTPU_IPV4_TCP:
7683 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
7684 		return;
7685 	case ICE_SW_TUN_IPV4_GTPU_EH_IPV4:
7686 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
7687 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7688 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7689 		return;
7690 	case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP:
7691 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
7692 		return;
7693 	case ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP:
7694 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
7695 		return;
7696 	case ICE_SW_TUN_IPV6_GTPU_IPV4:
7697 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
7698 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7699 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7700 		return;
7701 	case ICE_SW_TUN_IPV6_GTPU_IPV4_UDP:
7702 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
7703 		return;
7704 	case ICE_SW_TUN_IPV6_GTPU_IPV4_TCP:
7705 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
7706 		return;
7707 	case ICE_SW_TUN_IPV6_GTPU_EH_IPV4:
7708 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
7709 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7710 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7711 		return;
7712 	case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP:
7713 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
7714 		return;
7715 	case ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP:
7716 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
7717 		return;
7718 	case ICE_SW_TUN_IPV4_GTPU_IPV6:
7719 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
7720 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7721 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7722 		return;
7723 	case ICE_SW_TUN_IPV4_GTPU_IPV6_UDP:
7724 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
7725 		return;
7726 	case ICE_SW_TUN_IPV4_GTPU_IPV6_TCP:
7727 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
7728 		return;
7729 	case ICE_SW_TUN_IPV4_GTPU_EH_IPV6:
7730 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
7731 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7732 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7733 		return;
7734 	case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP:
7735 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
7736 		return;
7737 	case ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP:
7738 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
7739 		return;
7740 	case ICE_SW_TUN_IPV6_GTPU_IPV6:
7741 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
7742 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7743 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7744 		return;
7745 	case ICE_SW_TUN_IPV6_GTPU_IPV6_UDP:
7746 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
7747 		return;
7748 	case ICE_SW_TUN_IPV6_GTPU_IPV6_TCP:
7749 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
7750 		return;
7751 	case ICE_SW_TUN_IPV6_GTPU_EH_IPV6:
7752 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
7753 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7754 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7755 		return;
7756 	case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP:
7757 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
7758 		return;
7759 	case ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP:
7760 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
7761 		return;
7762 	case ICE_SW_TUN_AND_NON_TUN:
7763 	case ICE_SW_TUN_AND_NON_TUN_QINQ:
7764 	default:
7765 		prof_type = ICE_PROF_ALL;
7766 		break;
7767 	}
7768 
7769 	ice_get_sw_fv_bitmap(hw, prof_type, bm);
7770 }
7771 
7772 /**
7773  * ice_is_prof_rule - determine if rule type is a profile rule
7774  * @type: the rule type
7775  *
7776  * if the rule type is a profile rule, that means that there no field value
7777  * match required, in this case just a profile hit is required.
7778  */
ice_is_prof_rule(enum ice_sw_tunnel_type type)7779 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7780 {
7781 	switch (type) {
7782 	case ICE_SW_TUN_AND_NON_TUN:
7783 	case ICE_SW_TUN_PROFID_IPV6_ESP:
7784 	case ICE_SW_TUN_PROFID_IPV6_AH:
7785 	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7786 	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7787 	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7788 	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7789 	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7790 	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7791 		return true;
7792 	default:
7793 		break;
7794 	}
7795 
7796 	return false;
7797 }
7798 
7799 /**
7800  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7801  * @hw: pointer to hardware structure
7802  * @lkups: lookup elements or match criteria for the advanced recipe, one
7803  *  structure per protocol header
7804  * @lkups_cnt: number of protocols
7805  * @rinfo: other information regarding the rule e.g. priority and action info
7806  * @rid: return the recipe ID of the recipe created
7807  */
7808 static enum ice_status
ice_add_adv_recipe(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,struct ice_adv_rule_info * rinfo,u16 * rid)7809 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7810 		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7811 {
7812 	ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7813 	ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7814 	struct ice_prot_lkup_ext *lkup_exts;
7815 	struct ice_recp_grp_entry *r_entry;
7816 	struct ice_sw_fv_list_entry *fvit;
7817 	struct ice_recp_grp_entry *r_tmp;
7818 	struct ice_sw_fv_list_entry *tmp;
7819 	enum ice_status status = ICE_SUCCESS;
7820 	struct ice_sw_recipe *rm;
7821 	u8 i;
7822 
7823 	if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7824 		return ICE_ERR_PARAM;
7825 
7826 	lkup_exts = (struct ice_prot_lkup_ext *)
7827 		ice_malloc(hw, sizeof(*lkup_exts));
7828 	if (!lkup_exts)
7829 		return ICE_ERR_NO_MEMORY;
7830 
7831 	/* Determine the number of words to be matched and if it exceeds a
7832 	 * recipe's restrictions
7833 	 */
7834 	for (i = 0; i < lkups_cnt; i++) {
7835 		u16 count;
7836 
7837 		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7838 			status = ICE_ERR_CFG;
7839 			goto err_free_lkup_exts;
7840 		}
7841 
7842 		count = ice_fill_valid_words(&lkups[i], lkup_exts);
7843 		if (!count) {
7844 			status = ICE_ERR_CFG;
7845 			goto err_free_lkup_exts;
7846 		}
7847 	}
7848 
7849 	rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7850 	if (!rm) {
7851 		status = ICE_ERR_NO_MEMORY;
7852 		goto err_free_lkup_exts;
7853 	}
7854 
7855 	/* Get field vectors that contain fields extracted from all the protocol
7856 	 * headers being programmed.
7857 	 */
7858 	INIT_LIST_HEAD(&rm->fv_list);
7859 	INIT_LIST_HEAD(&rm->rg_list);
7860 
7861 	/* Get bitmap of field vectors (profiles) that are compatible with the
7862 	 * rule request; only these will be searched in the subsequent call to
7863 	 * ice_get_fv.
7864 	 */
7865 	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7866 
7867 	status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7868 	if (status)
7869 		goto err_unroll;
7870 
7871 	/* Create any special protocol/offset pairs, such as looking at tunnel
7872 	 * bits by extracting metadata
7873 	 */
7874 	status = ice_add_special_words(rinfo, lkup_exts);
7875 	if (status)
7876 		goto err_free_lkup_exts;
7877 
7878 	/* Group match words into recipes using preferred recipe grouping
7879 	 * criteria.
7880 	 */
7881 	status = ice_create_recipe_group(hw, rm, lkup_exts);
7882 	if (status)
7883 		goto err_unroll;
7884 
7885 	/* set the recipe priority if specified */
7886 	rm->priority = (u8)rinfo->priority;
7887 
7888 	/* Find offsets from the field vector. Pick the first one for all the
7889 	 * recipes.
7890 	 */
7891 	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7892 	if (status)
7893 		goto err_unroll;
7894 
7895 	/* An empty FV list means to use all the profiles returned in the
7896 	 * profile bitmap
7897 	 */
7898 	if (LIST_EMPTY(&rm->fv_list)) {
7899 		u16 j;
7900 
7901 		ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7902 			struct ice_sw_fv_list_entry *fvl;
7903 
7904 			fvl = (struct ice_sw_fv_list_entry *)
7905 				ice_malloc(hw, sizeof(*fvl));
7906 			if (!fvl)
7907 				goto err_unroll;
7908 			fvl->fv_ptr = NULL;
7909 			fvl->profile_id = j;
7910 			LIST_ADD(&fvl->list_entry, &rm->fv_list);
7911 		}
7912 	}
7913 
7914 	/* get bitmap of all profiles the recipe will be associated with */
7915 	ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7916 	LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7917 			    list_entry) {
7918 		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7919 		ice_set_bit((u16)fvit->profile_id, profiles);
7920 	}
7921 
7922 	/* Look for a recipe which matches our requested fv / mask list */
7923 	*rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type, rinfo->priority);
7924 	if (*rid < ICE_MAX_NUM_RECIPES)
7925 		/* Success if found a recipe that match the existing criteria */
7926 		goto err_unroll;
7927 
7928 	rm->tun_type = rinfo->tun_type;
7929 	/* Recipe we need does not exist, add a recipe */
7930 	status = ice_add_sw_recipe(hw, rm, profiles);
7931 	if (status)
7932 		goto err_unroll;
7933 
7934 	/* Associate all the recipes created with all the profiles in the
7935 	 * common field vector.
7936 	 */
7937 	LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7938 			    list_entry) {
7939 		ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
7940 		u16 j;
7941 
7942 		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7943 						      (u8 *)r_bitmap, NULL);
7944 		if (status)
7945 			goto err_unroll;
7946 
7947 		ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7948 			      ICE_MAX_NUM_RECIPES);
7949 		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7950 		if (status)
7951 			goto err_unroll;
7952 
7953 		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7954 						      (u8 *)r_bitmap,
7955 						      NULL);
7956 		ice_release_change_lock(hw);
7957 
7958 		if (status)
7959 			goto err_unroll;
7960 
7961 		/* Update profile to recipe bitmap array */
7962 		ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7963 			      ICE_MAX_NUM_RECIPES);
7964 
7965 		/* Update recipe to profile bitmap array */
7966 		ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7967 			ice_set_bit((u16)fvit->profile_id,
7968 				    recipe_to_profile[j]);
7969 	}
7970 
7971 	*rid = rm->root_rid;
7972 	ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7973 		   lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
7974 err_unroll:
7975 	LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7976 				 ice_recp_grp_entry, l_entry) {
7977 		LIST_DEL(&r_entry->l_entry);
7978 		ice_free(hw, r_entry);
7979 	}
7980 
7981 	LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7982 				 list_entry) {
7983 		LIST_DEL(&fvit->list_entry);
7984 		ice_free(hw, fvit);
7985 	}
7986 
7987 	if (rm->root_buf)
7988 		ice_free(hw, rm->root_buf);
7989 
7990 	ice_free(hw, rm);
7991 
7992 err_free_lkup_exts:
7993 	ice_free(hw, lkup_exts);
7994 
7995 	return status;
7996 }
7997 
7998 /**
7999  * ice_find_dummy_packet - find dummy packet by tunnel type
8000  *
8001  * @lkups: lookup elements or match criteria for the advanced recipe, one
8002  *	   structure per protocol header
8003  * @lkups_cnt: number of protocols
8004  * @tun_type: tunnel type from the match criteria
8005  * @pkt: dummy packet to fill according to filter match criteria
8006  * @pkt_len: packet length of dummy packet
8007  * @offsets: pointer to receive the pointer to the offsets for the packet
8008  */
8009 static void
ice_find_dummy_packet(struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,enum ice_sw_tunnel_type tun_type,const u8 ** pkt,u16 * pkt_len,const struct ice_dummy_pkt_offsets ** offsets)8010 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8011 		      enum ice_sw_tunnel_type tun_type, const u8 **pkt,
8012 		      u16 *pkt_len,
8013 		      const struct ice_dummy_pkt_offsets **offsets)
8014 {
8015 	bool tcp = false, udp = false, ipv6 = false, vlan = false;
8016 	bool gre = false, mpls = false;
8017 	u16 i;
8018 
8019 	for (i = 0; i < lkups_cnt; i++) {
8020 		if (lkups[i].type == ICE_UDP_ILOS)
8021 			udp = true;
8022 		else if (lkups[i].type == ICE_TCP_IL)
8023 			tcp = true;
8024 		else if (lkups[i].type == ICE_IPV6_OFOS)
8025 			ipv6 = true;
8026 		else if (lkups[i].type == ICE_VLAN_OFOS)
8027 			vlan = true;
8028 		else if (lkups[i].type == ICE_ETYPE_OL &&
8029 			 lkups[i].h_u.ethertype.ethtype_id ==
8030 				CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
8031 			 lkups[i].m_u.ethertype.ethtype_id ==
8032 				CPU_TO_BE16(0xFFFF))
8033 			ipv6 = true;
8034 		else if (lkups[i].type == ICE_IPV4_OFOS &&
8035 			 lkups[i].h_u.ipv4_hdr.protocol ==
8036 				ICE_IPV4_NVGRE_PROTO_ID &&
8037 			 lkups[i].m_u.ipv4_hdr.protocol ==
8038 				0xFF)
8039 			gre = true;
8040 		else if (lkups[i].type == ICE_PPPOE &&
8041 			 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
8042 				CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
8043 			 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
8044 				0xFFFF)
8045 			ipv6 = true;
8046 		else if (lkups[i].type == ICE_IPV4_IL &&
8047 			 lkups[i].h_u.ipv4_hdr.protocol ==
8048 				ICE_TCP_PROTO_ID &&
8049 			 lkups[i].m_u.ipv4_hdr.protocol ==
8050 				0xFF)
8051 			tcp = true;
8052 		else if (lkups[i].type == ICE_ETYPE_OL &&
8053 			 lkups[i].h_u.ethertype.ethtype_id ==
8054 				CPU_TO_BE16(ICE_MPLS_ETHER_ID) &&
8055 			 lkups[i].m_u.ethertype.ethtype_id == 0xFFFF)
8056 			mpls = true;
8057 	}
8058 
8059 	if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
8060 	     tun_type == ICE_NON_TUN_QINQ) && ipv6) {
8061 		if (tcp) {
8062 			*pkt = dummy_qinq_ipv6_tcp_pkt;
8063 			*pkt_len = sizeof(dummy_qinq_ipv6_tcp_pkt);
8064 			*offsets = dummy_qinq_ipv6_tcp_packet_offsets;
8065 			return;
8066 		}
8067 
8068 		if (udp) {
8069 			*pkt = dummy_qinq_ipv6_udp_pkt;
8070 			*pkt_len = sizeof(dummy_qinq_ipv6_udp_pkt);
8071 			*offsets = dummy_qinq_ipv6_udp_packet_offsets;
8072 			return;
8073 		}
8074 
8075 		*pkt = dummy_qinq_ipv6_pkt;
8076 		*pkt_len = sizeof(dummy_qinq_ipv6_pkt);
8077 		*offsets = dummy_qinq_ipv6_packet_offsets;
8078 		return;
8079 	} else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
8080 			   tun_type == ICE_NON_TUN_QINQ) {
8081 		if (tcp) {
8082 			*pkt = dummy_qinq_ipv4_tcp_pkt;
8083 			*pkt_len = sizeof(dummy_qinq_ipv4_tcp_pkt);
8084 			*offsets = dummy_qinq_ipv4_tcp_packet_offsets;
8085 			return;
8086 		}
8087 
8088 		if (udp) {
8089 			*pkt = dummy_qinq_ipv4_udp_pkt;
8090 			*pkt_len = sizeof(dummy_qinq_ipv4_udp_pkt);
8091 			*offsets = dummy_qinq_ipv4_udp_packet_offsets;
8092 			return;
8093 		}
8094 
8095 		*pkt = dummy_qinq_ipv4_pkt;
8096 		*pkt_len = sizeof(dummy_qinq_ipv4_pkt);
8097 		*offsets = dummy_qinq_ipv4_packet_offsets;
8098 		return;
8099 	}
8100 
8101 	if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
8102 		*pkt = dummy_qinq_pppoe_ipv6_packet;
8103 		*pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
8104 		*offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
8105 		return;
8106 	} else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
8107 		*pkt = dummy_qinq_pppoe_ipv4_pkt;
8108 		*pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
8109 		*offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
8110 		return;
8111 	} else if (tun_type == ICE_SW_TUN_PPPOE_QINQ && ipv6) {
8112 		*pkt = dummy_qinq_pppoe_ipv6_packet;
8113 		*pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
8114 		*offsets = dummy_qinq_pppoe_packet_offsets;
8115 		return;
8116 	} else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
8117 			tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
8118 		*pkt = dummy_qinq_pppoe_ipv4_pkt;
8119 		*pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
8120 		*offsets = dummy_qinq_pppoe_packet_offsets;
8121 		return;
8122 	}
8123 
8124 	if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
8125 		*pkt = dummy_ipv4_gtpu_ipv4_packet;
8126 		*pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
8127 		*offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
8128 		return;
8129 	} else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
8130 		*pkt = dummy_ipv6_gtp_packet;
8131 		*pkt_len = sizeof(dummy_ipv6_gtp_packet);
8132 		*offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
8133 		return;
8134 	}
8135 
8136 	if (tun_type == ICE_SW_TUN_IPV4_ESP) {
8137 		*pkt = dummy_ipv4_esp_pkt;
8138 		*pkt_len = sizeof(dummy_ipv4_esp_pkt);
8139 		*offsets = dummy_ipv4_esp_packet_offsets;
8140 		return;
8141 	}
8142 
8143 	if (tun_type == ICE_SW_TUN_IPV6_ESP) {
8144 		*pkt = dummy_ipv6_esp_pkt;
8145 		*pkt_len = sizeof(dummy_ipv6_esp_pkt);
8146 		*offsets = dummy_ipv6_esp_packet_offsets;
8147 		return;
8148 	}
8149 
8150 	if (tun_type == ICE_SW_TUN_IPV4_AH) {
8151 		*pkt = dummy_ipv4_ah_pkt;
8152 		*pkt_len = sizeof(dummy_ipv4_ah_pkt);
8153 		*offsets = dummy_ipv4_ah_packet_offsets;
8154 		return;
8155 	}
8156 
8157 	if (tun_type == ICE_SW_TUN_IPV6_AH) {
8158 		*pkt = dummy_ipv6_ah_pkt;
8159 		*pkt_len = sizeof(dummy_ipv6_ah_pkt);
8160 		*offsets = dummy_ipv6_ah_packet_offsets;
8161 		return;
8162 	}
8163 
8164 	if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
8165 		*pkt = dummy_ipv4_nat_pkt;
8166 		*pkt_len = sizeof(dummy_ipv4_nat_pkt);
8167 		*offsets = dummy_ipv4_nat_packet_offsets;
8168 		return;
8169 	}
8170 
8171 	if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
8172 		*pkt = dummy_ipv6_nat_pkt;
8173 		*pkt_len = sizeof(dummy_ipv6_nat_pkt);
8174 		*offsets = dummy_ipv6_nat_packet_offsets;
8175 		return;
8176 	}
8177 
8178 	if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
8179 		*pkt = dummy_ipv4_l2tpv3_pkt;
8180 		*pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
8181 		*offsets = dummy_ipv4_l2tpv3_packet_offsets;
8182 		return;
8183 	}
8184 
8185 	if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
8186 		*pkt = dummy_ipv6_l2tpv3_pkt;
8187 		*pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
8188 		*offsets = dummy_ipv6_l2tpv3_packet_offsets;
8189 		return;
8190 	}
8191 
8192 	if (tun_type == ICE_SW_TUN_GTP) {
8193 		*pkt = dummy_udp_gtp_packet;
8194 		*pkt_len = sizeof(dummy_udp_gtp_packet);
8195 		*offsets = dummy_udp_gtp_packet_offsets;
8196 		return;
8197 	}
8198 
8199 	if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4 ||
8200 	    tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4) {
8201 		*pkt = dummy_ipv4_gtpu_ipv4_packet;
8202 		*pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
8203 		*offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
8204 		return;
8205 	}
8206 
8207 	if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_UDP ||
8208 	    tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP) {
8209 		*pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
8210 		*pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
8211 		*offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
8212 		return;
8213 	}
8214 
8215 	if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4_TCP ||
8216 	    tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP) {
8217 		*pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
8218 		*pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
8219 		*offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
8220 		return;
8221 	}
8222 
8223 	if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6 ||
8224 	    tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6) {
8225 		*pkt = dummy_ipv4_gtpu_ipv6_packet;
8226 		*pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
8227 		*offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
8228 		return;
8229 	}
8230 
8231 	if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_UDP ||
8232 	    tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP) {
8233 		*pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
8234 		*pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
8235 		*offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
8236 		return;
8237 	}
8238 
8239 	if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6_TCP ||
8240 	    tun_type == ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP) {
8241 		*pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
8242 		*pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
8243 		*offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
8244 		return;
8245 	}
8246 
8247 	if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4 ||
8248 	    tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4) {
8249 		*pkt = dummy_ipv6_gtpu_ipv4_packet;
8250 		*pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
8251 		*offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
8252 		return;
8253 	}
8254 
8255 	if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_UDP ||
8256 	    tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP) {
8257 		*pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
8258 		*pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
8259 		*offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
8260 		return;
8261 	}
8262 
8263 	if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4_TCP ||
8264 	    tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP) {
8265 		*pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
8266 		*pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
8267 		*offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
8268 		return;
8269 	}
8270 
8271 	if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6 ||
8272 	    tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6) {
8273 		*pkt = dummy_ipv6_gtpu_ipv6_packet;
8274 		*pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
8275 		*offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
8276 		return;
8277 	}
8278 
8279 	if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_UDP ||
8280 	    tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP) {
8281 		*pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
8282 		*pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
8283 		*offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
8284 		return;
8285 	}
8286 
8287 	if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6_TCP ||
8288 	    tun_type == ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP) {
8289 		*pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
8290 		*pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
8291 		*offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
8292 		return;
8293 	}
8294 
8295 	if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
8296 		*pkt = dummy_pppoe_ipv6_packet;
8297 		*pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8298 		*offsets = dummy_pppoe_packet_offsets;
8299 		return;
8300 	} else if (tun_type == ICE_SW_TUN_PPPOE ||
8301 		tun_type == ICE_SW_TUN_PPPOE_PAY) {
8302 		*pkt = dummy_pppoe_ipv4_packet;
8303 		*pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8304 		*offsets = dummy_pppoe_packet_offsets;
8305 		return;
8306 	}
8307 
8308 	if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
8309 		*pkt = dummy_pppoe_ipv4_packet;
8310 		*pkt_len = sizeof(dummy_pppoe_ipv4_packet);
8311 		*offsets = dummy_pppoe_packet_ipv4_offsets;
8312 		return;
8313 	}
8314 
8315 	if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
8316 		*pkt = dummy_pppoe_ipv4_tcp_packet;
8317 		*pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
8318 		*offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
8319 		return;
8320 	}
8321 
8322 	if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
8323 		*pkt = dummy_pppoe_ipv4_udp_packet;
8324 		*pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
8325 		*offsets = dummy_pppoe_ipv4_udp_packet_offsets;
8326 		return;
8327 	}
8328 
8329 	if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
8330 		*pkt = dummy_pppoe_ipv6_packet;
8331 		*pkt_len = sizeof(dummy_pppoe_ipv6_packet);
8332 		*offsets = dummy_pppoe_packet_ipv6_offsets;
8333 		return;
8334 	}
8335 
8336 	if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
8337 		*pkt = dummy_pppoe_ipv6_tcp_packet;
8338 		*pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
8339 		*offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
8340 		return;
8341 	}
8342 
8343 	if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
8344 		*pkt = dummy_pppoe_ipv6_udp_packet;
8345 		*pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
8346 		*offsets = dummy_pppoe_packet_ipv6_udp_offsets;
8347 		return;
8348 	}
8349 
8350 	if (tun_type == ICE_SW_IPV4_TCP) {
8351 		*pkt = dummy_tcp_packet;
8352 		*pkt_len = sizeof(dummy_tcp_packet);
8353 		*offsets = dummy_tcp_packet_offsets;
8354 		return;
8355 	}
8356 
8357 	if (tun_type == ICE_SW_IPV4_UDP) {
8358 		*pkt = dummy_udp_packet;
8359 		*pkt_len = sizeof(dummy_udp_packet);
8360 		*offsets = dummy_udp_packet_offsets;
8361 		return;
8362 	}
8363 
8364 	if (tun_type == ICE_SW_IPV6_TCP) {
8365 		*pkt = dummy_tcp_ipv6_packet;
8366 		*pkt_len = sizeof(dummy_tcp_ipv6_packet);
8367 		*offsets = dummy_tcp_ipv6_packet_offsets;
8368 		return;
8369 	}
8370 
8371 	if (tun_type == ICE_SW_IPV6_UDP) {
8372 		*pkt = dummy_udp_ipv6_packet;
8373 		*pkt_len = sizeof(dummy_udp_ipv6_packet);
8374 		*offsets = dummy_udp_ipv6_packet_offsets;
8375 		return;
8376 	}
8377 
8378 	if (tun_type == ICE_ALL_TUNNELS) {
8379 		*pkt = dummy_gre_udp_packet;
8380 		*pkt_len = sizeof(dummy_gre_udp_packet);
8381 		*offsets = dummy_gre_udp_packet_offsets;
8382 		return;
8383 	}
8384 
8385 	if (tun_type == ICE_SW_TUN_NVGRE || gre) {
8386 		if (tcp) {
8387 			*pkt = dummy_gre_tcp_packet;
8388 			*pkt_len = sizeof(dummy_gre_tcp_packet);
8389 			*offsets = dummy_gre_tcp_packet_offsets;
8390 			return;
8391 		}
8392 
8393 		*pkt = dummy_gre_udp_packet;
8394 		*pkt_len = sizeof(dummy_gre_udp_packet);
8395 		*offsets = dummy_gre_udp_packet_offsets;
8396 		return;
8397 	}
8398 
8399 	if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
8400 	    tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
8401 	    tun_type == ICE_SW_TUN_GENEVE_VLAN ||
8402 	    tun_type == ICE_SW_TUN_VXLAN_VLAN) {
8403 		if (tcp) {
8404 			*pkt = dummy_udp_tun_tcp_packet;
8405 			*pkt_len = sizeof(dummy_udp_tun_tcp_packet);
8406 			*offsets = dummy_udp_tun_tcp_packet_offsets;
8407 			return;
8408 		}
8409 
8410 		*pkt = dummy_udp_tun_udp_packet;
8411 		*pkt_len = sizeof(dummy_udp_tun_udp_packet);
8412 		*offsets = dummy_udp_tun_udp_packet_offsets;
8413 		return;
8414 	}
8415 
8416 	if (udp && !ipv6) {
8417 		if (vlan) {
8418 			*pkt = dummy_vlan_udp_packet;
8419 			*pkt_len = sizeof(dummy_vlan_udp_packet);
8420 			*offsets = dummy_vlan_udp_packet_offsets;
8421 			return;
8422 		}
8423 		*pkt = dummy_udp_packet;
8424 		*pkt_len = sizeof(dummy_udp_packet);
8425 		*offsets = dummy_udp_packet_offsets;
8426 		return;
8427 	} else if (udp && ipv6) {
8428 		if (vlan) {
8429 			*pkt = dummy_vlan_udp_ipv6_packet;
8430 			*pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
8431 			*offsets = dummy_vlan_udp_ipv6_packet_offsets;
8432 			return;
8433 		}
8434 		*pkt = dummy_udp_ipv6_packet;
8435 		*pkt_len = sizeof(dummy_udp_ipv6_packet);
8436 		*offsets = dummy_udp_ipv6_packet_offsets;
8437 		return;
8438 	} else if ((tcp && ipv6) || ipv6) {
8439 		if (vlan) {
8440 			*pkt = dummy_vlan_tcp_ipv6_packet;
8441 			*pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
8442 			*offsets = dummy_vlan_tcp_ipv6_packet_offsets;
8443 			return;
8444 		}
8445 		*pkt = dummy_tcp_ipv6_packet;
8446 		*pkt_len = sizeof(dummy_tcp_ipv6_packet);
8447 		*offsets = dummy_tcp_ipv6_packet_offsets;
8448 		return;
8449 	}
8450 
8451 	if (vlan) {
8452 		*pkt = dummy_vlan_tcp_packet;
8453 		*pkt_len = sizeof(dummy_vlan_tcp_packet);
8454 		*offsets = dummy_vlan_tcp_packet_offsets;
8455 	}  else if (mpls) {
8456 		*pkt = dummy_mpls_packet;
8457 		*pkt_len = sizeof(dummy_mpls_packet);
8458 		*offsets = dummy_mpls_packet_offsets;
8459 	} else {
8460 		*pkt = dummy_tcp_packet;
8461 		*pkt_len = sizeof(dummy_tcp_packet);
8462 		*offsets = dummy_tcp_packet_offsets;
8463 	}
8464 }
8465 
8466 /**
8467  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
8468  *
8469  * @lkups: lookup elements or match criteria for the advanced recipe, one
8470  *	   structure per protocol header
8471  * @lkups_cnt: number of protocols
8472  * @s_rule: stores rule information from the match criteria
8473  * @dummy_pkt: dummy packet to fill according to filter match criteria
8474  * @pkt_len: packet length of dummy packet
8475  * @offsets: offset info for the dummy packet
8476  */
8477 static enum ice_status
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,struct ice_aqc_sw_rules_elem * s_rule,const u8 * dummy_pkt,u16 pkt_len,const struct ice_dummy_pkt_offsets * offsets)8478 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
8479 			  struct ice_aqc_sw_rules_elem *s_rule,
8480 			  const u8 *dummy_pkt, u16 pkt_len,
8481 			  const struct ice_dummy_pkt_offsets *offsets)
8482 {
8483 	u8 *pkt;
8484 	u16 i;
8485 
8486 	/* Start with a packet with a pre-defined/dummy content. Then, fill
8487 	 * in the header values to be looked up or matched.
8488 	 */
8489 	pkt = s_rule->pdata.lkup_tx_rx.hdr;
8490 
8491 	ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
8492 
8493 	for (i = 0; i < lkups_cnt; i++) {
8494 		enum ice_protocol_type type;
8495 		u16 offset = 0, len = 0, j;
8496 		bool found = false;
8497 
8498 		/* find the start of this layer; it should be found since this
8499 		 * was already checked when search for the dummy packet
8500 		 */
8501 		type = lkups[i].type;
8502 		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
8503 			if (type == offsets[j].type) {
8504 				offset = offsets[j].offset;
8505 				found = true;
8506 				break;
8507 			}
8508 		}
8509 		/* this should never happen in a correct calling sequence */
8510 		if (!found)
8511 			return ICE_ERR_PARAM;
8512 
8513 		switch (lkups[i].type) {
8514 		case ICE_MAC_OFOS:
8515 		case ICE_MAC_IL:
8516 			len = sizeof(struct ice_ether_hdr);
8517 			break;
8518 		case ICE_ETYPE_OL:
8519 			len = sizeof(struct ice_ethtype_hdr);
8520 			break;
8521 		case ICE_VLAN_OFOS:
8522 		case ICE_VLAN_EX:
8523 		case ICE_VLAN_IN:
8524 			len = sizeof(struct ice_vlan_hdr);
8525 			break;
8526 		case ICE_IPV4_OFOS:
8527 		case ICE_IPV4_IL:
8528 			len = sizeof(struct ice_ipv4_hdr);
8529 			break;
8530 		case ICE_IPV6_OFOS:
8531 		case ICE_IPV6_IL:
8532 			len = sizeof(struct ice_ipv6_hdr);
8533 			break;
8534 		case ICE_TCP_IL:
8535 		case ICE_UDP_OF:
8536 		case ICE_UDP_ILOS:
8537 			len = sizeof(struct ice_l4_hdr);
8538 			break;
8539 		case ICE_SCTP_IL:
8540 			len = sizeof(struct ice_sctp_hdr);
8541 			break;
8542 		case ICE_NVGRE:
8543 			len = sizeof(struct ice_nvgre);
8544 			break;
8545 		case ICE_VXLAN:
8546 		case ICE_GENEVE:
8547 		case ICE_VXLAN_GPE:
8548 			len = sizeof(struct ice_udp_tnl_hdr);
8549 			break;
8550 
8551 		case ICE_GTP:
8552 		case ICE_GTP_NO_PAY:
8553 			len = sizeof(struct ice_udp_gtp_hdr);
8554 			break;
8555 		case ICE_PPPOE:
8556 			len = sizeof(struct ice_pppoe_hdr);
8557 			break;
8558 		case ICE_ESP:
8559 			len = sizeof(struct ice_esp_hdr);
8560 			break;
8561 		case ICE_NAT_T:
8562 			len = sizeof(struct ice_nat_t_hdr);
8563 			break;
8564 		case ICE_AH:
8565 			len = sizeof(struct ice_ah_hdr);
8566 			break;
8567 		case ICE_L2TPV3:
8568 			len = sizeof(struct ice_l2tpv3_sess_hdr);
8569 			break;
8570 		default:
8571 			return ICE_ERR_PARAM;
8572 		}
8573 
8574 		/* the length should be a word multiple */
8575 		if (len % ICE_BYTES_PER_WORD)
8576 			return ICE_ERR_CFG;
8577 
8578 		/* We have the offset to the header start, the length, the
8579 		 * caller's header values and mask. Use this information to
8580 		 * copy the data into the dummy packet appropriately based on
8581 		 * the mask. Note that we need to only write the bits as
8582 		 * indicated by the mask to make sure we don't improperly write
8583 		 * over any significant packet data.
8584 		 */
8585 		for (j = 0; j < len / sizeof(u16); j++)
8586 			if (((u16 *)&lkups[i].m_u)[j])
8587 				((u16 *)(pkt + offset))[j] =
8588 					(((u16 *)(pkt + offset))[j] &
8589 					 ~((u16 *)&lkups[i].m_u)[j]) |
8590 					(((u16 *)&lkups[i].h_u)[j] &
8591 					 ((u16 *)&lkups[i].m_u)[j]);
8592 	}
8593 
8594 	s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
8595 
8596 	return ICE_SUCCESS;
8597 }
8598 
8599 /**
8600  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
8601  * @hw: pointer to the hardware structure
8602  * @tun_type: tunnel type
8603  * @pkt: dummy packet to fill in
8604  * @offsets: offset info for the dummy packet
8605  */
8606 static enum ice_status
ice_fill_adv_packet_tun(struct ice_hw * hw,enum ice_sw_tunnel_type tun_type,u8 * pkt,const struct ice_dummy_pkt_offsets * offsets)8607 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
8608 			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
8609 {
8610 	u16 open_port, i;
8611 
8612 	switch (tun_type) {
8613 	case ICE_SW_TUN_AND_NON_TUN:
8614 	case ICE_SW_TUN_VXLAN_GPE:
8615 	case ICE_SW_TUN_VXLAN:
8616 	case ICE_SW_TUN_VXLAN_VLAN:
8617 	case ICE_SW_TUN_UDP:
8618 		if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
8619 			return ICE_ERR_CFG;
8620 		break;
8621 
8622 	case ICE_SW_TUN_GENEVE:
8623 	case ICE_SW_TUN_GENEVE_VLAN:
8624 		if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
8625 			return ICE_ERR_CFG;
8626 		break;
8627 
8628 	default:
8629 		/* Nothing needs to be done for this tunnel type */
8630 		return ICE_SUCCESS;
8631 	}
8632 
8633 	/* Find the outer UDP protocol header and insert the port number */
8634 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
8635 		if (offsets[i].type == ICE_UDP_OF) {
8636 			struct ice_l4_hdr *hdr;
8637 			u16 offset;
8638 
8639 			offset = offsets[i].offset;
8640 			hdr = (struct ice_l4_hdr *)&pkt[offset];
8641 			hdr->dst_port = CPU_TO_BE16(open_port);
8642 
8643 			return ICE_SUCCESS;
8644 		}
8645 	}
8646 
8647 	return ICE_ERR_CFG;
8648 }
8649 
8650 /**
8651  * ice_find_adv_rule_entry - Search a rule entry
8652  * @hw: pointer to the hardware structure
8653  * @lkups: lookup elements or match criteria for the advanced recipe, one
8654  *	   structure per protocol header
8655  * @lkups_cnt: number of protocols
8656  * @recp_id: recipe ID for which we are finding the rule
8657  * @rinfo: other information regarding the rule e.g. priority and action info
8658  *
8659  * Helper function to search for a given advance rule entry
8660  * Returns pointer to entry storing the rule if found
8661  */
8662 static struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,u16 recp_id,struct ice_adv_rule_info * rinfo)8663 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8664 			u16 lkups_cnt, u16 recp_id,
8665 			struct ice_adv_rule_info *rinfo)
8666 {
8667 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
8668 	struct ice_switch_info *sw = hw->switch_info;
8669 	int i;
8670 
8671 	LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
8672 			    ice_adv_fltr_mgmt_list_entry, list_entry) {
8673 		bool lkups_matched = true;
8674 
8675 		if (lkups_cnt != list_itr->lkups_cnt)
8676 			continue;
8677 		for (i = 0; i < list_itr->lkups_cnt; i++)
8678 			if (memcmp(&list_itr->lkups[i], &lkups[i],
8679 				   sizeof(*lkups))) {
8680 				lkups_matched = false;
8681 				break;
8682 			}
8683 		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
8684 		    rinfo->tun_type == list_itr->rule_info.tun_type &&
8685 		    lkups_matched)
8686 			return list_itr;
8687 	}
8688 	return NULL;
8689 }
8690 
8691 /**
8692  * ice_adv_add_update_vsi_list
8693  * @hw: pointer to the hardware structure
8694  * @m_entry: pointer to current adv filter management list entry
8695  * @cur_fltr: filter information from the book keeping entry
8696  * @new_fltr: filter information with the new VSI to be added
8697  *
8698  * Call AQ command to add or update previously created VSI list with new VSI.
8699  *
8700  * Helper function to do book keeping associated with adding filter information
8701  * The algorithm to do the booking keeping is described below :
8702  * When a VSI needs to subscribe to a given advanced filter
8703  *	if only one VSI has been added till now
8704  *		Allocate a new VSI list and add two VSIs
8705  *		to this list using switch rule command
8706  *		Update the previously created switch rule with the
8707  *		newly created VSI list ID
8708  *	if a VSI list was previously created
8709  *		Add the new VSI to the previously created VSI list set
8710  *		using the update switch rule command
8711  */
8712 static enum ice_status
ice_adv_add_update_vsi_list(struct ice_hw * hw,struct ice_adv_fltr_mgmt_list_entry * m_entry,struct ice_adv_rule_info * cur_fltr,struct ice_adv_rule_info * new_fltr)8713 ice_adv_add_update_vsi_list(struct ice_hw *hw,
8714 			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
8715 			    struct ice_adv_rule_info *cur_fltr,
8716 			    struct ice_adv_rule_info *new_fltr)
8717 {
8718 	enum ice_status status;
8719 	u16 vsi_list_id = 0;
8720 
8721 	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8722 	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8723 	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
8724 		return ICE_ERR_NOT_IMPL;
8725 
8726 	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
8727 	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
8728 	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8729 	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
8730 		return ICE_ERR_NOT_IMPL;
8731 
8732 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
8733 		 /* Only one entry existed in the mapping and it was not already
8734 		  * a part of a VSI list. So, create a VSI list with the old and
8735 		  * new VSIs.
8736 		  */
8737 		struct ice_fltr_info tmp_fltr;
8738 		u16 vsi_handle_arr[2];
8739 
8740 		/* A rule already exists with the new VSI being added */
8741 		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
8742 		    new_fltr->sw_act.fwd_id.hw_vsi_id)
8743 			return ICE_ERR_ALREADY_EXISTS;
8744 
8745 		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
8746 		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
8747 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
8748 						  &vsi_list_id,
8749 						  ICE_SW_LKUP_LAST);
8750 		if (status)
8751 			return status;
8752 
8753 		ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8754 		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
8755 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
8756 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
8757 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
8758 		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
8759 
8760 		/* Update the previous switch rule of "forward to VSI" to
8761 		 * "fwd to VSI list"
8762 		 */
8763 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8764 		if (status)
8765 			return status;
8766 
8767 		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
8768 		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
8769 		m_entry->vsi_list_info =
8770 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
8771 						vsi_list_id);
8772 	} else {
8773 		u16 vsi_handle = new_fltr->sw_act.vsi_handle;
8774 
8775 		if (!m_entry->vsi_list_info)
8776 			return ICE_ERR_CFG;
8777 
8778 		/* A rule already exists with the new VSI being added */
8779 		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
8780 			return ICE_SUCCESS;
8781 
8782 		/* Update the previously created VSI list set with
8783 		 * the new VSI ID passed in
8784 		 */
8785 		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
8786 
8787 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
8788 						  vsi_list_id, false,
8789 						  ice_aqc_opc_update_sw_rules,
8790 						  ICE_SW_LKUP_LAST);
8791 		/* update VSI list mapping info with new VSI ID */
8792 		if (!status)
8793 			ice_set_bit(vsi_handle,
8794 				    m_entry->vsi_list_info->vsi_map);
8795 	}
8796 	if (!status)
8797 		m_entry->vsi_count++;
8798 	return status;
8799 }
8800 
8801 /**
8802  * ice_add_adv_rule - helper function to create an advanced switch rule
8803  * @hw: pointer to the hardware structure
8804  * @lkups: information on the words that needs to be looked up. All words
8805  * together makes one recipe
8806  * @lkups_cnt: num of entries in the lkups array
8807  * @rinfo: other information related to the rule that needs to be programmed
8808  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
8809  *               ignored is case of error.
8810  *
8811  * This function can program only 1 rule at a time. The lkups is used to
8812  * describe the all the words that forms the "lookup" portion of the recipe.
8813  * These words can span multiple protocols. Callers to this function need to
8814  * pass in a list of protocol headers with lookup information along and mask
8815  * that determines which words are valid from the given protocol header.
8816  * rinfo describes other information related to this rule such as forwarding
8817  * IDs, priority of this rule, etc.
8818  */
8819 enum ice_status
ice_add_adv_rule(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,struct ice_adv_rule_info * rinfo,struct ice_rule_query_data * added_entry)8820 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8821 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
8822 		 struct ice_rule_query_data *added_entry)
8823 {
8824 	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
8825 	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
8826 	const struct ice_dummy_pkt_offsets *pkt_offsets;
8827 	struct ice_aqc_sw_rules_elem *s_rule = NULL;
8828 	struct LIST_HEAD_TYPE *rule_head;
8829 	struct ice_switch_info *sw;
8830 	enum ice_status status;
8831 	const u8 *pkt = NULL;
8832 	bool prof_rule;
8833 	u16 word_cnt;
8834 	u32 act = 0;
8835 	u8 q_rgn;
8836 
8837 	/* Initialize profile to result index bitmap */
8838 	if (!hw->switch_info->prof_res_bm_init) {
8839 		hw->switch_info->prof_res_bm_init = 1;
8840 		ice_init_prof_result_bm(hw);
8841 	}
8842 
8843 	prof_rule = ice_is_prof_rule(rinfo->tun_type);
8844 	if (!prof_rule && !lkups_cnt)
8845 		return ICE_ERR_PARAM;
8846 
8847 	/* get # of words we need to match */
8848 	word_cnt = 0;
8849 	for (i = 0; i < lkups_cnt; i++) {
8850 		u16 j, *ptr;
8851 
8852 		ptr = (u16 *)&lkups[i].m_u;
8853 		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
8854 			if (ptr[j] != 0)
8855 				word_cnt++;
8856 	}
8857 
8858 	if (prof_rule) {
8859 		if (word_cnt > ICE_MAX_CHAIN_WORDS)
8860 			return ICE_ERR_PARAM;
8861 	} else {
8862 		if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
8863 			return ICE_ERR_PARAM;
8864 	}
8865 
8866 	/* make sure that we can locate a dummy packet */
8867 	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
8868 			      &pkt_offsets);
8869 	if (!pkt) {
8870 		status = ICE_ERR_PARAM;
8871 		goto err_ice_add_adv_rule;
8872 	}
8873 
8874 	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
8875 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
8876 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
8877 	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
8878 		return ICE_ERR_CFG;
8879 
8880 	vsi_handle = rinfo->sw_act.vsi_handle;
8881 	if (!ice_is_vsi_valid(hw, vsi_handle))
8882 		return ICE_ERR_PARAM;
8883 
8884 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8885 		rinfo->sw_act.fwd_id.hw_vsi_id =
8886 			ice_get_hw_vsi_num(hw, vsi_handle);
8887 	if (rinfo->sw_act.flag & ICE_FLTR_TX)
8888 		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8889 
8890 	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8891 	if (status)
8892 		return status;
8893 	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8894 	if (m_entry) {
8895 		/* we have to add VSI to VSI_LIST and increment vsi_count.
8896 		 * Also Update VSI list so that we can change forwarding rule
8897 		 * if the rule already exists, we will check if it exists with
8898 		 * same vsi_id, if not then add it to the VSI list if it already
8899 		 * exists if not then create a VSI list and add the existing VSI
8900 		 * ID and the new VSI ID to the list
8901 		 * We will add that VSI to the list
8902 		 */
8903 		status = ice_adv_add_update_vsi_list(hw, m_entry,
8904 						     &m_entry->rule_info,
8905 						     rinfo);
8906 		if (added_entry) {
8907 			added_entry->rid = rid;
8908 			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8909 			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8910 		}
8911 		return status;
8912 	}
8913 	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8914 	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8915 	if (!s_rule)
8916 		return ICE_ERR_NO_MEMORY;
8917 	if (!rinfo->flags_info.act_valid)
8918 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
8919 	else
8920 		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
8921 						ICE_SINGLE_ACT_LB_ENABLE);
8922 
8923 	switch (rinfo->sw_act.fltr_act) {
8924 	case ICE_FWD_TO_VSI:
8925 		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8926 			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8927 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8928 		break;
8929 	case ICE_FWD_TO_Q:
8930 		act |= ICE_SINGLE_ACT_TO_Q;
8931 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8932 		       ICE_SINGLE_ACT_Q_INDEX_M;
8933 		break;
8934 	case ICE_FWD_TO_QGRP:
8935 		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8936 			(u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8937 		act |= ICE_SINGLE_ACT_TO_Q;
8938 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8939 		       ICE_SINGLE_ACT_Q_INDEX_M;
8940 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8941 		       ICE_SINGLE_ACT_Q_REGION_M;
8942 		break;
8943 	case ICE_DROP_PACKET:
8944 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8945 		       ICE_SINGLE_ACT_VALID_BIT;
8946 		break;
8947 	default:
8948 		status = ICE_ERR_CFG;
8949 		goto err_ice_add_adv_rule;
8950 	}
8951 
8952 	/* set the rule LOOKUP type based on caller specified 'RX'
8953 	 * instead of hardcoding it to be either LOOKUP_TX/RX
8954 	 *
8955 	 * for 'RX' set the source to be the port number
8956 	 * for 'TX' set the source to be the source HW VSI number (determined
8957 	 * by caller)
8958 	 */
8959 	if (rinfo->rx) {
8960 		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8961 		s_rule->pdata.lkup_tx_rx.src =
8962 			CPU_TO_LE16(hw->port_info->lport);
8963 	} else {
8964 		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8965 		s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8966 	}
8967 
8968 	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8969 	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
8970 
8971 	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8972 					   pkt_len, pkt_offsets);
8973 	if (status)
8974 		goto err_ice_add_adv_rule;
8975 
8976 	if (rinfo->tun_type != ICE_NON_TUN &&
8977 	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8978 		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8979 						 s_rule->pdata.lkup_tx_rx.hdr,
8980 						 pkt_offsets);
8981 		if (status)
8982 			goto err_ice_add_adv_rule;
8983 	}
8984 
8985 	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8986 				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8987 				 NULL);
8988 	if (status)
8989 		goto err_ice_add_adv_rule;
8990 	adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8991 		ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8992 	if (!adv_fltr) {
8993 		status = ICE_ERR_NO_MEMORY;
8994 		goto err_ice_add_adv_rule;
8995 	}
8996 
8997 	adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8998 		ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8999 			   ICE_NONDMA_TO_NONDMA);
9000 	if (!adv_fltr->lkups && !prof_rule) {
9001 		status = ICE_ERR_NO_MEMORY;
9002 		goto err_ice_add_adv_rule;
9003 	}
9004 
9005 	adv_fltr->lkups_cnt = lkups_cnt;
9006 	adv_fltr->rule_info = *rinfo;
9007 	adv_fltr->rule_info.fltr_rule_id =
9008 		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
9009 	sw = hw->switch_info;
9010 	sw->recp_list[rid].adv_rule = true;
9011 	rule_head = &sw->recp_list[rid].filt_rules;
9012 
9013 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
9014 		adv_fltr->vsi_count = 1;
9015 
9016 	/* Add rule entry to book keeping list */
9017 	LIST_ADD(&adv_fltr->list_entry, rule_head);
9018 	if (added_entry) {
9019 		added_entry->rid = rid;
9020 		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
9021 		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
9022 	}
9023 err_ice_add_adv_rule:
9024 	if (status && adv_fltr) {
9025 		ice_free(hw, adv_fltr->lkups);
9026 		ice_free(hw, adv_fltr);
9027 	}
9028 
9029 	ice_free(hw, s_rule);
9030 
9031 	return status;
9032 }
9033 
9034 /**
9035  * ice_adv_rem_update_vsi_list
9036  * @hw: pointer to the hardware structure
9037  * @vsi_handle: VSI handle of the VSI to remove
9038  * @fm_list: filter management entry for which the VSI list management needs to
9039  *	     be done
9040  */
9041 static enum ice_status
ice_adv_rem_update_vsi_list(struct ice_hw * hw,u16 vsi_handle,struct ice_adv_fltr_mgmt_list_entry * fm_list)9042 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
9043 			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
9044 {
9045 	struct ice_vsi_list_map_info *vsi_list_info;
9046 	enum ice_sw_lkup_type lkup_type;
9047 	enum ice_status status;
9048 	u16 vsi_list_id;
9049 
9050 	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
9051 	    fm_list->vsi_count == 0)
9052 		return ICE_ERR_PARAM;
9053 
9054 	/* A rule with the VSI being removed does not exist */
9055 	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
9056 		return ICE_ERR_DOES_NOT_EXIST;
9057 
9058 	lkup_type = ICE_SW_LKUP_LAST;
9059 	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
9060 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
9061 					  ice_aqc_opc_update_sw_rules,
9062 					  lkup_type);
9063 	if (status)
9064 		return status;
9065 
9066 	fm_list->vsi_count--;
9067 	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
9068 	vsi_list_info = fm_list->vsi_list_info;
9069 	if (fm_list->vsi_count == 1) {
9070 		struct ice_fltr_info tmp_fltr;
9071 		u16 rem_vsi_handle;
9072 
9073 		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
9074 						    ICE_MAX_VSI);
9075 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
9076 			return ICE_ERR_OUT_OF_RANGE;
9077 
9078 		/* Make sure VSI list is empty before removing it below */
9079 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
9080 						  vsi_list_id, true,
9081 						  ice_aqc_opc_update_sw_rules,
9082 						  lkup_type);
9083 		if (status)
9084 			return status;
9085 
9086 		ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
9087 		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
9088 		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
9089 		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
9090 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
9091 		tmp_fltr.fwd_id.hw_vsi_id =
9092 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
9093 		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
9094 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
9095 		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
9096 
9097 		/* Update the previous switch rule of "MAC forward to VSI" to
9098 		 * "MAC fwd to VSI list"
9099 		 */
9100 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
9101 		if (status) {
9102 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
9103 				  tmp_fltr.fwd_id.hw_vsi_id, status);
9104 			return status;
9105 		}
9106 		fm_list->vsi_list_info->ref_cnt--;
9107 
9108 		/* Remove the VSI list since it is no longer used */
9109 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
9110 		if (status) {
9111 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
9112 				  vsi_list_id, status);
9113 			return status;
9114 		}
9115 
9116 		LIST_DEL(&vsi_list_info->list_entry);
9117 		ice_free(hw, vsi_list_info);
9118 		fm_list->vsi_list_info = NULL;
9119 	}
9120 
9121 	return status;
9122 }
9123 
9124 /**
9125  * ice_rem_adv_rule - removes existing advanced switch rule
9126  * @hw: pointer to the hardware structure
9127  * @lkups: information on the words that needs to be looked up. All words
9128  *         together makes one recipe
9129  * @lkups_cnt: num of entries in the lkups array
9130  * @rinfo: Its the pointer to the rule information for the rule
9131  *
9132  * This function can be used to remove 1 rule at a time. The lkups is
9133  * used to describe all the words that forms the "lookup" portion of the
9134  * rule. These words can span multiple protocols. Callers to this function
9135  * need to pass in a list of protocol headers with lookup information along
9136  * and mask that determines which words are valid from the given protocol
9137  * header. rinfo describes other information related to this rule such as
9138  * forwarding IDs, priority of this rule, etc.
9139  */
9140 enum ice_status
ice_rem_adv_rule(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,struct ice_adv_rule_info * rinfo)9141 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
9142 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
9143 {
9144 	struct ice_adv_fltr_mgmt_list_entry *list_elem;
9145 	struct ice_prot_lkup_ext lkup_exts;
9146 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
9147 	enum ice_status status = ICE_SUCCESS;
9148 	bool remove_rule = false;
9149 	u16 i, rid, vsi_handle;
9150 
9151 	ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
9152 	for (i = 0; i < lkups_cnt; i++) {
9153 		u16 count;
9154 
9155 		if (lkups[i].type >= ICE_PROTOCOL_LAST)
9156 			return ICE_ERR_CFG;
9157 
9158 		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
9159 		if (!count)
9160 			return ICE_ERR_CFG;
9161 	}
9162 
9163 	/* Create any special protocol/offset pairs, such as looking at tunnel
9164 	 * bits by extracting metadata
9165 	 */
9166 	status = ice_add_special_words(rinfo, &lkup_exts);
9167 	if (status)
9168 		return status;
9169 
9170 	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type, rinfo->priority);
9171 	/* If did not find a recipe that match the existing criteria */
9172 	if (rid == ICE_MAX_NUM_RECIPES)
9173 		return ICE_ERR_PARAM;
9174 
9175 	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
9176 	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
9177 	/* the rule is already removed */
9178 	if (!list_elem)
9179 		return ICE_SUCCESS;
9180 	ice_acquire_lock(rule_lock);
9181 	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
9182 		remove_rule = true;
9183 	} else if (list_elem->vsi_count > 1) {
9184 		remove_rule = false;
9185 		vsi_handle = rinfo->sw_act.vsi_handle;
9186 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
9187 	} else {
9188 		vsi_handle = rinfo->sw_act.vsi_handle;
9189 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
9190 		if (status) {
9191 			ice_release_lock(rule_lock);
9192 			return status;
9193 		}
9194 		if (list_elem->vsi_count == 0)
9195 			remove_rule = true;
9196 	}
9197 	ice_release_lock(rule_lock);
9198 	if (remove_rule) {
9199 		struct ice_aqc_sw_rules_elem *s_rule;
9200 		u16 rule_buf_sz;
9201 
9202 		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
9203 		s_rule = (struct ice_aqc_sw_rules_elem *)
9204 			ice_malloc(hw, rule_buf_sz);
9205 		if (!s_rule)
9206 			return ICE_ERR_NO_MEMORY;
9207 		s_rule->pdata.lkup_tx_rx.act = 0;
9208 		s_rule->pdata.lkup_tx_rx.index =
9209 			CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
9210 		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
9211 		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
9212 					 rule_buf_sz, 1,
9213 					 ice_aqc_opc_remove_sw_rules, NULL);
9214 		if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
9215 			struct ice_switch_info *sw = hw->switch_info;
9216 
9217 			ice_acquire_lock(rule_lock);
9218 			LIST_DEL(&list_elem->list_entry);
9219 			ice_free(hw, list_elem->lkups);
9220 			ice_free(hw, list_elem);
9221 			ice_release_lock(rule_lock);
9222 			if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
9223 				sw->recp_list[rid].adv_rule = false;
9224 		}
9225 		ice_free(hw, s_rule);
9226 	}
9227 	return status;
9228 }
9229 
9230 /**
9231  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
9232  * @hw: pointer to the hardware structure
9233  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
9234  *
9235  * This function is used to remove 1 rule at a time. The removal is based on
9236  * the remove_entry parameter. This function will remove rule for a given
9237  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
9238  */
9239 enum ice_status
ice_rem_adv_rule_by_id(struct ice_hw * hw,struct ice_rule_query_data * remove_entry)9240 ice_rem_adv_rule_by_id(struct ice_hw *hw,
9241 		       struct ice_rule_query_data *remove_entry)
9242 {
9243 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
9244 	struct LIST_HEAD_TYPE *list_head;
9245 	struct ice_adv_rule_info rinfo;
9246 	struct ice_switch_info *sw;
9247 
9248 	sw = hw->switch_info;
9249 	if (!sw->recp_list[remove_entry->rid].recp_created)
9250 		return ICE_ERR_PARAM;
9251 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
9252 	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
9253 			    list_entry) {
9254 		if (list_itr->rule_info.fltr_rule_id ==
9255 		    remove_entry->rule_id) {
9256 			rinfo = list_itr->rule_info;
9257 			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
9258 			return ice_rem_adv_rule(hw, list_itr->lkups,
9259 						list_itr->lkups_cnt, &rinfo);
9260 		}
9261 	}
9262 	/* either list is empty or unable to find rule */
9263 	return ICE_ERR_DOES_NOT_EXIST;
9264 }
9265 
9266 /**
9267  * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
9268  *                       given VSI handle
9269  * @hw: pointer to the hardware structure
9270  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
9271  *
9272  * This function is used to remove all the rules for a given VSI and as soon
9273  * as removing a rule fails, it will return immediately with the error code,
9274  * else it will return ICE_SUCCESS
9275  */
ice_rem_adv_rule_for_vsi(struct ice_hw * hw,u16 vsi_handle)9276 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
9277 {
9278 	struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
9279 	struct ice_vsi_list_map_info *map_info;
9280 	struct LIST_HEAD_TYPE *list_head;
9281 	struct ice_adv_rule_info rinfo;
9282 	struct ice_switch_info *sw;
9283 	enum ice_status status;
9284 	u8 rid;
9285 
9286 	sw = hw->switch_info;
9287 	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
9288 		if (!sw->recp_list[rid].recp_created)
9289 			continue;
9290 		if (!sw->recp_list[rid].adv_rule)
9291 			continue;
9292 
9293 		list_head = &sw->recp_list[rid].filt_rules;
9294 		LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
9295 					 ice_adv_fltr_mgmt_list_entry,
9296 					 list_entry) {
9297 			rinfo = list_itr->rule_info;
9298 
9299 			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
9300 				map_info = list_itr->vsi_list_info;
9301 				if (!map_info)
9302 					continue;
9303 
9304 				if (!ice_is_bit_set(map_info->vsi_map,
9305 						    vsi_handle))
9306 					continue;
9307 			} else if (rinfo.sw_act.vsi_handle != vsi_handle) {
9308 				continue;
9309 			}
9310 
9311 			rinfo.sw_act.vsi_handle = vsi_handle;
9312 			status = ice_rem_adv_rule(hw, list_itr->lkups,
9313 						  list_itr->lkups_cnt, &rinfo);
9314 
9315 			if (status)
9316 				return status;
9317 		}
9318 	}
9319 	return ICE_SUCCESS;
9320 }
9321 
9322 /**
9323  * ice_replay_fltr - Replay all the filters stored by a specific list head
9324  * @hw: pointer to the hardware structure
9325  * @list_head: list for which filters needs to be replayed
9326  * @recp_id: Recipe ID for which rules need to be replayed
9327  */
9328 static enum ice_status
ice_replay_fltr(struct ice_hw * hw,u8 recp_id,struct LIST_HEAD_TYPE * list_head)9329 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
9330 {
9331 	struct ice_fltr_mgmt_list_entry *itr;
9332 	enum ice_status status = ICE_SUCCESS;
9333 	struct ice_sw_recipe *recp_list;
9334 	u8 lport = hw->port_info->lport;
9335 	struct LIST_HEAD_TYPE l_head;
9336 
9337 	if (LIST_EMPTY(list_head))
9338 		return status;
9339 
9340 	recp_list = &hw->switch_info->recp_list[recp_id];
9341 	/* Move entries from the given list_head to a temporary l_head so that
9342 	 * they can be replayed. Otherwise when trying to re-add the same
9343 	 * filter, the function will return already exists
9344 	 */
9345 	LIST_REPLACE_INIT(list_head, &l_head);
9346 
9347 	/* Mark the given list_head empty by reinitializing it so filters
9348 	 * could be added again by *handler
9349 	 */
9350 	LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
9351 			    list_entry) {
9352 		struct ice_fltr_list_entry f_entry;
9353 		u16 vsi_handle;
9354 
9355 		f_entry.fltr_info = itr->fltr_info;
9356 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
9357 			status = ice_add_rule_internal(hw, recp_list, lport,
9358 						       &f_entry);
9359 			if (status != ICE_SUCCESS)
9360 				goto end;
9361 			continue;
9362 		}
9363 
9364 		/* Add a filter per VSI separately */
9365 		ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
9366 				     ICE_MAX_VSI) {
9367 			if (!ice_is_vsi_valid(hw, vsi_handle))
9368 				break;
9369 
9370 			ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9371 			f_entry.fltr_info.vsi_handle = vsi_handle;
9372 			f_entry.fltr_info.fwd_id.hw_vsi_id =
9373 				ice_get_hw_vsi_num(hw, vsi_handle);
9374 			f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9375 			if (recp_id == ICE_SW_LKUP_VLAN)
9376 				status = ice_add_vlan_internal(hw, recp_list,
9377 							       &f_entry);
9378 			else
9379 				status = ice_add_rule_internal(hw, recp_list,
9380 							       lport,
9381 							       &f_entry);
9382 			if (status != ICE_SUCCESS)
9383 				goto end;
9384 		}
9385 	}
9386 end:
9387 	/* Clear the filter management list */
9388 	ice_rem_sw_rule_info(hw, &l_head);
9389 	return status;
9390 }
9391 
9392 /**
9393  * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
9394  * @hw: pointer to the hardware structure
9395  *
9396  * NOTE: This function does not clean up partially added filters on error.
9397  * It is up to caller of the function to issue a reset or fail early.
9398  */
ice_replay_all_fltr(struct ice_hw * hw)9399 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
9400 {
9401 	struct ice_switch_info *sw = hw->switch_info;
9402 	enum ice_status status = ICE_SUCCESS;
9403 	u8 i;
9404 
9405 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9406 		struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
9407 
9408 		status = ice_replay_fltr(hw, i, head);
9409 		if (status != ICE_SUCCESS)
9410 			return status;
9411 	}
9412 	return status;
9413 }
9414 
9415 /**
9416  * ice_replay_vsi_fltr - Replay filters for requested VSI
9417  * @hw: pointer to the hardware structure
9418  * @pi: pointer to port information structure
9419  * @sw: pointer to switch info struct for which function replays filters
9420  * @vsi_handle: driver VSI handle
9421  * @recp_id: Recipe ID for which rules need to be replayed
9422  * @list_head: list for which filters need to be replayed
9423  *
9424  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
9425  * It is required to pass valid VSI handle.
9426  */
9427 static enum ice_status
ice_replay_vsi_fltr(struct ice_hw * hw,struct ice_port_info * pi,struct ice_switch_info * sw,u16 vsi_handle,u8 recp_id,struct LIST_HEAD_TYPE * list_head)9428 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9429 		    struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
9430 		    struct LIST_HEAD_TYPE *list_head)
9431 {
9432 	struct ice_fltr_mgmt_list_entry *itr;
9433 	enum ice_status status = ICE_SUCCESS;
9434 	struct ice_sw_recipe *recp_list;
9435 	u16 hw_vsi_id;
9436 
9437 	if (LIST_EMPTY(list_head))
9438 		return status;
9439 	recp_list = &sw->recp_list[recp_id];
9440 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
9441 
9442 	LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
9443 			    list_entry) {
9444 		struct ice_fltr_list_entry f_entry;
9445 
9446 		f_entry.fltr_info = itr->fltr_info;
9447 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
9448 		    itr->fltr_info.vsi_handle == vsi_handle) {
9449 			/* update the src in case it is VSI num */
9450 			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9451 				f_entry.fltr_info.src = hw_vsi_id;
9452 			status = ice_add_rule_internal(hw, recp_list,
9453 						       pi->lport,
9454 						       &f_entry);
9455 			if (status != ICE_SUCCESS)
9456 				goto end;
9457 			continue;
9458 		}
9459 		if (!itr->vsi_list_info ||
9460 		    !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
9461 			continue;
9462 		/* Clearing it so that the logic can add it back */
9463 		ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
9464 		f_entry.fltr_info.vsi_handle = vsi_handle;
9465 		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
9466 		/* update the src in case it is VSI num */
9467 		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
9468 			f_entry.fltr_info.src = hw_vsi_id;
9469 		if (recp_id == ICE_SW_LKUP_VLAN)
9470 			status = ice_add_vlan_internal(hw, recp_list, &f_entry);
9471 		else
9472 			status = ice_add_rule_internal(hw, recp_list,
9473 						       pi->lport,
9474 						       &f_entry);
9475 		if (status != ICE_SUCCESS)
9476 			goto end;
9477 	}
9478 end:
9479 	return status;
9480 }
9481 
9482 /**
9483  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
9484  * @hw: pointer to the hardware structure
9485  * @vsi_handle: driver VSI handle
9486  * @list_head: list for which filters need to be replayed
9487  *
9488  * Replay the advanced rule for the given VSI.
9489  */
9490 static enum ice_status
ice_replay_vsi_adv_rule(struct ice_hw * hw,u16 vsi_handle,struct LIST_HEAD_TYPE * list_head)9491 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
9492 			struct LIST_HEAD_TYPE *list_head)
9493 {
9494 	struct ice_rule_query_data added_entry = { 0 };
9495 	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
9496 	enum ice_status status = ICE_SUCCESS;
9497 
9498 	if (LIST_EMPTY(list_head))
9499 		return status;
9500 	LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
9501 			    list_entry) {
9502 		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
9503 		u16 lk_cnt = adv_fltr->lkups_cnt;
9504 
9505 		if (vsi_handle != rinfo->sw_act.vsi_handle)
9506 			continue;
9507 		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
9508 					  &added_entry);
9509 		if (status)
9510 			break;
9511 	}
9512 	return status;
9513 }
9514 
9515 /**
9516  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
9517  * @hw: pointer to the hardware structure
9518  * @pi: pointer to port information structure
9519  * @vsi_handle: driver VSI handle
9520  *
9521  * Replays filters for requested VSI via vsi_handle.
9522  */
9523 enum ice_status
ice_replay_vsi_all_fltr(struct ice_hw * hw,struct ice_port_info * pi,u16 vsi_handle)9524 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
9525 			u16 vsi_handle)
9526 {
9527 	struct ice_switch_info *sw = hw->switch_info;
9528 	enum ice_status status;
9529 	u8 i;
9530 
9531 	/* Update the recipes that were created */
9532 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9533 		struct LIST_HEAD_TYPE *head;
9534 
9535 		head = &sw->recp_list[i].filt_replay_rules;
9536 		if (!sw->recp_list[i].adv_rule)
9537 			status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
9538 						     head);
9539 		else
9540 			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
9541 		if (status != ICE_SUCCESS)
9542 			return status;
9543 	}
9544 
9545 	return ICE_SUCCESS;
9546 }
9547 
9548 /**
9549  * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
9550  * @hw: pointer to the HW struct
9551  * @sw: pointer to switch info struct for which function removes filters
9552  *
9553  * Deletes the filter replay rules for given switch
9554  */
ice_rm_sw_replay_rule_info(struct ice_hw * hw,struct ice_switch_info * sw)9555 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
9556 {
9557 	u8 i;
9558 
9559 	if (!sw)
9560 		return;
9561 
9562 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
9563 		if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
9564 			struct LIST_HEAD_TYPE *l_head;
9565 
9566 			l_head = &sw->recp_list[i].filt_replay_rules;
9567 			if (!sw->recp_list[i].adv_rule)
9568 				ice_rem_sw_rule_info(hw, l_head);
9569 			else
9570 				ice_rem_adv_rule_info(hw, l_head);
9571 		}
9572 	}
9573 }
9574 
9575 /**
9576  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
9577  * @hw: pointer to the HW struct
9578  *
9579  * Deletes the filter replay rules.
9580  */
ice_rm_all_sw_replay_rule_info(struct ice_hw * hw)9581 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
9582 {
9583 	ice_rm_sw_replay_rule_info(hw, hw->switch_info);
9584 }
9585