xref: /f-stack/dpdk/drivers/net/ice/base/ice_switch.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2020 Intel Corporation
3  */
4 
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
7 #include "ice_flow.h"
8 
9 #define ICE_ETH_DA_OFFSET		0
10 #define ICE_ETH_ETHTYPE_OFFSET		12
11 #define ICE_ETH_VLAN_TCI_OFFSET		14
12 #define ICE_MAX_VLAN_ID			0xFFF
13 #define ICE_IPV4_NVGRE_PROTO_ID		0x002F
14 #define ICE_PPP_IPV6_PROTO_ID		0x0057
15 #define ICE_IPV6_ETHER_ID		0x86DD
16 #define ICE_TCP_PROTO_ID		0x06
17 
18 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
19  * struct to configure any switch filter rules.
20  * {DA (6 bytes), SA(6 bytes),
21  * Ether type (2 bytes for header without VLAN tag) OR
22  * VLAN tag (4 bytes for header with VLAN tag) }
23  *
24  * Word on Hardcoded values
25  * byte 0 = 0x2: to identify it as locally administered DA MAC
26  * byte 6 = 0x2: to identify it as locally administered SA MAC
27  * byte 12 = 0x81 & byte 13 = 0x00:
28  *	In case of VLAN filter first two bytes defines ether type (0x8100)
29  *	and remaining two bytes are placeholder for programming a given VLAN ID
30  *	In case of Ether type filter it is treated as header without VLAN tag
31  *	and byte 12 and 13 is used to program a given Ether type instead
32  */
33 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
34 							0x2, 0, 0, 0, 0, 0,
35 							0x81, 0, 0, 0};
36 
37 struct ice_dummy_pkt_offsets {
38 	enum ice_protocol_type type;
39 	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
40 };
41 
42 static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
43 	{ ICE_MAC_OFOS,		0 },
44 	{ ICE_ETYPE_OL,		12 },
45 	{ ICE_IPV4_OFOS,	14 },
46 	{ ICE_NVGRE,		34 },
47 	{ ICE_MAC_IL,		42 },
48 	{ ICE_IPV4_IL,		56 },
49 	{ ICE_TCP_IL,		76 },
50 	{ ICE_PROTOCOL_LAST,	0 },
51 };
52 
53 static const u8 dummy_gre_tcp_packet[] = {
54 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
55 	0x00, 0x00, 0x00, 0x00,
56 	0x00, 0x00, 0x00, 0x00,
57 
58 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
59 
60 	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
61 	0x00, 0x00, 0x00, 0x00,
62 	0x00, 0x2F, 0x00, 0x00,
63 	0x00, 0x00, 0x00, 0x00,
64 	0x00, 0x00, 0x00, 0x00,
65 
66 	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
67 	0x00, 0x00, 0x00, 0x00,
68 
69 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
70 	0x00, 0x00, 0x00, 0x00,
71 	0x00, 0x00, 0x00, 0x00,
72 	0x08, 0x00,
73 
74 	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
75 	0x00, 0x00, 0x00, 0x00,
76 	0x00, 0x06, 0x00, 0x00,
77 	0x00, 0x00, 0x00, 0x00,
78 	0x00, 0x00, 0x00, 0x00,
79 
80 	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
81 	0x00, 0x00, 0x00, 0x00,
82 	0x00, 0x00, 0x00, 0x00,
83 	0x50, 0x02, 0x20, 0x00,
84 	0x00, 0x00, 0x00, 0x00
85 };
86 
87 static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
88 	{ ICE_MAC_OFOS,		0 },
89 	{ ICE_ETYPE_OL,		12 },
90 	{ ICE_IPV4_OFOS,	14 },
91 	{ ICE_NVGRE,		34 },
92 	{ ICE_MAC_IL,		42 },
93 	{ ICE_IPV4_IL,		56 },
94 	{ ICE_UDP_ILOS,		76 },
95 	{ ICE_PROTOCOL_LAST,	0 },
96 };
97 
98 static const u8 dummy_gre_udp_packet[] = {
99 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
100 	0x00, 0x00, 0x00, 0x00,
101 	0x00, 0x00, 0x00, 0x00,
102 
103 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
104 
105 	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
106 	0x00, 0x00, 0x00, 0x00,
107 	0x00, 0x2F, 0x00, 0x00,
108 	0x00, 0x00, 0x00, 0x00,
109 	0x00, 0x00, 0x00, 0x00,
110 
111 	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
112 	0x00, 0x00, 0x00, 0x00,
113 
114 	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
115 	0x00, 0x00, 0x00, 0x00,
116 	0x00, 0x00, 0x00, 0x00,
117 	0x08, 0x00,
118 
119 	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
120 	0x00, 0x00, 0x00, 0x00,
121 	0x00, 0x11, 0x00, 0x00,
122 	0x00, 0x00, 0x00, 0x00,
123 	0x00, 0x00, 0x00, 0x00,
124 
125 	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
126 	0x00, 0x08, 0x00, 0x00,
127 };
128 
129 static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
130 	{ ICE_MAC_OFOS,		0 },
131 	{ ICE_ETYPE_OL,		12 },
132 	{ ICE_IPV4_OFOS,	14 },
133 	{ ICE_UDP_OF,		34 },
134 	{ ICE_VXLAN,		42 },
135 	{ ICE_GENEVE,		42 },
136 	{ ICE_VXLAN_GPE,	42 },
137 	{ ICE_MAC_IL,		50 },
138 	{ ICE_IPV4_IL,		64 },
139 	{ ICE_TCP_IL,		84 },
140 	{ ICE_PROTOCOL_LAST,	0 },
141 };
142 
143 static const u8 dummy_udp_tun_tcp_packet[] = {
144 	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
145 	0x00, 0x00, 0x00, 0x00,
146 	0x00, 0x00, 0x00, 0x00,
147 
148 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
149 
150 	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
151 	0x00, 0x01, 0x00, 0x00,
152 	0x40, 0x11, 0x00, 0x00,
153 	0x00, 0x00, 0x00, 0x00,
154 	0x00, 0x00, 0x00, 0x00,
155 
156 	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
157 	0x00, 0x46, 0x00, 0x00,
158 
159 	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
160 	0x00, 0x00, 0x00, 0x00,
161 
162 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
163 	0x00, 0x00, 0x00, 0x00,
164 	0x00, 0x00, 0x00, 0x00,
165 	0x08, 0x00,
166 
167 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
168 	0x00, 0x01, 0x00, 0x00,
169 	0x40, 0x06, 0x00, 0x00,
170 	0x00, 0x00, 0x00, 0x00,
171 	0x00, 0x00, 0x00, 0x00,
172 
173 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
174 	0x00, 0x00, 0x00, 0x00,
175 	0x00, 0x00, 0x00, 0x00,
176 	0x50, 0x02, 0x20, 0x00,
177 	0x00, 0x00, 0x00, 0x00
178 };
179 
180 static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
181 	{ ICE_MAC_OFOS,		0 },
182 	{ ICE_ETYPE_OL,		12 },
183 	{ ICE_IPV4_OFOS,	14 },
184 	{ ICE_UDP_OF,		34 },
185 	{ ICE_VXLAN,		42 },
186 	{ ICE_GENEVE,		42 },
187 	{ ICE_VXLAN_GPE,	42 },
188 	{ ICE_MAC_IL,		50 },
189 	{ ICE_IPV4_IL,		64 },
190 	{ ICE_UDP_ILOS,		84 },
191 	{ ICE_PROTOCOL_LAST,	0 },
192 };
193 
194 static const u8 dummy_udp_tun_udp_packet[] = {
195 	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
196 	0x00, 0x00, 0x00, 0x00,
197 	0x00, 0x00, 0x00, 0x00,
198 
199 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
200 
201 	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
202 	0x00, 0x01, 0x00, 0x00,
203 	0x00, 0x11, 0x00, 0x00,
204 	0x00, 0x00, 0x00, 0x00,
205 	0x00, 0x00, 0x00, 0x00,
206 
207 	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
208 	0x00, 0x3a, 0x00, 0x00,
209 
210 	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
211 	0x00, 0x00, 0x00, 0x00,
212 
213 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
214 	0x00, 0x00, 0x00, 0x00,
215 	0x00, 0x00, 0x00, 0x00,
216 	0x08, 0x00,
217 
218 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
219 	0x00, 0x01, 0x00, 0x00,
220 	0x00, 0x11, 0x00, 0x00,
221 	0x00, 0x00, 0x00, 0x00,
222 	0x00, 0x00, 0x00, 0x00,
223 
224 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
225 	0x00, 0x08, 0x00, 0x00,
226 };
227 
228 /* offset info for MAC + IPv4 + UDP dummy packet */
229 static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
230 	{ ICE_MAC_OFOS,		0 },
231 	{ ICE_ETYPE_OL,		12 },
232 	{ ICE_IPV4_OFOS,	14 },
233 	{ ICE_UDP_ILOS,		34 },
234 	{ ICE_PROTOCOL_LAST,	0 },
235 };
236 
237 /* Dummy packet for MAC + IPv4 + UDP */
238 static const u8 dummy_udp_packet[] = {
239 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
240 	0x00, 0x00, 0x00, 0x00,
241 	0x00, 0x00, 0x00, 0x00,
242 
243 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
244 
245 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
246 	0x00, 0x01, 0x00, 0x00,
247 	0x00, 0x11, 0x00, 0x00,
248 	0x00, 0x00, 0x00, 0x00,
249 	0x00, 0x00, 0x00, 0x00,
250 
251 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
252 	0x00, 0x08, 0x00, 0x00,
253 
254 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
255 };
256 
257 /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
258 static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
259 	{ ICE_MAC_OFOS,		0 },
260 	{ ICE_ETYPE_OL,		12 },
261 	{ ICE_VLAN_OFOS,	14 },
262 	{ ICE_IPV4_OFOS,	18 },
263 	{ ICE_UDP_ILOS,		38 },
264 	{ ICE_PROTOCOL_LAST,	0 },
265 };
266 
267 /* C-tag (801.1Q), IPv4:UDP dummy packet */
268 static const u8 dummy_vlan_udp_packet[] = {
269 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
270 	0x00, 0x00, 0x00, 0x00,
271 	0x00, 0x00, 0x00, 0x00,
272 
273 	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
274 
275 	0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
276 
277 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
278 	0x00, 0x01, 0x00, 0x00,
279 	0x00, 0x11, 0x00, 0x00,
280 	0x00, 0x00, 0x00, 0x00,
281 	0x00, 0x00, 0x00, 0x00,
282 
283 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
284 	0x00, 0x08, 0x00, 0x00,
285 
286 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
287 };
288 
289 /* offset info for MAC + IPv4 + TCP dummy packet */
290 static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
291 	{ ICE_MAC_OFOS,		0 },
292 	{ ICE_ETYPE_OL,		12 },
293 	{ ICE_IPV4_OFOS,	14 },
294 	{ ICE_TCP_IL,		34 },
295 	{ ICE_PROTOCOL_LAST,	0 },
296 };
297 
298 /* Dummy packet for MAC + IPv4 + TCP */
299 static const u8 dummy_tcp_packet[] = {
300 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
301 	0x00, 0x00, 0x00, 0x00,
302 	0x00, 0x00, 0x00, 0x00,
303 
304 	0x08, 0x00,		/* ICE_ETYPE_OL 12 */
305 
306 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
307 	0x00, 0x01, 0x00, 0x00,
308 	0x00, 0x06, 0x00, 0x00,
309 	0x00, 0x00, 0x00, 0x00,
310 	0x00, 0x00, 0x00, 0x00,
311 
312 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
313 	0x00, 0x00, 0x00, 0x00,
314 	0x00, 0x00, 0x00, 0x00,
315 	0x50, 0x00, 0x00, 0x00,
316 	0x00, 0x00, 0x00, 0x00,
317 
318 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
319 };
320 
321 /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
322 static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
323 	{ ICE_MAC_OFOS,		0 },
324 	{ ICE_ETYPE_OL,		12 },
325 	{ ICE_VLAN_OFOS,	14 },
326 	{ ICE_IPV4_OFOS,	18 },
327 	{ ICE_TCP_IL,		38 },
328 	{ ICE_PROTOCOL_LAST,	0 },
329 };
330 
331 /* C-tag (801.1Q), IPv4:TCP dummy packet */
332 static const u8 dummy_vlan_tcp_packet[] = {
333 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
334 	0x00, 0x00, 0x00, 0x00,
335 	0x00, 0x00, 0x00, 0x00,
336 
337 	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
338 
339 	0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 14 */
340 
341 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
342 	0x00, 0x01, 0x00, 0x00,
343 	0x00, 0x06, 0x00, 0x00,
344 	0x00, 0x00, 0x00, 0x00,
345 	0x00, 0x00, 0x00, 0x00,
346 
347 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
348 	0x00, 0x00, 0x00, 0x00,
349 	0x00, 0x00, 0x00, 0x00,
350 	0x50, 0x00, 0x00, 0x00,
351 	0x00, 0x00, 0x00, 0x00,
352 
353 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
354 };
355 
356 static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
357 	{ ICE_MAC_OFOS,		0 },
358 	{ ICE_ETYPE_OL,		12 },
359 	{ ICE_IPV6_OFOS,	14 },
360 	{ ICE_TCP_IL,		54 },
361 	{ ICE_PROTOCOL_LAST,	0 },
362 };
363 
364 static const u8 dummy_tcp_ipv6_packet[] = {
365 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
366 	0x00, 0x00, 0x00, 0x00,
367 	0x00, 0x00, 0x00, 0x00,
368 
369 	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */
370 
371 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
372 	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
373 	0x00, 0x00, 0x00, 0x00,
374 	0x00, 0x00, 0x00, 0x00,
375 	0x00, 0x00, 0x00, 0x00,
376 	0x00, 0x00, 0x00, 0x00,
377 	0x00, 0x00, 0x00, 0x00,
378 	0x00, 0x00, 0x00, 0x00,
379 	0x00, 0x00, 0x00, 0x00,
380 	0x00, 0x00, 0x00, 0x00,
381 
382 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
383 	0x00, 0x00, 0x00, 0x00,
384 	0x00, 0x00, 0x00, 0x00,
385 	0x50, 0x00, 0x00, 0x00,
386 	0x00, 0x00, 0x00, 0x00,
387 
388 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
389 };
390 
391 /* C-tag (802.1Q): IPv6 + TCP */
392 static const struct ice_dummy_pkt_offsets
393 dummy_vlan_tcp_ipv6_packet_offsets[] = {
394 	{ ICE_MAC_OFOS,		0 },
395 	{ ICE_ETYPE_OL,		12 },
396 	{ ICE_VLAN_OFOS,	14 },
397 	{ ICE_IPV6_OFOS,	18 },
398 	{ ICE_TCP_IL,		58 },
399 	{ ICE_PROTOCOL_LAST,	0 },
400 };
401 
402 /* C-tag (802.1Q), IPv6 + TCP dummy packet */
403 static const u8 dummy_vlan_tcp_ipv6_packet[] = {
404 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
405 	0x00, 0x00, 0x00, 0x00,
406 	0x00, 0x00, 0x00, 0x00,
407 
408 	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
409 
410 	0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
411 
412 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
413 	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
414 	0x00, 0x00, 0x00, 0x00,
415 	0x00, 0x00, 0x00, 0x00,
416 	0x00, 0x00, 0x00, 0x00,
417 	0x00, 0x00, 0x00, 0x00,
418 	0x00, 0x00, 0x00, 0x00,
419 	0x00, 0x00, 0x00, 0x00,
420 	0x00, 0x00, 0x00, 0x00,
421 	0x00, 0x00, 0x00, 0x00,
422 
423 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
424 	0x00, 0x00, 0x00, 0x00,
425 	0x00, 0x00, 0x00, 0x00,
426 	0x50, 0x00, 0x00, 0x00,
427 	0x00, 0x00, 0x00, 0x00,
428 
429 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
430 };
431 
432 /* IPv6 + UDP */
433 static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
434 	{ ICE_MAC_OFOS,		0 },
435 	{ ICE_ETYPE_OL,		12 },
436 	{ ICE_IPV6_OFOS,	14 },
437 	{ ICE_UDP_ILOS,		54 },
438 	{ ICE_PROTOCOL_LAST,	0 },
439 };
440 
441 /* IPv6 + UDP dummy packet */
442 static const u8 dummy_udp_ipv6_packet[] = {
443 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
444 	0x00, 0x00, 0x00, 0x00,
445 	0x00, 0x00, 0x00, 0x00,
446 
447 	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */
448 
449 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
450 	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
451 	0x00, 0x00, 0x00, 0x00,
452 	0x00, 0x00, 0x00, 0x00,
453 	0x00, 0x00, 0x00, 0x00,
454 	0x00, 0x00, 0x00, 0x00,
455 	0x00, 0x00, 0x00, 0x00,
456 	0x00, 0x00, 0x00, 0x00,
457 	0x00, 0x00, 0x00, 0x00,
458 	0x00, 0x00, 0x00, 0x00,
459 
460 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
461 	0x00, 0x10, 0x00, 0x00,
462 
463 	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
464 	0x00, 0x00, 0x00, 0x00,
465 
466 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
467 };
468 
469 /* C-tag (802.1Q): IPv6 + UDP */
470 static const struct ice_dummy_pkt_offsets
471 dummy_vlan_udp_ipv6_packet_offsets[] = {
472 	{ ICE_MAC_OFOS,		0 },
473 	{ ICE_ETYPE_OL,		12 },
474 	{ ICE_VLAN_OFOS,	14 },
475 	{ ICE_IPV6_OFOS,	18 },
476 	{ ICE_UDP_ILOS,		58 },
477 	{ ICE_PROTOCOL_LAST,	0 },
478 };
479 
480 /* C-tag (802.1Q), IPv6 + UDP dummy packet */
481 static const u8 dummy_vlan_udp_ipv6_packet[] = {
482 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
483 	0x00, 0x00, 0x00, 0x00,
484 	0x00, 0x00, 0x00, 0x00,
485 
486 	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
487 
488 	0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 14 */
489 
490 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
491 	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
492 	0x00, 0x00, 0x00, 0x00,
493 	0x00, 0x00, 0x00, 0x00,
494 	0x00, 0x00, 0x00, 0x00,
495 	0x00, 0x00, 0x00, 0x00,
496 	0x00, 0x00, 0x00, 0x00,
497 	0x00, 0x00, 0x00, 0x00,
498 	0x00, 0x00, 0x00, 0x00,
499 	0x00, 0x00, 0x00, 0x00,
500 
501 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
502 	0x00, 0x08, 0x00, 0x00,
503 
504 	0x00, 0x00, /* 2 bytes for 4 byte alignment */
505 };
506 
507 static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
508 	{ ICE_MAC_OFOS,		0 },
509 	{ ICE_IPV4_OFOS,	14 },
510 	{ ICE_UDP_OF,		34 },
511 	{ ICE_GTP,		42 },
512 	{ ICE_PROTOCOL_LAST,	0 },
513 };
514 
515 static const u8 dummy_udp_gtp_packet[] = {
516 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
517 	0x00, 0x00, 0x00, 0x00,
518 	0x00, 0x00, 0x00, 0x00,
519 	0x08, 0x00,
520 
521 	0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
522 	0x00, 0x00, 0x00, 0x00,
523 	0x00, 0x11, 0x00, 0x00,
524 	0x00, 0x00, 0x00, 0x00,
525 	0x00, 0x00, 0x00, 0x00,
526 
527 	0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
528 	0x00, 0x1c, 0x00, 0x00,
529 
530 	0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
531 	0x00, 0x00, 0x00, 0x00,
532 	0x00, 0x00, 0x00, 0x85,
533 
534 	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
535 	0x00, 0x00, 0x00, 0x00,
536 };
537 
538 static const
539 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = {
540 	{ ICE_MAC_OFOS,		0 },
541 	{ ICE_IPV4_OFOS,	14 },
542 	{ ICE_UDP_OF,		34 },
543 	{ ICE_GTP,		42 },
544 	{ ICE_IPV4_IL,		62 },
545 	{ ICE_PROTOCOL_LAST,	0 },
546 };
547 
548 static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
549 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
550 	0x00, 0x00, 0x00, 0x00,
551 	0x00, 0x00, 0x00, 0x00,
552 	0x08, 0x00,
553 
554 	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
555 	0x00, 0x00, 0x40, 0x00,
556 	0x40, 0x11, 0x00, 0x00,
557 	0x00, 0x00, 0x00, 0x00,
558 	0x00, 0x00, 0x00, 0x00,
559 
560 	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
561 	0x00, 0x00, 0x00, 0x00,
562 
563 	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
564 	0x00, 0x00, 0x00, 0x00,
565 	0x00, 0x00, 0x00, 0x85,
566 
567 	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
568 	0x00, 0x00, 0x00, 0x00,
569 
570 	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
571 	0x00, 0x00, 0x40, 0x00,
572 	0x40, 0x00, 0x00, 0x00,
573 	0x00, 0x00, 0x00, 0x00,
574 	0x00, 0x00, 0x00, 0x00,
575 	0x00, 0x00,
576 };
577 
578 static const
579 struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_packet_offsets[] = {
580 	{ ICE_MAC_OFOS,		0 },
581 	{ ICE_IPV4_OFOS,	14 },
582 	{ ICE_UDP_OF,		34 },
583 	{ ICE_GTP,		42 },
584 	{ ICE_IPV6_IL,		62 },
585 	{ ICE_PROTOCOL_LAST,	0 },
586 };
587 
588 static const u8 dummy_ipv4_gtpu_ipv6_packet[] = {
589 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
590 	0x00, 0x00, 0x00, 0x00,
591 	0x00, 0x00, 0x00, 0x00,
592 	0x08, 0x00,
593 
594 	0x45, 0x00, 0x00, 0x58, /* ICE_IPV4_OFOS 14 */
595 	0x00, 0x00, 0x40, 0x00,
596 	0x40, 0x11, 0x00, 0x00,
597 	0x00, 0x00, 0x00, 0x00,
598 	0x00, 0x00, 0x00, 0x00,
599 
600 	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
601 	0x00, 0x00, 0x00, 0x00,
602 
603 	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 42 */
604 	0x00, 0x00, 0x00, 0x00,
605 	0x00, 0x00, 0x00, 0x85,
606 
607 	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
608 	0x00, 0x00, 0x00, 0x00,
609 
610 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 62 */
611 	0x00, 0x00, 0x3b, 0x00,
612 	0x00, 0x00, 0x00, 0x00,
613 	0x00, 0x00, 0x00, 0x00,
614 	0x00, 0x00, 0x00, 0x00,
615 	0x00, 0x00, 0x00, 0x00,
616 	0x00, 0x00, 0x00, 0x00,
617 	0x00, 0x00, 0x00, 0x00,
618 	0x00, 0x00, 0x00, 0x00,
619 	0x00, 0x00, 0x00, 0x00,
620 
621 	0x00, 0x00,
622 };
623 
624 static const
625 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_packet_offsets[] = {
626 	{ ICE_MAC_OFOS,		0 },
627 	{ ICE_IPV6_OFOS,	14 },
628 	{ ICE_UDP_OF,		54 },
629 	{ ICE_GTP,		62 },
630 	{ ICE_IPV4_IL,		82 },
631 	{ ICE_PROTOCOL_LAST,	0 },
632 };
633 
634 static const u8 dummy_ipv6_gtpu_ipv4_packet[] = {
635 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
636 	0x00, 0x00, 0x00, 0x00,
637 	0x00, 0x00, 0x00, 0x00,
638 	0x86, 0xdd,
639 
640 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
641 	0x00, 0x58, 0x11, 0x00, /* Next header UDP*/
642 	0x00, 0x00, 0x00, 0x00,
643 	0x00, 0x00, 0x00, 0x00,
644 	0x00, 0x00, 0x00, 0x00,
645 	0x00, 0x00, 0x00, 0x00,
646 	0x00, 0x00, 0x00, 0x00,
647 	0x00, 0x00, 0x00, 0x00,
648 	0x00, 0x00, 0x00, 0x00,
649 	0x00, 0x00, 0x00, 0x00,
650 
651 	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
652 	0x00, 0x00, 0x00, 0x00,
653 
654 	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
655 	0x00, 0x00, 0x00, 0x00,
656 	0x00, 0x00, 0x00, 0x85,
657 
658 	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
659 	0x00, 0x00, 0x00, 0x00,
660 
661 	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 82 */
662 	0x00, 0x00, 0x40, 0x00,
663 	0x40, 0x00, 0x00, 0x00,
664 	0x00, 0x00, 0x00, 0x00,
665 	0x00, 0x00, 0x00, 0x00,
666 
667 	0x00, 0x00,
668 };
669 
670 static const
671 struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_packet_offsets[] = {
672 	{ ICE_MAC_OFOS,		0 },
673 	{ ICE_IPV6_OFOS,	14 },
674 	{ ICE_UDP_OF,		54 },
675 	{ ICE_GTP,		62 },
676 	{ ICE_IPV6_IL,		82 },
677 	{ ICE_PROTOCOL_LAST,	0 },
678 };
679 
680 static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
681 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
682 	0x00, 0x00, 0x00, 0x00,
683 	0x00, 0x00, 0x00, 0x00,
684 	0x86, 0xdd,
685 
686 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
687 	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
688 	0x00, 0x00, 0x00, 0x00,
689 	0x00, 0x00, 0x00, 0x00,
690 	0x00, 0x00, 0x00, 0x00,
691 	0x00, 0x00, 0x00, 0x00,
692 	0x00, 0x00, 0x00, 0x00,
693 	0x00, 0x00, 0x00, 0x00,
694 	0x00, 0x00, 0x00, 0x00,
695 	0x00, 0x00, 0x00, 0x00,
696 
697 	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
698 	0x00, 0x00, 0x00, 0x00,
699 
700 	0x34, 0xff, 0x00, 0x28,  /* ICE_GTP 62 */
701 	0x00, 0x00, 0x00, 0x00,
702 	0x00, 0x00, 0x00, 0x85,
703 
704 	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
705 	0x00, 0x00, 0x00, 0x00,
706 
707 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFIL 82 */
708 	0x00, 0x00, 0x3b, 0x00,
709 	0x00, 0x00, 0x00, 0x00,
710 	0x00, 0x00, 0x00, 0x00,
711 	0x00, 0x00, 0x00, 0x00,
712 	0x00, 0x00, 0x00, 0x00,
713 	0x00, 0x00, 0x00, 0x00,
714 	0x00, 0x00, 0x00, 0x00,
715 	0x00, 0x00, 0x00, 0x00,
716 	0x00, 0x00, 0x00, 0x00,
717 
718 	0x00, 0x00,
719 };
720 
721 static const
722 struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
723 	{ ICE_MAC_OFOS,		0 },
724 	{ ICE_IPV4_OFOS,	14 },
725 	{ ICE_UDP_OF,		34 },
726 	{ ICE_GTP_NO_PAY,	42 },
727 	{ ICE_PROTOCOL_LAST,	0 },
728 };
729 
730 static const
731 struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
732 	{ ICE_MAC_OFOS,		0 },
733 	{ ICE_IPV6_OFOS,	14 },
734 	{ ICE_UDP_OF,		54 },
735 	{ ICE_GTP_NO_PAY,	62 },
736 	{ ICE_PROTOCOL_LAST,	0 },
737 };
738 
739 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
740 	{ ICE_MAC_OFOS,		0 },
741 	{ ICE_ETYPE_OL,		12 },
742 	{ ICE_VLAN_OFOS,	14},
743 	{ ICE_PPPOE,		18 },
744 	{ ICE_PROTOCOL_LAST,	0 },
745 };
746 
747 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv4_offsets[] = {
748 	{ ICE_MAC_OFOS,		0 },
749 	{ ICE_ETYPE_OL,		12 },
750 	{ ICE_VLAN_OFOS,	14},
751 	{ ICE_PPPOE,		18 },
752 	{ ICE_IPV4_OFOS,	26 },
753 	{ ICE_PROTOCOL_LAST,	0 },
754 };
755 
756 static const u8 dummy_pppoe_ipv4_packet[] = {
757 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
758 	0x00, 0x00, 0x00, 0x00,
759 	0x00, 0x00, 0x00, 0x00,
760 
761 	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
762 
763 	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
764 
765 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
766 	0x00, 0x16,
767 
768 	0x00, 0x21,		/* PPP Link Layer 24 */
769 
770 	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 26 */
771 	0x00, 0x00, 0x00, 0x00,
772 	0x00, 0x00, 0x00, 0x00,
773 	0x00, 0x00, 0x00, 0x00,
774 	0x00, 0x00, 0x00, 0x00,
775 
776 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
777 };
778 
779 static const
780 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_tcp_packet_offsets[] = {
781 	{ ICE_MAC_OFOS,		0 },
782 	{ ICE_ETYPE_OL,		12 },
783 	{ ICE_VLAN_OFOS,	14},
784 	{ ICE_PPPOE,		18 },
785 	{ ICE_IPV4_OFOS,	26 },
786 	{ ICE_TCP_IL,		46 },
787 	{ ICE_PROTOCOL_LAST,	0 },
788 };
789 
790 static const u8 dummy_pppoe_ipv4_tcp_packet[] = {
791 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
792 	0x00, 0x00, 0x00, 0x00,
793 	0x00, 0x00, 0x00, 0x00,
794 
795 	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
796 
797 	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
798 
799 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
800 	0x00, 0x16,
801 
802 	0x00, 0x21,		/* PPP Link Layer 24 */
803 
804 	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 26 */
805 	0x00, 0x01, 0x00, 0x00,
806 	0x00, 0x06, 0x00, 0x00,
807 	0x00, 0x00, 0x00, 0x00,
808 	0x00, 0x00, 0x00, 0x00,
809 
810 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 46 */
811 	0x00, 0x00, 0x00, 0x00,
812 	0x00, 0x00, 0x00, 0x00,
813 	0x50, 0x00, 0x00, 0x00,
814 	0x00, 0x00, 0x00, 0x00,
815 
816 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
817 };
818 
819 static const
820 struct ice_dummy_pkt_offsets dummy_pppoe_ipv4_udp_packet_offsets[] = {
821 	{ ICE_MAC_OFOS,		0 },
822 	{ ICE_ETYPE_OL,		12 },
823 	{ ICE_VLAN_OFOS,	14},
824 	{ ICE_PPPOE,		18 },
825 	{ ICE_IPV4_OFOS,	26 },
826 	{ ICE_UDP_ILOS,		46 },
827 	{ ICE_PROTOCOL_LAST,	0 },
828 };
829 
830 static const u8 dummy_pppoe_ipv4_udp_packet[] = {
831 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
832 	0x00, 0x00, 0x00, 0x00,
833 	0x00, 0x00, 0x00, 0x00,
834 
835 	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
836 
837 	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
838 
839 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
840 	0x00, 0x16,
841 
842 	0x00, 0x21,		/* PPP Link Layer 24 */
843 
844 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 26 */
845 	0x00, 0x01, 0x00, 0x00,
846 	0x00, 0x11, 0x00, 0x00,
847 	0x00, 0x00, 0x00, 0x00,
848 	0x00, 0x00, 0x00, 0x00,
849 
850 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 46 */
851 	0x00, 0x08, 0x00, 0x00,
852 
853 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
854 };
855 
856 static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_offsets[] = {
857 	{ ICE_MAC_OFOS,		0 },
858 	{ ICE_ETYPE_OL,		12 },
859 	{ ICE_VLAN_OFOS,	14},
860 	{ ICE_PPPOE,		18 },
861 	{ ICE_IPV6_OFOS,	26 },
862 	{ ICE_PROTOCOL_LAST,	0 },
863 };
864 
865 static const u8 dummy_pppoe_ipv6_packet[] = {
866 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
867 	0x00, 0x00, 0x00, 0x00,
868 	0x00, 0x00, 0x00, 0x00,
869 
870 	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
871 
872 	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
873 
874 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
875 	0x00, 0x2a,
876 
877 	0x00, 0x57,		/* PPP Link Layer 24 */
878 
879 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
880 	0x00, 0x00, 0x3b, 0x00,
881 	0x00, 0x00, 0x00, 0x00,
882 	0x00, 0x00, 0x00, 0x00,
883 	0x00, 0x00, 0x00, 0x00,
884 	0x00, 0x00, 0x00, 0x00,
885 	0x00, 0x00, 0x00, 0x00,
886 	0x00, 0x00, 0x00, 0x00,
887 	0x00, 0x00, 0x00, 0x00,
888 	0x00, 0x00, 0x00, 0x00,
889 
890 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
891 };
892 
893 static const
894 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_tcp_offsets[] = {
895 	{ ICE_MAC_OFOS,		0 },
896 	{ ICE_ETYPE_OL,		12 },
897 	{ ICE_VLAN_OFOS,	14},
898 	{ ICE_PPPOE,		18 },
899 	{ ICE_IPV6_OFOS,	26 },
900 	{ ICE_TCP_IL,		66 },
901 	{ ICE_PROTOCOL_LAST,	0 },
902 };
903 
904 static const u8 dummy_pppoe_ipv6_tcp_packet[] = {
905 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
906 	0x00, 0x00, 0x00, 0x00,
907 	0x00, 0x00, 0x00, 0x00,
908 
909 	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
910 
911 	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
912 
913 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
914 	0x00, 0x2a,
915 
916 	0x00, 0x57,		/* PPP Link Layer 24 */
917 
918 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
919 	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
920 	0x00, 0x00, 0x00, 0x00,
921 	0x00, 0x00, 0x00, 0x00,
922 	0x00, 0x00, 0x00, 0x00,
923 	0x00, 0x00, 0x00, 0x00,
924 	0x00, 0x00, 0x00, 0x00,
925 	0x00, 0x00, 0x00, 0x00,
926 	0x00, 0x00, 0x00, 0x00,
927 	0x00, 0x00, 0x00, 0x00,
928 
929 	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 66 */
930 	0x00, 0x00, 0x00, 0x00,
931 	0x00, 0x00, 0x00, 0x00,
932 	0x50, 0x00, 0x00, 0x00,
933 	0x00, 0x00, 0x00, 0x00,
934 
935 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
936 };
937 
938 static const
939 struct ice_dummy_pkt_offsets dummy_pppoe_packet_ipv6_udp_offsets[] = {
940 	{ ICE_MAC_OFOS,		0 },
941 	{ ICE_ETYPE_OL,		12 },
942 	{ ICE_VLAN_OFOS,	14},
943 	{ ICE_PPPOE,		18 },
944 	{ ICE_IPV6_OFOS,	26 },
945 	{ ICE_UDP_ILOS,		66 },
946 	{ ICE_PROTOCOL_LAST,	0 },
947 };
948 
949 static const u8 dummy_pppoe_ipv6_udp_packet[] = {
950 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
951 	0x00, 0x00, 0x00, 0x00,
952 	0x00, 0x00, 0x00, 0x00,
953 
954 	0x81, 0x00,		/* ICE_ETYPE_OL 12 */
955 
956 	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 14 */
957 
958 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 18 */
959 	0x00, 0x2a,
960 
961 	0x00, 0x57,		/* PPP Link Layer 24 */
962 
963 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 26 */
964 	0x00, 0x08, 0x11, 0x00, /* Next header UDP*/
965 	0x00, 0x00, 0x00, 0x00,
966 	0x00, 0x00, 0x00, 0x00,
967 	0x00, 0x00, 0x00, 0x00,
968 	0x00, 0x00, 0x00, 0x00,
969 	0x00, 0x00, 0x00, 0x00,
970 	0x00, 0x00, 0x00, 0x00,
971 	0x00, 0x00, 0x00, 0x00,
972 	0x00, 0x00, 0x00, 0x00,
973 
974 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 66 */
975 	0x00, 0x08, 0x00, 0x00,
976 
977 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
978 };
979 
980 static const struct ice_dummy_pkt_offsets dummy_ipv4_esp_packet_offsets[] = {
981 	{ ICE_MAC_OFOS,		0 },
982 	{ ICE_IPV4_OFOS,	14 },
983 	{ ICE_ESP,			34 },
984 	{ ICE_PROTOCOL_LAST,	0 },
985 };
986 
987 static const u8 dummy_ipv4_esp_pkt[] = {
988 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
989 	0x00, 0x00, 0x00, 0x00,
990 	0x00, 0x00, 0x00, 0x00,
991 	0x08, 0x00,
992 
993 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 14 */
994 	0x00, 0x00, 0x40, 0x00,
995 	0x40, 0x32, 0x00, 0x00,
996 	0x00, 0x00, 0x00, 0x00,
997 	0x00, 0x00, 0x00, 0x00,
998 
999 	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 34 */
1000 	0x00, 0x00, 0x00, 0x00,
1001 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1002 };
1003 
1004 static const struct ice_dummy_pkt_offsets dummy_ipv6_esp_packet_offsets[] = {
1005 	{ ICE_MAC_OFOS,		0 },
1006 	{ ICE_IPV6_OFOS,	14 },
1007 	{ ICE_ESP,			54 },
1008 	{ ICE_PROTOCOL_LAST,	0 },
1009 };
1010 
1011 static const u8 dummy_ipv6_esp_pkt[] = {
1012 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1013 	0x00, 0x00, 0x00, 0x00,
1014 	0x00, 0x00, 0x00, 0x00,
1015 	0x86, 0xDD,
1016 
1017 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1018 	0x00, 0x08, 0x32, 0x00, /* Next header ESP */
1019 	0x00, 0x00, 0x00, 0x00,
1020 	0x00, 0x00, 0x00, 0x00,
1021 	0x00, 0x00, 0x00, 0x00,
1022 	0x00, 0x00, 0x00, 0x00,
1023 	0x00, 0x00, 0x00, 0x00,
1024 	0x00, 0x00, 0x00, 0x00,
1025 	0x00, 0x00, 0x00, 0x00,
1026 	0x00, 0x00, 0x00, 0x00,
1027 
1028 	0x00, 0x00, 0x00, 0x00, /* ICE_ESP 54 */
1029 	0x00, 0x00, 0x00, 0x00,
1030 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1031 };
1032 
1033 static const struct ice_dummy_pkt_offsets dummy_ipv4_ah_packet_offsets[] = {
1034 	{ ICE_MAC_OFOS,		0 },
1035 	{ ICE_IPV4_OFOS,	14 },
1036 	{ ICE_AH,			34 },
1037 	{ ICE_PROTOCOL_LAST,	0 },
1038 };
1039 
1040 static const u8 dummy_ipv4_ah_pkt[] = {
1041 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1042 	0x00, 0x00, 0x00, 0x00,
1043 	0x00, 0x00, 0x00, 0x00,
1044 	0x08, 0x00,
1045 
1046 	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1047 	0x00, 0x00, 0x40, 0x00,
1048 	0x40, 0x33, 0x00, 0x00,
1049 	0x00, 0x00, 0x00, 0x00,
1050 	0x00, 0x00, 0x00, 0x00,
1051 
1052 	0x00, 0x00, 0x00, 0x00, /* ICE_AH 34 */
1053 	0x00, 0x00, 0x00, 0x00,
1054 	0x00, 0x00, 0x00, 0x00,
1055 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1056 };
1057 
1058 static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
1059 	{ ICE_MAC_OFOS,		0 },
1060 	{ ICE_IPV6_OFOS,	14 },
1061 	{ ICE_AH,			54 },
1062 	{ ICE_PROTOCOL_LAST,	0 },
1063 };
1064 
1065 static const u8 dummy_ipv6_ah_pkt[] = {
1066 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1067 	0x00, 0x00, 0x00, 0x00,
1068 	0x00, 0x00, 0x00, 0x00,
1069 	0x86, 0xDD,
1070 
1071 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1072 	0x00, 0x0c, 0x33, 0x00, /* Next header AH */
1073 	0x00, 0x00, 0x00, 0x00,
1074 	0x00, 0x00, 0x00, 0x00,
1075 	0x00, 0x00, 0x00, 0x00,
1076 	0x00, 0x00, 0x00, 0x00,
1077 	0x00, 0x00, 0x00, 0x00,
1078 	0x00, 0x00, 0x00, 0x00,
1079 	0x00, 0x00, 0x00, 0x00,
1080 	0x00, 0x00, 0x00, 0x00,
1081 
1082 	0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
1083 	0x00, 0x00, 0x00, 0x00,
1084 	0x00, 0x00, 0x00, 0x00,
1085 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1086 };
1087 
1088 static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
1089 	{ ICE_MAC_OFOS,		0 },
1090 	{ ICE_IPV4_OFOS,	14 },
1091 	{ ICE_UDP_ILOS,		34 },
1092 	{ ICE_NAT_T,		42 },
1093 	{ ICE_PROTOCOL_LAST,	0 },
1094 };
1095 
1096 static const u8 dummy_ipv4_nat_pkt[] = {
1097 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1098 	0x00, 0x00, 0x00, 0x00,
1099 	0x00, 0x00, 0x00, 0x00,
1100 	0x08, 0x00,
1101 
1102 	0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
1103 	0x00, 0x00, 0x40, 0x00,
1104 	0x40, 0x11, 0x00, 0x00,
1105 	0x00, 0x00, 0x00, 0x00,
1106 	0x00, 0x00, 0x00, 0x00,
1107 
1108 	0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
1109 	0x00, 0x00, 0x00, 0x00,
1110 
1111 	0x00, 0x00, 0x00, 0x00,
1112 	0x00, 0x00, 0x00, 0x00,
1113 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1114 };
1115 
1116 static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
1117 	{ ICE_MAC_OFOS,		0 },
1118 	{ ICE_IPV6_OFOS,	14 },
1119 	{ ICE_UDP_ILOS,		54 },
1120 	{ ICE_NAT_T,		62 },
1121 	{ ICE_PROTOCOL_LAST,	0 },
1122 };
1123 
1124 static const u8 dummy_ipv6_nat_pkt[] = {
1125 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1126 	0x00, 0x00, 0x00, 0x00,
1127 	0x00, 0x00, 0x00, 0x00,
1128 	0x86, 0xDD,
1129 
1130 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1131 	0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
1132 	0x00, 0x00, 0x00, 0x00,
1133 	0x00, 0x00, 0x00, 0x00,
1134 	0x00, 0x00, 0x00, 0x00,
1135 	0x00, 0x00, 0x00, 0x00,
1136 	0x00, 0x00, 0x00, 0x00,
1137 	0x00, 0x00, 0x00, 0x00,
1138 	0x00, 0x00, 0x00, 0x00,
1139 	0x00, 0x00, 0x00, 0x00,
1140 
1141 	0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
1142 	0x00, 0x00, 0x00, 0x00,
1143 
1144 	0x00, 0x00, 0x00, 0x00,
1145 	0x00, 0x00, 0x00, 0x00,
1146 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1147 
1148 };
1149 
1150 static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
1151 	{ ICE_MAC_OFOS,		0 },
1152 	{ ICE_IPV4_OFOS,	14 },
1153 	{ ICE_L2TPV3,		34 },
1154 	{ ICE_PROTOCOL_LAST,	0 },
1155 };
1156 
1157 static const u8 dummy_ipv4_l2tpv3_pkt[] = {
1158 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1159 	0x00, 0x00, 0x00, 0x00,
1160 	0x00, 0x00, 0x00, 0x00,
1161 	0x08, 0x00,
1162 
1163 	0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1164 	0x00, 0x00, 0x40, 0x00,
1165 	0x40, 0x73, 0x00, 0x00,
1166 	0x00, 0x00, 0x00, 0x00,
1167 	0x00, 0x00, 0x00, 0x00,
1168 
1169 	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1170 	0x00, 0x00, 0x00, 0x00,
1171 	0x00, 0x00, 0x00, 0x00,
1172 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1173 };
1174 
1175 static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
1176 	{ ICE_MAC_OFOS,		0 },
1177 	{ ICE_IPV6_OFOS,	14 },
1178 	{ ICE_L2TPV3,		54 },
1179 	{ ICE_PROTOCOL_LAST,	0 },
1180 };
1181 
1182 static const u8 dummy_ipv6_l2tpv3_pkt[] = {
1183 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1184 	0x00, 0x00, 0x00, 0x00,
1185 	0x00, 0x00, 0x00, 0x00,
1186 	0x86, 0xDD,
1187 
1188 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1189 	0x00, 0x0c, 0x73, 0x40,
1190 	0x00, 0x00, 0x00, 0x00,
1191 	0x00, 0x00, 0x00, 0x00,
1192 	0x00, 0x00, 0x00, 0x00,
1193 	0x00, 0x00, 0x00, 0x00,
1194 	0x00, 0x00, 0x00, 0x00,
1195 	0x00, 0x00, 0x00, 0x00,
1196 	0x00, 0x00, 0x00, 0x00,
1197 	0x00, 0x00, 0x00, 0x00,
1198 
1199 	0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1200 	0x00, 0x00, 0x00, 0x00,
1201 	0x00, 0x00, 0x00, 0x00,
1202 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1203 };
1204 
1205 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
1206 	{ ICE_MAC_OFOS,		0 },
1207 	{ ICE_VLAN_EX,		14 },
1208 	{ ICE_VLAN_OFOS,	18 },
1209 	{ ICE_IPV4_OFOS,	22 },
1210 	{ ICE_PROTOCOL_LAST,	0 },
1211 };
1212 
1213 static const u8 dummy_qinq_ipv4_pkt[] = {
1214 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1215 	0x00, 0x00, 0x00, 0x00,
1216 	0x00, 0x00, 0x00, 0x00,
1217 	0x91, 0x00,
1218 
1219 	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1220 	0x00, 0x00, 0x08, 0x00, /* ICE_VLAN_OFOS 18 */
1221 
1222 	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1223 	0x00, 0x01, 0x00, 0x00,
1224 	0x00, 0x11, 0x00, 0x00,
1225 	0x00, 0x00, 0x00, 0x00,
1226 	0x00, 0x00, 0x00, 0x00,
1227 
1228 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1229 	0x00, 0x08, 0x00, 0x00,
1230 
1231 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
1232 };
1233 
1234 static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
1235 	{ ICE_MAC_OFOS,		0 },
1236 	{ ICE_VLAN_EX,		14 },
1237 	{ ICE_VLAN_OFOS,	18 },
1238 	{ ICE_IPV6_OFOS,	22 },
1239 	{ ICE_PROTOCOL_LAST,	0 },
1240 };
1241 
1242 static const u8 dummy_qinq_ipv6_pkt[] = {
1243 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1244 	0x00, 0x00, 0x00, 0x00,
1245 	0x00, 0x00, 0x00, 0x00,
1246 	0x91, 0x00,
1247 
1248 	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1249 	0x00, 0x00, 0x86, 0xDD, /* ICE_VLAN_OFOS 18 */
1250 
1251 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1252 	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
1253 	0x00, 0x00, 0x00, 0x00,
1254 	0x00, 0x00, 0x00, 0x00,
1255 	0x00, 0x00, 0x00, 0x00,
1256 	0x00, 0x00, 0x00, 0x00,
1257 	0x00, 0x00, 0x00, 0x00,
1258 	0x00, 0x00, 0x00, 0x00,
1259 	0x00, 0x00, 0x00, 0x00,
1260 	0x00, 0x00, 0x00, 0x00,
1261 
1262 	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1263 	0x00, 0x10, 0x00, 0x00,
1264 
1265 	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
1266 	0x00, 0x00, 0x00, 0x00,
1267 
1268 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
1269 };
1270 
1271 static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
1272 	{ ICE_MAC_OFOS,		0 },
1273 	{ ICE_VLAN_EX,		14 },
1274 	{ ICE_VLAN_OFOS,	18 },
1275 	{ ICE_PPPOE,		22 },
1276 	{ ICE_PROTOCOL_LAST,	0 },
1277 };
1278 
1279 static const
1280 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_ipv4_packet_offsets[] = {
1281 	{ ICE_MAC_OFOS,		0 },
1282 	{ ICE_VLAN_EX,		14 },
1283 	{ ICE_VLAN_OFOS,	18 },
1284 	{ ICE_PPPOE,		22 },
1285 	{ ICE_IPV4_OFOS,	30 },
1286 	{ ICE_PROTOCOL_LAST,	0 },
1287 };
1288 
1289 static const u8 dummy_qinq_pppoe_ipv4_pkt[] = {
1290 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1291 	0x00, 0x00, 0x00, 0x00,
1292 	0x00, 0x00, 0x00, 0x00,
1293 	0x91, 0x00,
1294 
1295 	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1296 	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1297 
1298 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1299 	0x00, 0x16,
1300 
1301 	0x00, 0x21,		/* PPP Link Layer 28 */
1302 
1303 	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 30 */
1304 	0x00, 0x00, 0x00, 0x00,
1305 	0x00, 0x00, 0x00, 0x00,
1306 	0x00, 0x00, 0x00, 0x00,
1307 	0x00, 0x00, 0x00, 0x00,
1308 
1309 	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
1310 };
1311 
1312 static const
1313 struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_ipv6_offsets[] = {
1314 	{ ICE_MAC_OFOS,		0 },
1315 	{ ICE_ETYPE_OL,		12 },
1316 	{ ICE_VLAN_EX,		14},
1317 	{ ICE_VLAN_OFOS,	18 },
1318 	{ ICE_PPPOE,		22 },
1319 	{ ICE_IPV6_OFOS,	30 },
1320 	{ ICE_PROTOCOL_LAST,	0 },
1321 };
1322 
1323 static const u8 dummy_qinq_pppoe_ipv6_packet[] = {
1324 	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1325 	0x00, 0x00, 0x00, 0x00,
1326 	0x00, 0x00, 0x00, 0x00,
1327 
1328 	0x91, 0x00,		/* ICE_ETYPE_OL 12 */
1329 
1330 	0x00, 0x00, 0x81, 0x00, /* ICE_VLAN_EX 14 */
1331 	0x00, 0x00, 0x88, 0x64, /* ICE_VLAN_OFOS 18 */
1332 
1333 	0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 22 */
1334 	0x00, 0x2a,
1335 
1336 	0x00, 0x57,		/* PPP Link Layer 28*/
1337 
1338 	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 30 */
1339 	0x00, 0x00, 0x3b, 0x00,
1340 	0x00, 0x00, 0x00, 0x00,
1341 	0x00, 0x00, 0x00, 0x00,
1342 	0x00, 0x00, 0x00, 0x00,
1343 	0x00, 0x00, 0x00, 0x00,
1344 	0x00, 0x00, 0x00, 0x00,
1345 	0x00, 0x00, 0x00, 0x00,
1346 	0x00, 0x00, 0x00, 0x00,
1347 	0x00, 0x00, 0x00, 0x00,
1348 
1349 	0x00, 0x00,		/* 2 bytes for 4 bytes alignment */
1350 };
1351 
1352 /* this is a recipe to profile association bitmap */
1353 static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
1354 			  ICE_MAX_NUM_PROFILES);
1355 
1356 /* this is a profile to recipe association bitmap */
1357 static ice_declare_bitmap(profile_to_recipe[ICE_MAX_NUM_PROFILES],
1358 			  ICE_MAX_NUM_RECIPES);
1359 
1360 static void ice_get_recp_to_prof_map(struct ice_hw *hw);
1361 
1362 /**
1363  * ice_collect_result_idx - copy result index values
1364  * @buf: buffer that contains the result index
1365  * @recp: the recipe struct to copy data into
1366  */
ice_collect_result_idx(struct ice_aqc_recipe_data_elem * buf,struct ice_sw_recipe * recp)1367 static void ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
1368 				   struct ice_sw_recipe *recp)
1369 {
1370 	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1371 		ice_set_bit(buf->content.result_indx &
1372 			    ~ICE_AQ_RECIPE_RESULT_EN, recp->res_idxs);
1373 }
1374 
1375 /**
1376  * ice_get_tun_type_for_recipe - get tunnel type for the recipe
1377  * @rid: recipe ID that we are populating
1378  */
ice_get_tun_type_for_recipe(u8 rid,bool vlan)1379 static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan)
1380 {
1381 	u8 vxlan_profile[12] = {10, 11, 12, 16, 17, 18, 22, 23, 24, 25, 26, 27};
1382 	u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33};
1383 	u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40};
1384 	u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9};
1385 	enum ice_sw_tunnel_type tun_type;
1386 	u16 i, j, profile_num = 0;
1387 	bool non_tun_valid = false;
1388 	bool pppoe_valid = false;
1389 	bool vxlan_valid = false;
1390 	bool gre_valid = false;
1391 	bool gtp_valid = false;
1392 	bool flag_valid = false;
1393 
1394 	for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1395 		if (!ice_is_bit_set(recipe_to_profile[rid], j))
1396 			continue;
1397 		else
1398 			profile_num++;
1399 
1400 		for (i = 0; i < 12; i++) {
1401 			if (gre_profile[i] == j)
1402 				gre_valid = true;
1403 		}
1404 
1405 		for (i = 0; i < 12; i++) {
1406 			if (vxlan_profile[i] == j)
1407 				vxlan_valid = true;
1408 		}
1409 
1410 		for (i = 0; i < 7; i++) {
1411 			if (pppoe_profile[i] == j)
1412 				pppoe_valid = true;
1413 		}
1414 
1415 		for (i = 0; i < 6; i++) {
1416 			if (non_tun_profile[i] == j)
1417 				non_tun_valid = true;
1418 		}
1419 
1420 		if (j >= ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER &&
1421 		    j <= ICE_PROFID_IPV6_GTPU_IPV6_TCP)
1422 			gtp_valid = true;
1423 
1424 		if ((j >= ICE_PROFID_IPV4_ESP &&
1425 		     j <= ICE_PROFID_IPV6_PFCP_SESSION) ||
1426 		    (j >= ICE_PROFID_IPV4_GTPC_TEID &&
1427 		     j <= ICE_PROFID_IPV6_GTPU_TEID))
1428 			flag_valid = true;
1429 	}
1430 
1431 	if (!non_tun_valid && vxlan_valid)
1432 		tun_type = ICE_SW_TUN_VXLAN;
1433 	else if (!non_tun_valid && gre_valid)
1434 		tun_type = ICE_SW_TUN_NVGRE;
1435 	else if (!non_tun_valid && pppoe_valid)
1436 		tun_type = ICE_SW_TUN_PPPOE;
1437 	else if (!non_tun_valid && gtp_valid)
1438 		tun_type = ICE_SW_TUN_GTP;
1439 	else if (non_tun_valid &&
1440 		 (vxlan_valid || gre_valid || gtp_valid || pppoe_valid))
1441 		tun_type = ICE_SW_TUN_AND_NON_TUN;
1442 	else if (non_tun_valid && !vxlan_valid && !gre_valid && !gtp_valid &&
1443 		 !pppoe_valid)
1444 		tun_type = ICE_NON_TUN;
1445 	else
1446 		tun_type = ICE_NON_TUN;
1447 
1448 	if (profile_num > 1 && tun_type == ICE_SW_TUN_PPPOE) {
1449 		i = ice_is_bit_set(recipe_to_profile[rid],
1450 				   ICE_PROFID_PPPOE_IPV4_OTHER);
1451 		j = ice_is_bit_set(recipe_to_profile[rid],
1452 				   ICE_PROFID_PPPOE_IPV6_OTHER);
1453 		if (i && !j)
1454 			tun_type = ICE_SW_TUN_PPPOE_IPV4;
1455 		else if (!i && j)
1456 			tun_type = ICE_SW_TUN_PPPOE_IPV6;
1457 	}
1458 
1459 	if (tun_type == ICE_SW_TUN_GTP) {
1460 		if (ice_is_bit_set(recipe_to_profile[rid],
1461 				   ICE_PROFID_IPV4_GTPU_IPV4_OTHER))
1462 			tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1463 		else if (ice_is_bit_set(recipe_to_profile[rid],
1464 					ICE_PROFID_IPV4_GTPU_IPV6_OTHER))
1465 			tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1466 		else if (ice_is_bit_set(recipe_to_profile[rid],
1467 					ICE_PROFID_IPV6_GTPU_IPV4_OTHER))
1468 			tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1469 		else if (ice_is_bit_set(recipe_to_profile[rid],
1470 					ICE_PROFID_IPV6_GTPU_IPV6_OTHER))
1471 			tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1472 	}
1473 
1474 	if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) {
1475 		for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) {
1476 			if (ice_is_bit_set(recipe_to_profile[rid], j)) {
1477 				switch (j) {
1478 				case ICE_PROFID_IPV4_TCP:
1479 					tun_type = ICE_SW_IPV4_TCP;
1480 					break;
1481 				case ICE_PROFID_IPV4_UDP:
1482 					tun_type = ICE_SW_IPV4_UDP;
1483 					break;
1484 				case ICE_PROFID_IPV6_TCP:
1485 					tun_type = ICE_SW_IPV6_TCP;
1486 					break;
1487 				case ICE_PROFID_IPV6_UDP:
1488 					tun_type = ICE_SW_IPV6_UDP;
1489 					break;
1490 				case ICE_PROFID_PPPOE_PAY:
1491 					tun_type = ICE_SW_TUN_PPPOE_PAY;
1492 					break;
1493 				case ICE_PROFID_PPPOE_IPV4_TCP:
1494 					tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1495 					break;
1496 				case ICE_PROFID_PPPOE_IPV4_UDP:
1497 					tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1498 					break;
1499 				case ICE_PROFID_PPPOE_IPV4_OTHER:
1500 					tun_type = ICE_SW_TUN_PPPOE_IPV4;
1501 					break;
1502 				case ICE_PROFID_PPPOE_IPV6_TCP:
1503 					tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1504 					break;
1505 				case ICE_PROFID_PPPOE_IPV6_UDP:
1506 					tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1507 					break;
1508 				case ICE_PROFID_PPPOE_IPV6_OTHER:
1509 					tun_type = ICE_SW_TUN_PPPOE_IPV6;
1510 					break;
1511 				case ICE_PROFID_IPV4_ESP:
1512 					tun_type = ICE_SW_TUN_IPV4_ESP;
1513 					break;
1514 				case ICE_PROFID_IPV6_ESP:
1515 					tun_type = ICE_SW_TUN_IPV6_ESP;
1516 					break;
1517 				case ICE_PROFID_IPV4_AH:
1518 					tun_type = ICE_SW_TUN_IPV4_AH;
1519 					break;
1520 				case ICE_PROFID_IPV6_AH:
1521 					tun_type = ICE_SW_TUN_IPV6_AH;
1522 					break;
1523 				case ICE_PROFID_IPV4_NAT_T:
1524 					tun_type = ICE_SW_TUN_IPV4_NAT_T;
1525 					break;
1526 				case ICE_PROFID_IPV6_NAT_T:
1527 					tun_type = ICE_SW_TUN_IPV6_NAT_T;
1528 					break;
1529 				case ICE_PROFID_IPV4_PFCP_NODE:
1530 					tun_type =
1531 					ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1532 					break;
1533 				case ICE_PROFID_IPV6_PFCP_NODE:
1534 					tun_type =
1535 					ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1536 					break;
1537 				case ICE_PROFID_IPV4_PFCP_SESSION:
1538 					tun_type =
1539 					ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1540 					break;
1541 				case ICE_PROFID_IPV6_PFCP_SESSION:
1542 					tun_type =
1543 					ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1544 					break;
1545 				case ICE_PROFID_MAC_IPV4_L2TPV3:
1546 					tun_type = ICE_SW_TUN_IPV4_L2TPV3;
1547 					break;
1548 				case ICE_PROFID_MAC_IPV6_L2TPV3:
1549 					tun_type = ICE_SW_TUN_IPV6_L2TPV3;
1550 					break;
1551 				case ICE_PROFID_IPV4_GTPU_TEID:
1552 					tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1553 					break;
1554 				case ICE_PROFID_IPV6_GTPU_TEID:
1555 					tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1556 					break;
1557 				default:
1558 					break;
1559 				}
1560 
1561 				return tun_type;
1562 			}
1563 		}
1564 	}
1565 
1566 	if (vlan && tun_type == ICE_SW_TUN_PPPOE)
1567 		tun_type = ICE_SW_TUN_PPPOE_QINQ;
1568 	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV6)
1569 		tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1570 	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_IPV4)
1571 		tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1572 	else if (vlan && tun_type == ICE_SW_TUN_PPPOE_PAY)
1573 		tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1574 	else if (vlan && tun_type == ICE_SW_TUN_AND_NON_TUN)
1575 		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1576 	else if (vlan && tun_type == ICE_NON_TUN)
1577 		tun_type = ICE_NON_TUN_QINQ;
1578 
1579 	return tun_type;
1580 }
1581 
1582 /**
1583  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
1584  * @hw: pointer to hardware structure
1585  * @recps: struct that we need to populate
1586  * @rid: recipe ID that we are populating
1587  * @refresh_required: true if we should get recipe to profile mapping from FW
1588  *
1589  * This function is used to populate all the necessary entries into our
1590  * bookkeeping so that we have a current list of all the recipes that are
1591  * programmed in the firmware.
1592  */
1593 static enum ice_status
ice_get_recp_frm_fw(struct ice_hw * hw,struct ice_sw_recipe * recps,u8 rid,bool * refresh_required)1594 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
1595 		    bool *refresh_required)
1596 {
1597 	ice_declare_bitmap(result_bm, ICE_MAX_FV_WORDS);
1598 	struct ice_aqc_recipe_data_elem *tmp;
1599 	u16 num_recps = ICE_MAX_NUM_RECIPES;
1600 	struct ice_prot_lkup_ext *lkup_exts;
1601 	enum ice_status status;
1602 	u8 fv_word_idx = 0;
1603 	bool vlan = false;
1604 	u16 sub_recps;
1605 
1606 	ice_zero_bitmap(result_bm, ICE_MAX_FV_WORDS);
1607 
1608 	/* we need a buffer big enough to accommodate all the recipes */
1609 	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
1610 		ICE_MAX_NUM_RECIPES, sizeof(*tmp));
1611 	if (!tmp)
1612 		return ICE_ERR_NO_MEMORY;
1613 
1614 	tmp[0].recipe_indx = rid;
1615 	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
1616 	/* non-zero status meaning recipe doesn't exist */
1617 	if (status)
1618 		goto err_unroll;
1619 
1620 	/* Get recipe to profile map so that we can get the fv from lkups that
1621 	 * we read for a recipe from FW. Since we want to minimize the number of
1622 	 * times we make this FW call, just make one call and cache the copy
1623 	 * until a new recipe is added. This operation is only required the
1624 	 * first time to get the changes from FW. Then to search existing
1625 	 * entries we don't need to update the cache again until another recipe
1626 	 * gets added.
1627 	 */
1628 	if (*refresh_required) {
1629 		ice_get_recp_to_prof_map(hw);
1630 		*refresh_required = false;
1631 	}
1632 
1633 	/* Start populating all the entries for recps[rid] based on lkups from
1634 	 * firmware. Note that we are only creating the root recipe in our
1635 	 * database.
1636 	 */
1637 	lkup_exts = &recps[rid].lkup_exts;
1638 
1639 	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
1640 		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
1641 		struct ice_recp_grp_entry *rg_entry;
1642 		u8 i, prof, idx, prot = 0;
1643 		bool is_root;
1644 		u16 off = 0;
1645 
1646 		rg_entry = (struct ice_recp_grp_entry *)
1647 			ice_malloc(hw, sizeof(*rg_entry));
1648 		if (!rg_entry) {
1649 			status = ICE_ERR_NO_MEMORY;
1650 			goto err_unroll;
1651 		}
1652 
1653 		idx = root_bufs.recipe_indx;
1654 		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
1655 
1656 		/* Mark all result indices in this chain */
1657 		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
1658 			ice_set_bit(root_bufs.content.result_indx &
1659 				    ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
1660 
1661 		/* get the first profile that is associated with rid */
1662 		prof = ice_find_first_bit(recipe_to_profile[idx],
1663 					  ICE_MAX_NUM_PROFILES);
1664 		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
1665 			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
1666 
1667 			rg_entry->fv_idx[i] = lkup_indx;
1668 			rg_entry->fv_mask[i] =
1669 				LE16_TO_CPU(root_bufs.content.mask[i + 1]);
1670 
1671 			/* If the recipe is a chained recipe then all its
1672 			 * child recipe's result will have a result index.
1673 			 * To fill fv_words we should not use those result
1674 			 * index, we only need the protocol ids and offsets.
1675 			 * We will skip all the fv_idx which stores result
1676 			 * index in them. We also need to skip any fv_idx which
1677 			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
1678 			 * valid offset value.
1679 			 */
1680 			if (ice_is_bit_set(hw->switch_info->prof_res_bm[prof],
1681 					   rg_entry->fv_idx[i]) ||
1682 			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
1683 			    rg_entry->fv_idx[i] == 0)
1684 				continue;
1685 
1686 			ice_find_prot_off(hw, ICE_BLK_SW, prof,
1687 					  rg_entry->fv_idx[i], &prot, &off);
1688 			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
1689 			lkup_exts->fv_words[fv_word_idx].off = off;
1690 			lkup_exts->field_mask[fv_word_idx] =
1691 				rg_entry->fv_mask[i];
1692 			if (prot == ICE_META_DATA_ID_HW &&
1693 			    off == ICE_TUN_FLAG_MDID_OFF)
1694 				vlan = true;
1695 			fv_word_idx++;
1696 		}
1697 		/* populate rg_list with the data from the child entry of this
1698 		 * recipe
1699 		 */
1700 		LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
1701 
1702 		/* Propagate some data to the recipe database */
1703 		recps[idx].is_root = !!is_root;
1704 		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1705 		ice_zero_bitmap(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
1706 		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
1707 			recps[idx].chain_idx = root_bufs.content.result_indx &
1708 				~ICE_AQ_RECIPE_RESULT_EN;
1709 			ice_set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
1710 		} else {
1711 			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
1712 		}
1713 
1714 		if (!is_root)
1715 			continue;
1716 
1717 		/* Only do the following for root recipes entries */
1718 		ice_memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
1719 			   sizeof(recps[idx].r_bitmap), ICE_NONDMA_TO_NONDMA);
1720 		recps[idx].root_rid = root_bufs.content.rid &
1721 			~ICE_AQ_RECIPE_ID_IS_ROOT;
1722 		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
1723 	}
1724 
1725 	/* Complete initialization of the root recipe entry */
1726 	lkup_exts->n_val_words = fv_word_idx;
1727 	recps[rid].big_recp = (num_recps > 1);
1728 	recps[rid].n_grp_count = (u8)num_recps;
1729 	recps[rid].tun_type = ice_get_tun_type_for_recipe(rid, vlan);
1730 	recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
1731 		ice_memdup(hw, tmp, recps[rid].n_grp_count *
1732 			   sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
1733 	if (!recps[rid].root_buf)
1734 		goto err_unroll;
1735 
1736 	/* Copy result indexes */
1737 	ice_cp_bitmap(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
1738 	recps[rid].recp_created = true;
1739 
1740 err_unroll:
1741 	ice_free(hw, tmp);
1742 	return status;
1743 }
1744 
1745 /**
1746  * ice_get_recp_to_prof_map - updates recipe to profile mapping
1747  * @hw: pointer to hardware structure
1748  *
1749  * This function is used to populate recipe_to_profile matrix where index to
1750  * this array is the recipe ID and the element is the mapping of which profiles
1751  * is this recipe mapped to.
1752  */
ice_get_recp_to_prof_map(struct ice_hw * hw)1753 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
1754 {
1755 	ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1756 	u16 i;
1757 
1758 	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
1759 		u16 j;
1760 
1761 		ice_zero_bitmap(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
1762 		ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
1763 		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
1764 			continue;
1765 		ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
1766 			      ICE_MAX_NUM_RECIPES);
1767 		ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
1768 			ice_set_bit(i, recipe_to_profile[j]);
1769 	}
1770 }
1771 
1772 /**
1773  * ice_init_def_sw_recp - initialize the recipe book keeping tables
1774  * @hw: pointer to the HW struct
1775  * @recp_list: pointer to sw recipe list
1776  *
1777  * Allocate memory for the entire recipe table and initialize the structures/
1778  * entries corresponding to basic recipes.
1779  */
1780 enum ice_status
ice_init_def_sw_recp(struct ice_hw * hw,struct ice_sw_recipe ** recp_list)1781 ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
1782 {
1783 	struct ice_sw_recipe *recps;
1784 	u8 i;
1785 
1786 	recps = (struct ice_sw_recipe *)
1787 		ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
1788 	if (!recps)
1789 		return ICE_ERR_NO_MEMORY;
1790 
1791 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1792 		recps[i].root_rid = i;
1793 		INIT_LIST_HEAD(&recps[i].filt_rules);
1794 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1795 		INIT_LIST_HEAD(&recps[i].rg_list);
1796 		ice_init_lock(&recps[i].filt_rule_lock);
1797 	}
1798 
1799 	*recp_list = recps;
1800 
1801 	return ICE_SUCCESS;
1802 }
1803 
1804 /**
1805  * ice_aq_get_sw_cfg - get switch configuration
1806  * @hw: pointer to the hardware structure
1807  * @buf: pointer to the result buffer
1808  * @buf_size: length of the buffer available for response
1809  * @req_desc: pointer to requested descriptor
1810  * @num_elems: pointer to number of elements
1811  * @cd: pointer to command details structure or NULL
1812  *
1813  * Get switch configuration (0x0200) to be placed in buf.
1814  * This admin command returns information such as initial VSI/port number
1815  * and switch ID it belongs to.
1816  *
1817  * NOTE: *req_desc is both an input/output parameter.
1818  * The caller of this function first calls this function with *request_desc set
1819  * to 0. If the response from f/w has *req_desc set to 0, all the switch
1820  * configuration information has been returned; if non-zero (meaning not all
1821  * the information was returned), the caller should call this function again
1822  * with *req_desc set to the previous value returned by f/w to get the
1823  * next block of switch configuration information.
1824  *
1825  * *num_elems is output only parameter. This reflects the number of elements
1826  * in response buffer. The caller of this function to use *num_elems while
1827  * parsing the response buffer.
1828  */
1829 static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw * hw,struct ice_aqc_get_sw_cfg_resp_elem * buf,u16 buf_size,u16 * req_desc,u16 * num_elems,struct ice_sq_cd * cd)1830 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
1831 		  u16 buf_size, u16 *req_desc, u16 *num_elems,
1832 		  struct ice_sq_cd *cd)
1833 {
1834 	struct ice_aqc_get_sw_cfg *cmd;
1835 	struct ice_aq_desc desc;
1836 	enum ice_status status;
1837 
1838 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
1839 	cmd = &desc.params.get_sw_conf;
1840 	cmd->element = CPU_TO_LE16(*req_desc);
1841 
1842 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1843 	if (!status) {
1844 		*req_desc = LE16_TO_CPU(cmd->element);
1845 		*num_elems = LE16_TO_CPU(cmd->num_elems);
1846 	}
1847 
1848 	return status;
1849 }
1850 
1851 /**
1852  * ice_alloc_rss_global_lut - allocate a RSS global LUT
1853  * @hw: pointer to the HW struct
1854  * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
1855  * @global_lut_id: output parameter for the RSS global LUT's ID
1856  */
ice_alloc_rss_global_lut(struct ice_hw * hw,bool shared_res,u16 * global_lut_id)1857 enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
1858 {
1859 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1860 	enum ice_status status;
1861 	u16 buf_len;
1862 
1863 	buf_len = ice_struct_size(sw_buf, elem, 1);
1864 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1865 	if (!sw_buf)
1866 		return ICE_ERR_NO_MEMORY;
1867 
1868 	sw_buf->num_elems = CPU_TO_LE16(1);
1869 	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
1870 				       (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1871 				       ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1872 
1873 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
1874 	if (status) {
1875 		ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
1876 			  shared_res ? "shared" : "dedicated", status);
1877 		goto ice_alloc_global_lut_exit;
1878 	}
1879 
1880 	*global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1881 
1882 ice_alloc_global_lut_exit:
1883 	ice_free(hw, sw_buf);
1884 	return status;
1885 }
1886 
1887 /**
1888  * ice_free_global_lut - free a RSS global LUT
1889  * @hw: pointer to the HW struct
1890  * @global_lut_id: ID of the RSS global LUT to free
1891  */
ice_free_rss_global_lut(struct ice_hw * hw,u16 global_lut_id)1892 enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
1893 {
1894 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1895 	u16 buf_len, num_elems = 1;
1896 	enum ice_status status;
1897 
1898 	buf_len = ice_struct_size(sw_buf, elem, num_elems);
1899 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1900 	if (!sw_buf)
1901 		return ICE_ERR_NO_MEMORY;
1902 
1903 	sw_buf->num_elems = CPU_TO_LE16(num_elems);
1904 	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
1905 	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
1906 
1907 	status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
1908 	if (status)
1909 		ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
1910 			  global_lut_id, status);
1911 
1912 	ice_free(hw, sw_buf);
1913 	return status;
1914 }
1915 
1916 /**
1917  * ice_alloc_sw - allocate resources specific to switch
1918  * @hw: pointer to the HW struct
1919  * @ena_stats: true to turn on VEB stats
1920  * @shared_res: true for shared resource, false for dedicated resource
1921  * @sw_id: switch ID returned
1922  * @counter_id: VEB counter ID returned
1923  *
1924  * allocates switch resources (SWID and VEB counter) (0x0208)
1925  */
1926 enum ice_status
ice_alloc_sw(struct ice_hw * hw,bool ena_stats,bool shared_res,u16 * sw_id,u16 * counter_id)1927 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
1928 	     u16 *counter_id)
1929 {
1930 	struct ice_aqc_alloc_free_res_elem *sw_buf;
1931 	struct ice_aqc_res_elem *sw_ele;
1932 	enum ice_status status;
1933 	u16 buf_len;
1934 
1935 	buf_len = ice_struct_size(sw_buf, elem, 1);
1936 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1937 	if (!sw_buf)
1938 		return ICE_ERR_NO_MEMORY;
1939 
1940 	/* Prepare buffer for switch ID.
1941 	 * The number of resource entries in buffer is passed as 1 since only a
1942 	 * single switch/VEB instance is allocated, and hence a single sw_id
1943 	 * is requested.
1944 	 */
1945 	sw_buf->num_elems = CPU_TO_LE16(1);
1946 	sw_buf->res_type =
1947 		CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
1948 			    (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
1949 			    ICE_AQC_RES_TYPE_FLAG_DEDICATED));
1950 
1951 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1952 				       ice_aqc_opc_alloc_res, NULL);
1953 
1954 	if (status)
1955 		goto ice_alloc_sw_exit;
1956 
1957 	sw_ele = &sw_buf->elem[0];
1958 	*sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
1959 
1960 	if (ena_stats) {
1961 		/* Prepare buffer for VEB Counter */
1962 		enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
1963 		struct ice_aqc_alloc_free_res_elem *counter_buf;
1964 		struct ice_aqc_res_elem *counter_ele;
1965 
1966 		counter_buf = (struct ice_aqc_alloc_free_res_elem *)
1967 				ice_malloc(hw, buf_len);
1968 		if (!counter_buf) {
1969 			status = ICE_ERR_NO_MEMORY;
1970 			goto ice_alloc_sw_exit;
1971 		}
1972 
1973 		/* The number of resource entries in buffer is passed as 1 since
1974 		 * only a single switch/VEB instance is allocated, and hence a
1975 		 * single VEB counter is requested.
1976 		 */
1977 		counter_buf->num_elems = CPU_TO_LE16(1);
1978 		counter_buf->res_type =
1979 			CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
1980 				    ICE_AQC_RES_TYPE_FLAG_DEDICATED);
1981 		status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
1982 					       opc, NULL);
1983 
1984 		if (status) {
1985 			ice_free(hw, counter_buf);
1986 			goto ice_alloc_sw_exit;
1987 		}
1988 		counter_ele = &counter_buf->elem[0];
1989 		*counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
1990 		ice_free(hw, counter_buf);
1991 	}
1992 
1993 ice_alloc_sw_exit:
1994 	ice_free(hw, sw_buf);
1995 	return status;
1996 }
1997 
1998 /**
1999  * ice_free_sw - free resources specific to switch
2000  * @hw: pointer to the HW struct
2001  * @sw_id: switch ID returned
2002  * @counter_id: VEB counter ID returned
2003  *
2004  * free switch resources (SWID and VEB counter) (0x0209)
2005  *
2006  * NOTE: This function frees multiple resources. It continues
2007  * releasing other resources even after it encounters error.
2008  * The error code returned is the last error it encountered.
2009  */
ice_free_sw(struct ice_hw * hw,u16 sw_id,u16 counter_id)2010 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
2011 {
2012 	struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
2013 	enum ice_status status, ret_status;
2014 	u16 buf_len;
2015 
2016 	buf_len = ice_struct_size(sw_buf, elem, 1);
2017 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2018 	if (!sw_buf)
2019 		return ICE_ERR_NO_MEMORY;
2020 
2021 	/* Prepare buffer to free for switch ID res.
2022 	 * The number of resource entries in buffer is passed as 1 since only a
2023 	 * single switch/VEB instance is freed, and hence a single sw_id
2024 	 * is released.
2025 	 */
2026 	sw_buf->num_elems = CPU_TO_LE16(1);
2027 	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
2028 	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
2029 
2030 	ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2031 					   ice_aqc_opc_free_res, NULL);
2032 
2033 	if (ret_status)
2034 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2035 
2036 	/* Prepare buffer to free for VEB Counter resource */
2037 	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
2038 			ice_malloc(hw, buf_len);
2039 	if (!counter_buf) {
2040 		ice_free(hw, sw_buf);
2041 		return ICE_ERR_NO_MEMORY;
2042 	}
2043 
2044 	/* The number of resource entries in buffer is passed as 1 since only a
2045 	 * single switch/VEB instance is freed, and hence a single VEB counter
2046 	 * is released
2047 	 */
2048 	counter_buf->num_elems = CPU_TO_LE16(1);
2049 	counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
2050 	counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
2051 
2052 	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
2053 				       ice_aqc_opc_free_res, NULL);
2054 	if (status) {
2055 		ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
2056 		ret_status = status;
2057 	}
2058 
2059 	ice_free(hw, counter_buf);
2060 	ice_free(hw, sw_buf);
2061 	return ret_status;
2062 }
2063 
2064 /**
2065  * ice_aq_add_vsi
2066  * @hw: pointer to the HW struct
2067  * @vsi_ctx: pointer to a VSI context struct
2068  * @cd: pointer to command details structure or NULL
2069  *
2070  * Add a VSI context to the hardware (0x0210)
2071  */
2072 enum ice_status
ice_aq_add_vsi(struct ice_hw * hw,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)2073 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2074 	       struct ice_sq_cd *cd)
2075 {
2076 	struct ice_aqc_add_update_free_vsi_resp *res;
2077 	struct ice_aqc_add_get_update_free_vsi *cmd;
2078 	struct ice_aq_desc desc;
2079 	enum ice_status status;
2080 
2081 	cmd = &desc.params.vsi_cmd;
2082 	res = &desc.params.add_update_free_vsi_res;
2083 
2084 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
2085 
2086 	if (!vsi_ctx->alloc_from_pool)
2087 		cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
2088 					   ICE_AQ_VSI_IS_VALID);
2089 
2090 	cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
2091 
2092 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2093 
2094 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2095 				 sizeof(vsi_ctx->info), cd);
2096 
2097 	if (!status) {
2098 		vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
2099 		vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
2100 		vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
2101 	}
2102 
2103 	return status;
2104 }
2105 
2106 /**
2107  * ice_aq_free_vsi
2108  * @hw: pointer to the HW struct
2109  * @vsi_ctx: pointer to a VSI context struct
2110  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2111  * @cd: pointer to command details structure or NULL
2112  *
2113  * Free VSI context info from hardware (0x0213)
2114  */
2115 enum ice_status
ice_aq_free_vsi(struct ice_hw * hw,struct ice_vsi_ctx * vsi_ctx,bool keep_vsi_alloc,struct ice_sq_cd * cd)2116 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2117 		bool keep_vsi_alloc, struct ice_sq_cd *cd)
2118 {
2119 	struct ice_aqc_add_update_free_vsi_resp *resp;
2120 	struct ice_aqc_add_get_update_free_vsi *cmd;
2121 	struct ice_aq_desc desc;
2122 	enum ice_status status;
2123 
2124 	cmd = &desc.params.vsi_cmd;
2125 	resp = &desc.params.add_update_free_vsi_res;
2126 
2127 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
2128 
2129 	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2130 	if (keep_vsi_alloc)
2131 		cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
2132 
2133 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2134 	if (!status) {
2135 		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2136 		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2137 	}
2138 
2139 	return status;
2140 }
2141 
2142 /**
2143  * ice_aq_update_vsi
2144  * @hw: pointer to the HW struct
2145  * @vsi_ctx: pointer to a VSI context struct
2146  * @cd: pointer to command details structure or NULL
2147  *
2148  * Update VSI context in the hardware (0x0211)
2149  */
2150 enum ice_status
ice_aq_update_vsi(struct ice_hw * hw,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)2151 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2152 		  struct ice_sq_cd *cd)
2153 {
2154 	struct ice_aqc_add_update_free_vsi_resp *resp;
2155 	struct ice_aqc_add_get_update_free_vsi *cmd;
2156 	struct ice_aq_desc desc;
2157 	enum ice_status status;
2158 
2159 	cmd = &desc.params.vsi_cmd;
2160 	resp = &desc.params.add_update_free_vsi_res;
2161 
2162 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
2163 
2164 	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2165 
2166 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2167 
2168 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2169 				 sizeof(vsi_ctx->info), cd);
2170 
2171 	if (!status) {
2172 		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2173 		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2174 	}
2175 
2176 	return status;
2177 }
2178 
2179 /**
2180  * ice_is_vsi_valid - check whether the VSI is valid or not
2181  * @hw: pointer to the HW struct
2182  * @vsi_handle: VSI handle
2183  *
2184  * check whether the VSI is valid or not
2185  */
ice_is_vsi_valid(struct ice_hw * hw,u16 vsi_handle)2186 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
2187 {
2188 	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
2189 }
2190 
2191 /**
2192  * ice_get_hw_vsi_num - return the HW VSI number
2193  * @hw: pointer to the HW struct
2194  * @vsi_handle: VSI handle
2195  *
2196  * return the HW VSI number
2197  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
2198  */
ice_get_hw_vsi_num(struct ice_hw * hw,u16 vsi_handle)2199 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
2200 {
2201 	return hw->vsi_ctx[vsi_handle]->vsi_num;
2202 }
2203 
2204 /**
2205  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
2206  * @hw: pointer to the HW struct
2207  * @vsi_handle: VSI handle
2208  *
2209  * return the VSI context entry for a given VSI handle
2210  */
ice_get_vsi_ctx(struct ice_hw * hw,u16 vsi_handle)2211 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2212 {
2213 	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
2214 }
2215 
2216 /**
2217  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
2218  * @hw: pointer to the HW struct
2219  * @vsi_handle: VSI handle
2220  * @vsi: VSI context pointer
2221  *
2222  * save the VSI context entry for a given VSI handle
2223  */
2224 static void
ice_save_vsi_ctx(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi)2225 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
2226 {
2227 	hw->vsi_ctx[vsi_handle] = vsi;
2228 }
2229 
2230 /**
2231  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
2232  * @hw: pointer to the HW struct
2233  * @vsi_handle: VSI handle
2234  */
ice_clear_vsi_q_ctx(struct ice_hw * hw,u16 vsi_handle)2235 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
2236 {
2237 	struct ice_vsi_ctx *vsi;
2238 	u8 i;
2239 
2240 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
2241 	if (!vsi)
2242 		return;
2243 	ice_for_each_traffic_class(i) {
2244 		if (vsi->lan_q_ctx[i]) {
2245 			ice_free(hw, vsi->lan_q_ctx[i]);
2246 			vsi->lan_q_ctx[i] = NULL;
2247 		}
2248 	}
2249 }
2250 
2251 /**
2252  * ice_clear_vsi_ctx - clear the VSI context entry
2253  * @hw: pointer to the HW struct
2254  * @vsi_handle: VSI handle
2255  *
2256  * clear the VSI context entry
2257  */
ice_clear_vsi_ctx(struct ice_hw * hw,u16 vsi_handle)2258 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
2259 {
2260 	struct ice_vsi_ctx *vsi;
2261 
2262 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
2263 	if (vsi) {
2264 		ice_clear_vsi_q_ctx(hw, vsi_handle);
2265 		ice_free(hw, vsi);
2266 		hw->vsi_ctx[vsi_handle] = NULL;
2267 	}
2268 }
2269 
2270 /**
2271  * ice_clear_all_vsi_ctx - clear all the VSI context entries
2272  * @hw: pointer to the HW struct
2273  */
ice_clear_all_vsi_ctx(struct ice_hw * hw)2274 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
2275 {
2276 	u16 i;
2277 
2278 	for (i = 0; i < ICE_MAX_VSI; i++)
2279 		ice_clear_vsi_ctx(hw, i);
2280 }
2281 
2282 /**
2283  * ice_add_vsi - add VSI context to the hardware and VSI handle list
2284  * @hw: pointer to the HW struct
2285  * @vsi_handle: unique VSI handle provided by drivers
2286  * @vsi_ctx: pointer to a VSI context struct
2287  * @cd: pointer to command details structure or NULL
2288  *
2289  * Add a VSI context to the hardware also add it into the VSI handle list.
2290  * If this function gets called after reset for existing VSIs then update
2291  * with the new HW VSI number in the corresponding VSI handle list entry.
2292  */
2293 enum ice_status
ice_add_vsi(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)2294 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2295 	    struct ice_sq_cd *cd)
2296 {
2297 	struct ice_vsi_ctx *tmp_vsi_ctx;
2298 	enum ice_status status;
2299 
2300 	if (vsi_handle >= ICE_MAX_VSI)
2301 		return ICE_ERR_PARAM;
2302 	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
2303 	if (status)
2304 		return status;
2305 	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
2306 	if (!tmp_vsi_ctx) {
2307 		/* Create a new VSI context */
2308 		tmp_vsi_ctx = (struct ice_vsi_ctx *)
2309 			ice_malloc(hw, sizeof(*tmp_vsi_ctx));
2310 		if (!tmp_vsi_ctx) {
2311 			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
2312 			return ICE_ERR_NO_MEMORY;
2313 		}
2314 		*tmp_vsi_ctx = *vsi_ctx;
2315 
2316 		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
2317 	} else {
2318 		/* update with new HW VSI num */
2319 		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
2320 	}
2321 
2322 	return ICE_SUCCESS;
2323 }
2324 
2325 /**
2326  * ice_free_vsi- free VSI context from hardware and VSI handle list
2327  * @hw: pointer to the HW struct
2328  * @vsi_handle: unique VSI handle
2329  * @vsi_ctx: pointer to a VSI context struct
2330  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
2331  * @cd: pointer to command details structure or NULL
2332  *
2333  * Free VSI context info from hardware as well as from VSI handle list
2334  */
2335 enum ice_status
ice_free_vsi(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi_ctx,bool keep_vsi_alloc,struct ice_sq_cd * cd)2336 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2337 	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
2338 {
2339 	enum ice_status status;
2340 
2341 	if (!ice_is_vsi_valid(hw, vsi_handle))
2342 		return ICE_ERR_PARAM;
2343 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2344 	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
2345 	if (!status)
2346 		ice_clear_vsi_ctx(hw, vsi_handle);
2347 	return status;
2348 }
2349 
2350 /**
2351  * ice_update_vsi
2352  * @hw: pointer to the HW struct
2353  * @vsi_handle: unique VSI handle
2354  * @vsi_ctx: pointer to a VSI context struct
2355  * @cd: pointer to command details structure or NULL
2356  *
2357  * Update VSI context in the hardware
2358  */
2359 enum ice_status
ice_update_vsi(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)2360 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
2361 	       struct ice_sq_cd *cd)
2362 {
2363 	if (!ice_is_vsi_valid(hw, vsi_handle))
2364 		return ICE_ERR_PARAM;
2365 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
2366 	return ice_aq_update_vsi(hw, vsi_ctx, cd);
2367 }
2368 
2369 /**
2370  * ice_aq_get_vsi_params
2371  * @hw: pointer to the HW struct
2372  * @vsi_ctx: pointer to a VSI context struct
2373  * @cd: pointer to command details structure or NULL
2374  *
2375  * Get VSI context info from hardware (0x0212)
2376  */
2377 enum ice_status
ice_aq_get_vsi_params(struct ice_hw * hw,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)2378 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
2379 		      struct ice_sq_cd *cd)
2380 {
2381 	struct ice_aqc_add_get_update_free_vsi *cmd;
2382 	struct ice_aqc_get_vsi_resp *resp;
2383 	struct ice_aq_desc desc;
2384 	enum ice_status status;
2385 
2386 	cmd = &desc.params.vsi_cmd;
2387 	resp = &desc.params.get_vsi_resp;
2388 
2389 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
2390 
2391 	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
2392 
2393 	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
2394 				 sizeof(vsi_ctx->info), cd);
2395 	if (!status) {
2396 		vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
2397 					ICE_AQ_VSI_NUM_M;
2398 		vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
2399 		vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
2400 	}
2401 
2402 	return status;
2403 }
2404 
2405 /**
2406  * ice_aq_add_update_mir_rule - add/update a mirror rule
2407  * @hw: pointer to the HW struct
2408  * @rule_type: Rule Type
2409  * @dest_vsi: VSI number to which packets will be mirrored
2410  * @count: length of the list
2411  * @mr_buf: buffer for list of mirrored VSI numbers
2412  * @cd: pointer to command details structure or NULL
2413  * @rule_id: Rule ID
2414  *
2415  * Add/Update Mirror Rule (0x260).
2416  */
2417 enum ice_status
ice_aq_add_update_mir_rule(struct ice_hw * hw,u16 rule_type,u16 dest_vsi,u16 count,struct ice_mir_rule_buf * mr_buf,struct ice_sq_cd * cd,u16 * rule_id)2418 ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
2419 			   u16 count, struct ice_mir_rule_buf *mr_buf,
2420 			   struct ice_sq_cd *cd, u16 *rule_id)
2421 {
2422 	struct ice_aqc_add_update_mir_rule *cmd;
2423 	struct ice_aq_desc desc;
2424 	enum ice_status status;
2425 	__le16 *mr_list = NULL;
2426 	u16 buf_size = 0;
2427 
2428 	switch (rule_type) {
2429 	case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
2430 	case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
2431 		/* Make sure count and mr_buf are set for these rule_types */
2432 		if (!(count && mr_buf))
2433 			return ICE_ERR_PARAM;
2434 
2435 		buf_size = count * sizeof(__le16);
2436 		mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
2437 		if (!mr_list)
2438 			return ICE_ERR_NO_MEMORY;
2439 		break;
2440 	case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
2441 	case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
2442 		/* Make sure count and mr_buf are not set for these
2443 		 * rule_types
2444 		 */
2445 		if (count || mr_buf)
2446 			return ICE_ERR_PARAM;
2447 		break;
2448 	default:
2449 		ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
2450 		return ICE_ERR_OUT_OF_RANGE;
2451 	}
2452 
2453 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
2454 
2455 	/* Pre-process 'mr_buf' items for add/update of virtual port
2456 	 * ingress/egress mirroring (but not physical port ingress/egress
2457 	 * mirroring)
2458 	 */
2459 	if (mr_buf) {
2460 		int i;
2461 
2462 		for (i = 0; i < count; i++) {
2463 			u16 id;
2464 
2465 			id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
2466 
2467 			/* Validate specified VSI number, make sure it is less
2468 			 * than ICE_MAX_VSI, if not return with error.
2469 			 */
2470 			if (id >= ICE_MAX_VSI) {
2471 				ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
2472 					  id);
2473 				ice_free(hw, mr_list);
2474 				return ICE_ERR_OUT_OF_RANGE;
2475 			}
2476 
2477 			/* add VSI to mirror rule */
2478 			if (mr_buf[i].add)
2479 				mr_list[i] =
2480 					CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
2481 			else /* remove VSI from mirror rule */
2482 				mr_list[i] = CPU_TO_LE16(id);
2483 		}
2484 	}
2485 
2486 	cmd = &desc.params.add_update_rule;
2487 	if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
2488 		cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
2489 					   ICE_AQC_RULE_ID_VALID_M);
2490 	cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
2491 	cmd->num_entries = CPU_TO_LE16(count);
2492 	cmd->dest = CPU_TO_LE16(dest_vsi);
2493 
2494 	status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
2495 	if (!status)
2496 		*rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
2497 
2498 	ice_free(hw, mr_list);
2499 
2500 	return status;
2501 }
2502 
2503 /**
2504  * ice_aq_delete_mir_rule - delete a mirror rule
2505  * @hw: pointer to the HW struct
2506  * @rule_id: Mirror rule ID (to be deleted)
2507  * @keep_allocd: if set, the VSI stays part of the PF allocated res,
2508  *		 otherwise it is returned to the shared pool
2509  * @cd: pointer to command details structure or NULL
2510  *
2511  * Delete Mirror Rule (0x261).
2512  */
2513 enum ice_status
ice_aq_delete_mir_rule(struct ice_hw * hw,u16 rule_id,bool keep_allocd,struct ice_sq_cd * cd)2514 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
2515 		       struct ice_sq_cd *cd)
2516 {
2517 	struct ice_aqc_delete_mir_rule *cmd;
2518 	struct ice_aq_desc desc;
2519 
2520 	/* rule_id should be in the range 0...63 */
2521 	if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
2522 		return ICE_ERR_OUT_OF_RANGE;
2523 
2524 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
2525 
2526 	cmd = &desc.params.del_rule;
2527 	rule_id |= ICE_AQC_RULE_ID_VALID_M;
2528 	cmd->rule_id = CPU_TO_LE16(rule_id);
2529 
2530 	if (keep_allocd)
2531 		cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
2532 
2533 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2534 }
2535 
2536 /**
2537  * ice_aq_alloc_free_vsi_list
2538  * @hw: pointer to the HW struct
2539  * @vsi_list_id: VSI list ID returned or used for lookup
2540  * @lkup_type: switch rule filter lookup type
2541  * @opc: switch rules population command type - pass in the command opcode
2542  *
2543  * allocates or free a VSI list resource
2544  */
2545 static enum ice_status
ice_aq_alloc_free_vsi_list(struct ice_hw * hw,u16 * vsi_list_id,enum ice_sw_lkup_type lkup_type,enum ice_adminq_opc opc)2546 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
2547 			   enum ice_sw_lkup_type lkup_type,
2548 			   enum ice_adminq_opc opc)
2549 {
2550 	struct ice_aqc_alloc_free_res_elem *sw_buf;
2551 	struct ice_aqc_res_elem *vsi_ele;
2552 	enum ice_status status;
2553 	u16 buf_len;
2554 
2555 	buf_len = ice_struct_size(sw_buf, elem, 1);
2556 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2557 	if (!sw_buf)
2558 		return ICE_ERR_NO_MEMORY;
2559 	sw_buf->num_elems = CPU_TO_LE16(1);
2560 
2561 	if (lkup_type == ICE_SW_LKUP_MAC ||
2562 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2563 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2564 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2565 	    lkup_type == ICE_SW_LKUP_PROMISC ||
2566 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2567 	    lkup_type == ICE_SW_LKUP_LAST) {
2568 		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
2569 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
2570 		sw_buf->res_type =
2571 			CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
2572 	} else {
2573 		status = ICE_ERR_PARAM;
2574 		goto ice_aq_alloc_free_vsi_list_exit;
2575 	}
2576 
2577 	if (opc == ice_aqc_opc_free_res)
2578 		sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
2579 
2580 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
2581 	if (status)
2582 		goto ice_aq_alloc_free_vsi_list_exit;
2583 
2584 	if (opc == ice_aqc_opc_alloc_res) {
2585 		vsi_ele = &sw_buf->elem[0];
2586 		*vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
2587 	}
2588 
2589 ice_aq_alloc_free_vsi_list_exit:
2590 	ice_free(hw, sw_buf);
2591 	return status;
2592 }
2593 
2594 /**
2595  * ice_aq_set_storm_ctrl - Sets storm control configuration
2596  * @hw: pointer to the HW struct
2597  * @bcast_thresh: represents the upper threshold for broadcast storm control
2598  * @mcast_thresh: represents the upper threshold for multicast storm control
2599  * @ctl_bitmask: storm control knobs
2600  *
2601  * Sets the storm control configuration (0x0280)
2602  */
2603 enum ice_status
ice_aq_set_storm_ctrl(struct ice_hw * hw,u32 bcast_thresh,u32 mcast_thresh,u32 ctl_bitmask)2604 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
2605 		      u32 ctl_bitmask)
2606 {
2607 	struct ice_aqc_storm_cfg *cmd;
2608 	struct ice_aq_desc desc;
2609 
2610 	cmd = &desc.params.storm_conf;
2611 
2612 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
2613 
2614 	cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
2615 	cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
2616 	cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
2617 
2618 	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2619 }
2620 
2621 /**
2622  * ice_aq_get_storm_ctrl - gets storm control configuration
2623  * @hw: pointer to the HW struct
2624  * @bcast_thresh: represents the upper threshold for broadcast storm control
2625  * @mcast_thresh: represents the upper threshold for multicast storm control
2626  * @ctl_bitmask: storm control knobs
2627  *
2628  * Gets the storm control configuration (0x0281)
2629  */
2630 enum ice_status
ice_aq_get_storm_ctrl(struct ice_hw * hw,u32 * bcast_thresh,u32 * mcast_thresh,u32 * ctl_bitmask)2631 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
2632 		      u32 *ctl_bitmask)
2633 {
2634 	enum ice_status status;
2635 	struct ice_aq_desc desc;
2636 
2637 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
2638 
2639 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2640 	if (!status) {
2641 		struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
2642 
2643 		if (bcast_thresh)
2644 			*bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
2645 				ICE_AQ_THRESHOLD_M;
2646 		if (mcast_thresh)
2647 			*mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
2648 				ICE_AQ_THRESHOLD_M;
2649 		if (ctl_bitmask)
2650 			*ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
2651 	}
2652 
2653 	return status;
2654 }
2655 
2656 /**
2657  * ice_aq_sw_rules - add/update/remove switch rules
2658  * @hw: pointer to the HW struct
2659  * @rule_list: pointer to switch rule population list
2660  * @rule_list_sz: total size of the rule list in bytes
2661  * @num_rules: number of switch rules in the rule_list
2662  * @opc: switch rules population command type - pass in the command opcode
2663  * @cd: pointer to command details structure or NULL
2664  *
2665  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
2666  */
2667 static enum ice_status
ice_aq_sw_rules(struct ice_hw * hw,void * rule_list,u16 rule_list_sz,u8 num_rules,enum ice_adminq_opc opc,struct ice_sq_cd * cd)2668 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
2669 		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2670 {
2671 	struct ice_aq_desc desc;
2672 	enum ice_status status;
2673 
2674 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2675 
2676 	if (opc != ice_aqc_opc_add_sw_rules &&
2677 	    opc != ice_aqc_opc_update_sw_rules &&
2678 	    opc != ice_aqc_opc_remove_sw_rules)
2679 		return ICE_ERR_PARAM;
2680 
2681 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2682 
2683 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2684 	desc.params.sw_rules.num_rules_fltr_entry_index =
2685 		CPU_TO_LE16(num_rules);
2686 	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
2687 	if (opc != ice_aqc_opc_add_sw_rules &&
2688 	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
2689 		status = ICE_ERR_DOES_NOT_EXIST;
2690 
2691 	return status;
2692 }
2693 
2694 /**
2695  * ice_aq_add_recipe - add switch recipe
2696  * @hw: pointer to the HW struct
2697  * @s_recipe_list: pointer to switch rule population list
2698  * @num_recipes: number of switch recipes in the list
2699  * @cd: pointer to command details structure or NULL
2700  *
2701  * Add(0x0290)
2702  */
2703 enum ice_status
ice_aq_add_recipe(struct ice_hw * hw,struct ice_aqc_recipe_data_elem * s_recipe_list,u16 num_recipes,struct ice_sq_cd * cd)2704 ice_aq_add_recipe(struct ice_hw *hw,
2705 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
2706 		  u16 num_recipes, struct ice_sq_cd *cd)
2707 {
2708 	struct ice_aqc_add_get_recipe *cmd;
2709 	struct ice_aq_desc desc;
2710 	u16 buf_size;
2711 
2712 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2713 	cmd = &desc.params.add_get_recipe;
2714 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
2715 
2716 	cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
2717 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2718 
2719 	buf_size = num_recipes * sizeof(*s_recipe_list);
2720 
2721 	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2722 }
2723 
2724 /**
2725  * ice_aq_get_recipe - get switch recipe
2726  * @hw: pointer to the HW struct
2727  * @s_recipe_list: pointer to switch rule population list
2728  * @num_recipes: pointer to the number of recipes (input and output)
2729  * @recipe_root: root recipe number of recipe(s) to retrieve
2730  * @cd: pointer to command details structure or NULL
2731  *
2732  * Get(0x0292)
2733  *
2734  * On input, *num_recipes should equal the number of entries in s_recipe_list.
2735  * On output, *num_recipes will equal the number of entries returned in
2736  * s_recipe_list.
2737  *
2738  * The caller must supply enough space in s_recipe_list to hold all possible
2739  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
2740  */
2741 enum ice_status
ice_aq_get_recipe(struct ice_hw * hw,struct ice_aqc_recipe_data_elem * s_recipe_list,u16 * num_recipes,u16 recipe_root,struct ice_sq_cd * cd)2742 ice_aq_get_recipe(struct ice_hw *hw,
2743 		  struct ice_aqc_recipe_data_elem *s_recipe_list,
2744 		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
2745 {
2746 	struct ice_aqc_add_get_recipe *cmd;
2747 	struct ice_aq_desc desc;
2748 	enum ice_status status;
2749 	u16 buf_size;
2750 
2751 	if (*num_recipes != ICE_MAX_NUM_RECIPES)
2752 		return ICE_ERR_PARAM;
2753 
2754 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2755 	cmd = &desc.params.add_get_recipe;
2756 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
2757 
2758 	cmd->return_index = CPU_TO_LE16(recipe_root);
2759 	cmd->num_sub_recipes = 0;
2760 
2761 	buf_size = *num_recipes * sizeof(*s_recipe_list);
2762 
2763 	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
2764 	/* cppcheck-suppress constArgument */
2765 	*num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
2766 
2767 	return status;
2768 }
2769 
2770 /**
2771  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
2772  * @hw: pointer to the HW struct
2773  * @profile_id: package profile ID to associate the recipe with
2774  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2775  * @cd: pointer to command details structure or NULL
2776  * Recipe to profile association (0x0291)
2777  */
2778 enum ice_status
ice_aq_map_recipe_to_profile(struct ice_hw * hw,u32 profile_id,u8 * r_bitmap,struct ice_sq_cd * cd)2779 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2780 			     struct ice_sq_cd *cd)
2781 {
2782 	struct ice_aqc_recipe_to_profile *cmd;
2783 	struct ice_aq_desc desc;
2784 
2785 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2786 	cmd = &desc.params.recipe_to_profile;
2787 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
2788 	cmd->profile_id = CPU_TO_LE16(profile_id);
2789 	/* Set the recipe ID bit in the bitmask to let the device know which
2790 	 * profile we are associating the recipe to
2791 	 */
2792 	ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
2793 		   ICE_NONDMA_TO_NONDMA);
2794 
2795 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2796 }
2797 
2798 /**
2799  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
2800  * @hw: pointer to the HW struct
2801  * @profile_id: package profile ID to associate the recipe with
2802  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
2803  * @cd: pointer to command details structure or NULL
2804  * Associate profile ID with given recipe (0x0293)
2805  */
2806 enum ice_status
ice_aq_get_recipe_to_profile(struct ice_hw * hw,u32 profile_id,u8 * r_bitmap,struct ice_sq_cd * cd)2807 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
2808 			     struct ice_sq_cd *cd)
2809 {
2810 	struct ice_aqc_recipe_to_profile *cmd;
2811 	struct ice_aq_desc desc;
2812 	enum ice_status status;
2813 
2814 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2815 	cmd = &desc.params.recipe_to_profile;
2816 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
2817 	cmd->profile_id = CPU_TO_LE16(profile_id);
2818 
2819 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2820 	if (!status)
2821 		ice_memcpy(r_bitmap, cmd->recipe_assoc,
2822 			   sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
2823 
2824 	return status;
2825 }
2826 
2827 /**
2828  * ice_alloc_recipe - add recipe resource
2829  * @hw: pointer to the hardware structure
2830  * @rid: recipe ID returned as response to AQ call
2831  */
ice_alloc_recipe(struct ice_hw * hw,u16 * rid)2832 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
2833 {
2834 	struct ice_aqc_alloc_free_res_elem *sw_buf;
2835 	enum ice_status status;
2836 	u16 buf_len;
2837 
2838 	buf_len = ice_struct_size(sw_buf, elem, 1);
2839 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2840 	if (!sw_buf)
2841 		return ICE_ERR_NO_MEMORY;
2842 
2843 	sw_buf->num_elems = CPU_TO_LE16(1);
2844 	sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
2845 					ICE_AQC_RES_TYPE_S) |
2846 					ICE_AQC_RES_TYPE_FLAG_SHARED);
2847 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
2848 				       ice_aqc_opc_alloc_res, NULL);
2849 	if (!status)
2850 		*rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
2851 	ice_free(hw, sw_buf);
2852 
2853 	return status;
2854 }
2855 
2856 /* ice_init_port_info - Initialize port_info with switch configuration data
2857  * @pi: pointer to port_info
2858  * @vsi_port_num: VSI number or port number
2859  * @type: Type of switch element (port or VSI)
2860  * @swid: switch ID of the switch the element is attached to
2861  * @pf_vf_num: PF or VF number
2862  * @is_vf: true if the element is a VF, false otherwise
2863  */
2864 static void
ice_init_port_info(struct ice_port_info * pi,u16 vsi_port_num,u8 type,u16 swid,u16 pf_vf_num,bool is_vf)2865 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2866 		   u16 swid, u16 pf_vf_num, bool is_vf)
2867 {
2868 	switch (type) {
2869 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2870 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2871 		pi->sw_id = swid;
2872 		pi->pf_vf_num = pf_vf_num;
2873 		pi->is_vf = is_vf;
2874 		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2875 		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2876 		break;
2877 	default:
2878 		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2879 		break;
2880 	}
2881 }
2882 
2883 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2884  * @hw: pointer to the hardware structure
2885  */
ice_get_initial_sw_cfg(struct ice_hw * hw)2886 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
2887 {
2888 	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2889 	enum ice_status status;
2890 	u8 num_total_ports;
2891 	u16 req_desc = 0;
2892 	u16 num_elems;
2893 	u8 j = 0;
2894 	u16 i;
2895 
2896 	num_total_ports = 1;
2897 
2898 	rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
2899 		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
2900 
2901 	if (!rbuf)
2902 		return ICE_ERR_NO_MEMORY;
2903 
2904 	/* Multiple calls to ice_aq_get_sw_cfg may be required
2905 	 * to get all the switch configuration information. The need
2906 	 * for additional calls is indicated by ice_aq_get_sw_cfg
2907 	 * writing a non-zero value in req_desc
2908 	 */
2909 	do {
2910 		struct ice_aqc_get_sw_cfg_resp_elem *ele;
2911 
2912 		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2913 					   &req_desc, &num_elems, NULL);
2914 
2915 		if (status)
2916 			break;
2917 
2918 		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2919 			u16 pf_vf_num, swid, vsi_port_num;
2920 			bool is_vf = false;
2921 			u8 res_type;
2922 
2923 			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
2924 				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2925 
2926 			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
2927 				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2928 
2929 			swid = LE16_TO_CPU(ele->swid);
2930 
2931 			if (LE16_TO_CPU(ele->pf_vf_num) &
2932 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2933 				is_vf = true;
2934 
2935 			res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
2936 					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2937 
2938 			switch (res_type) {
2939 			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2940 			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
2941 				if (j == num_total_ports) {
2942 					ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
2943 					status = ICE_ERR_CFG;
2944 					goto out;
2945 				}
2946 				ice_init_port_info(hw->port_info,
2947 						   vsi_port_num, res_type, swid,
2948 						   pf_vf_num, is_vf);
2949 				j++;
2950 				break;
2951 			default:
2952 				break;
2953 			}
2954 		}
2955 	} while (req_desc && !status);
2956 
2957 out:
2958 	ice_free(hw, rbuf);
2959 	return status;
2960 }
2961 
2962 /**
2963  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2964  * @hw: pointer to the hardware structure
2965  * @fi: filter info structure to fill/update
2966  *
2967  * This helper function populates the lb_en and lan_en elements of the provided
2968  * ice_fltr_info struct using the switch's type and characteristics of the
2969  * switch rule being configured.
2970  */
ice_fill_sw_info(struct ice_hw * hw,struct ice_fltr_info * fi)2971 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2972 {
2973 	if ((fi->flag & ICE_FLTR_RX) &&
2974 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
2975 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST) &&
2976 	    fi->lkup_type == ICE_SW_LKUP_LAST)
2977 		fi->lan_en = true;
2978 	fi->lb_en = false;
2979 	fi->lan_en = false;
2980 	if ((fi->flag & ICE_FLTR_TX) &&
2981 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
2982 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2983 	     fi->fltr_act == ICE_FWD_TO_Q ||
2984 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2985 		/* Setting LB for prune actions will result in replicated
2986 		 * packets to the internal switch that will be dropped.
2987 		 */
2988 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2989 			fi->lb_en = true;
2990 
2991 		/* Set lan_en to TRUE if
2992 		 * 1. The switch is a VEB AND
2993 		 * 2
2994 		 * 2.1 The lookup is a directional lookup like ethertype,
2995 		 * promiscuous, ethertype-MAC, promiscuous-VLAN
2996 		 * and default-port OR
2997 		 * 2.2 The lookup is VLAN, OR
2998 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2999 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
3000 		 *
3001 		 * OR
3002 		 *
3003 		 * The switch is a VEPA.
3004 		 *
3005 		 * In all other cases, the LAN enable has to be set to false.
3006 		 */
3007 		if (hw->evb_veb) {
3008 			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3009 			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
3010 			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3011 			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3012 			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
3013 			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
3014 			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
3015 			     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
3016 			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
3017 			     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
3018 				fi->lan_en = true;
3019 		} else {
3020 			fi->lan_en = true;
3021 		}
3022 	}
3023 }
3024 
3025 /**
3026  * ice_fill_sw_rule - Helper function to fill switch rule structure
3027  * @hw: pointer to the hardware structure
3028  * @f_info: entry containing packet forwarding information
3029  * @s_rule: switch rule structure to be filled in based on mac_entry
3030  * @opc: switch rules population command type - pass in the command opcode
3031  */
3032 static void
ice_fill_sw_rule(struct ice_hw * hw,struct ice_fltr_info * f_info,struct ice_aqc_sw_rules_elem * s_rule,enum ice_adminq_opc opc)3033 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
3034 		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
3035 {
3036 	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
3037 	void *daddr = NULL;
3038 	u16 eth_hdr_sz;
3039 	u8 *eth_hdr;
3040 	u32 act = 0;
3041 	__be16 *off;
3042 	u8 q_rgn;
3043 
3044 	if (opc == ice_aqc_opc_remove_sw_rules) {
3045 		s_rule->pdata.lkup_tx_rx.act = 0;
3046 		s_rule->pdata.lkup_tx_rx.index =
3047 			CPU_TO_LE16(f_info->fltr_rule_id);
3048 		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
3049 		return;
3050 	}
3051 
3052 	eth_hdr_sz = sizeof(dummy_eth_header);
3053 	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
3054 
3055 	/* initialize the ether header with a dummy header */
3056 	ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
3057 	ice_fill_sw_info(hw, f_info);
3058 
3059 	switch (f_info->fltr_act) {
3060 	case ICE_FWD_TO_VSI:
3061 		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
3062 			ICE_SINGLE_ACT_VSI_ID_M;
3063 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3064 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3065 				ICE_SINGLE_ACT_VALID_BIT;
3066 		break;
3067 	case ICE_FWD_TO_VSI_LIST:
3068 		act |= ICE_SINGLE_ACT_VSI_LIST;
3069 		act |= (f_info->fwd_id.vsi_list_id <<
3070 			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
3071 			ICE_SINGLE_ACT_VSI_LIST_ID_M;
3072 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
3073 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
3074 				ICE_SINGLE_ACT_VALID_BIT;
3075 		break;
3076 	case ICE_FWD_TO_Q:
3077 		act |= ICE_SINGLE_ACT_TO_Q;
3078 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3079 			ICE_SINGLE_ACT_Q_INDEX_M;
3080 		break;
3081 	case ICE_DROP_PACKET:
3082 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
3083 			ICE_SINGLE_ACT_VALID_BIT;
3084 		break;
3085 	case ICE_FWD_TO_QGRP:
3086 		q_rgn = f_info->qgrp_size > 0 ?
3087 			(u8)ice_ilog2(f_info->qgrp_size) : 0;
3088 		act |= ICE_SINGLE_ACT_TO_Q;
3089 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
3090 			ICE_SINGLE_ACT_Q_INDEX_M;
3091 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
3092 			ICE_SINGLE_ACT_Q_REGION_M;
3093 		break;
3094 	default:
3095 		return;
3096 	}
3097 
3098 	if (f_info->lb_en)
3099 		act |= ICE_SINGLE_ACT_LB_ENABLE;
3100 	if (f_info->lan_en)
3101 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
3102 
3103 	switch (f_info->lkup_type) {
3104 	case ICE_SW_LKUP_MAC:
3105 		daddr = f_info->l_data.mac.mac_addr;
3106 		break;
3107 	case ICE_SW_LKUP_VLAN:
3108 		vlan_id = f_info->l_data.vlan.vlan_id;
3109 		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
3110 		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3111 			act |= ICE_SINGLE_ACT_PRUNE;
3112 			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
3113 		}
3114 		break;
3115 	case ICE_SW_LKUP_ETHERTYPE_MAC:
3116 		daddr = f_info->l_data.ethertype_mac.mac_addr;
3117 		/* fall-through */
3118 	case ICE_SW_LKUP_ETHERTYPE:
3119 		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
3120 		*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
3121 		break;
3122 	case ICE_SW_LKUP_MAC_VLAN:
3123 		daddr = f_info->l_data.mac_vlan.mac_addr;
3124 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
3125 		break;
3126 	case ICE_SW_LKUP_PROMISC_VLAN:
3127 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
3128 		/* fall-through */
3129 	case ICE_SW_LKUP_PROMISC:
3130 		daddr = f_info->l_data.mac_vlan.mac_addr;
3131 		break;
3132 	default:
3133 		break;
3134 	}
3135 
3136 	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
3137 		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
3138 		CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
3139 
3140 	/* Recipe set depending on lookup type */
3141 	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
3142 	s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
3143 	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3144 
3145 	if (daddr)
3146 		ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
3147 			   ICE_NONDMA_TO_NONDMA);
3148 
3149 	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
3150 		off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
3151 		*off = CPU_TO_BE16(vlan_id);
3152 	}
3153 
3154 	/* Create the switch rule with the final dummy Ethernet header */
3155 	if (opc != ice_aqc_opc_update_sw_rules)
3156 		s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
3157 }
3158 
3159 /**
3160  * ice_add_marker_act
3161  * @hw: pointer to the hardware structure
3162  * @m_ent: the management entry for which sw marker needs to be added
3163  * @sw_marker: sw marker to tag the Rx descriptor with
3164  * @l_id: large action resource ID
3165  *
3166  * Create a large action to hold software marker and update the switch rule
3167  * entry pointed by m_ent with newly created large action
3168  */
3169 static enum ice_status
ice_add_marker_act(struct ice_hw * hw,struct ice_fltr_mgmt_list_entry * m_ent,u16 sw_marker,u16 l_id)3170 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3171 		   u16 sw_marker, u16 l_id)
3172 {
3173 	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
3174 	/* For software marker we need 3 large actions
3175 	 * 1. FWD action: FWD TO VSI or VSI LIST
3176 	 * 2. GENERIC VALUE action to hold the profile ID
3177 	 * 3. GENERIC VALUE action to hold the software marker ID
3178 	 */
3179 	const u16 num_lg_acts = 3;
3180 	enum ice_status status;
3181 	u16 lg_act_size;
3182 	u16 rules_size;
3183 	u32 act;
3184 	u16 id;
3185 
3186 	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3187 		return ICE_ERR_PARAM;
3188 
3189 	/* Create two back-to-back switch rules and submit them to the HW using
3190 	 * one memory buffer:
3191 	 *    1. Large Action
3192 	 *    2. Look up Tx Rx
3193 	 */
3194 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
3195 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3196 	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3197 	if (!lg_act)
3198 		return ICE_ERR_NO_MEMORY;
3199 
3200 	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3201 
3202 	/* Fill in the first switch rule i.e. large action */
3203 	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3204 	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3205 	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
3206 
3207 	/* First action VSI forwarding or VSI list forwarding depending on how
3208 	 * many VSIs
3209 	 */
3210 	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
3211 		m_ent->fltr_info.fwd_id.hw_vsi_id;
3212 
3213 	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3214 	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
3215 	if (m_ent->vsi_count > 1)
3216 		act |= ICE_LG_ACT_VSI_LIST;
3217 	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3218 
3219 	/* Second action descriptor type */
3220 	act = ICE_LG_ACT_GENERIC;
3221 
3222 	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
3223 	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3224 
3225 	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
3226 	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
3227 
3228 	/* Third action Marker value */
3229 	act |= ICE_LG_ACT_GENERIC;
3230 	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
3231 		ICE_LG_ACT_GENERIC_VALUE_M;
3232 
3233 	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
3234 
3235 	/* call the fill switch rule to fill the lookup Tx Rx structure */
3236 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3237 			 ice_aqc_opc_update_sw_rules);
3238 
3239 	/* Update the action to point to the large action ID */
3240 	rx_tx->pdata.lkup_tx_rx.act =
3241 		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
3242 			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
3243 			     ICE_SINGLE_ACT_PTR_VAL_M));
3244 
3245 	/* Use the filter rule ID of the previously created rule with single
3246 	 * act. Once the update happens, hardware will treat this as large
3247 	 * action
3248 	 */
3249 	rx_tx->pdata.lkup_tx_rx.index =
3250 		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
3251 
3252 	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3253 				 ice_aqc_opc_update_sw_rules, NULL);
3254 	if (!status) {
3255 		m_ent->lg_act_idx = l_id;
3256 		m_ent->sw_marker_id = sw_marker;
3257 	}
3258 
3259 	ice_free(hw, lg_act);
3260 	return status;
3261 }
3262 
3263 /**
3264  * ice_add_counter_act - add/update filter rule with counter action
3265  * @hw: pointer to the hardware structure
3266  * @m_ent: the management entry for which counter needs to be added
3267  * @counter_id: VLAN counter ID returned as part of allocate resource
3268  * @l_id: large action resource ID
3269  */
3270 static enum ice_status
ice_add_counter_act(struct ice_hw * hw,struct ice_fltr_mgmt_list_entry * m_ent,u16 counter_id,u16 l_id)3271 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
3272 		    u16 counter_id, u16 l_id)
3273 {
3274 	struct ice_aqc_sw_rules_elem *lg_act;
3275 	struct ice_aqc_sw_rules_elem *rx_tx;
3276 	enum ice_status status;
3277 	/* 2 actions will be added while adding a large action counter */
3278 	const int num_acts = 2;
3279 	u16 lg_act_size;
3280 	u16 rules_size;
3281 	u16 f_rule_id;
3282 	u32 act;
3283 	u16 id;
3284 
3285 	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
3286 		return ICE_ERR_PARAM;
3287 
3288 	/* Create two back-to-back switch rules and submit them to the HW using
3289 	 * one memory buffer:
3290 	 * 1. Large Action
3291 	 * 2. Look up Tx Rx
3292 	 */
3293 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
3294 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
3295 	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
3296 	if (!lg_act)
3297 		return ICE_ERR_NO_MEMORY;
3298 
3299 	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
3300 
3301 	/* Fill in the first switch rule i.e. large action */
3302 	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
3303 	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
3304 	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
3305 
3306 	/* First action VSI forwarding or VSI list forwarding depending on how
3307 	 * many VSIs
3308 	 */
3309 	id = (m_ent->vsi_count > 1) ?  m_ent->fltr_info.fwd_id.vsi_list_id :
3310 		m_ent->fltr_info.fwd_id.hw_vsi_id;
3311 
3312 	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
3313 	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
3314 		ICE_LG_ACT_VSI_LIST_ID_M;
3315 	if (m_ent->vsi_count > 1)
3316 		act |= ICE_LG_ACT_VSI_LIST;
3317 	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
3318 
3319 	/* Second action counter ID */
3320 	act = ICE_LG_ACT_STAT_COUNT;
3321 	act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
3322 		ICE_LG_ACT_STAT_COUNT_M;
3323 	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
3324 
3325 	/* call the fill switch rule to fill the lookup Tx Rx structure */
3326 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
3327 			 ice_aqc_opc_update_sw_rules);
3328 
3329 	act = ICE_SINGLE_ACT_PTR;
3330 	act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
3331 	rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
3332 
3333 	/* Use the filter rule ID of the previously created rule with single
3334 	 * act. Once the update happens, hardware will treat this as large
3335 	 * action
3336 	 */
3337 	f_rule_id = m_ent->fltr_info.fltr_rule_id;
3338 	rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
3339 
3340 	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
3341 				 ice_aqc_opc_update_sw_rules, NULL);
3342 	if (!status) {
3343 		m_ent->lg_act_idx = l_id;
3344 		m_ent->counter_index = counter_id;
3345 	}
3346 
3347 	ice_free(hw, lg_act);
3348 	return status;
3349 }
3350 
3351 /**
3352  * ice_create_vsi_list_map
3353  * @hw: pointer to the hardware structure
3354  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
3355  * @num_vsi: number of VSI handles in the array
3356  * @vsi_list_id: VSI list ID generated as part of allocate resource
3357  *
3358  * Helper function to create a new entry of VSI list ID to VSI mapping
3359  * using the given VSI list ID
3360  */
3361 static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 vsi_list_id)3362 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3363 			u16 vsi_list_id)
3364 {
3365 	struct ice_switch_info *sw = hw->switch_info;
3366 	struct ice_vsi_list_map_info *v_map;
3367 	int i;
3368 
3369 	v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
3370 	if (!v_map)
3371 		return NULL;
3372 
3373 	v_map->vsi_list_id = vsi_list_id;
3374 	v_map->ref_cnt = 1;
3375 	for (i = 0; i < num_vsi; i++)
3376 		ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
3377 
3378 	LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
3379 	return v_map;
3380 }
3381 
3382 /**
3383  * ice_update_vsi_list_rule
3384  * @hw: pointer to the hardware structure
3385  * @vsi_handle_arr: array of VSI handles to form a VSI list
3386  * @num_vsi: number of VSI handles in the array
3387  * @vsi_list_id: VSI list ID generated as part of allocate resource
3388  * @remove: Boolean value to indicate if this is a remove action
3389  * @opc: switch rules population command type - pass in the command opcode
3390  * @lkup_type: lookup type of the filter
3391  *
3392  * Call AQ command to add a new switch rule or update existing switch rule
3393  * using the given VSI list ID
3394  */
3395 static enum ice_status
ice_update_vsi_list_rule(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 vsi_list_id,bool remove,enum ice_adminq_opc opc,enum ice_sw_lkup_type lkup_type)3396 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3397 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
3398 			 enum ice_sw_lkup_type lkup_type)
3399 {
3400 	struct ice_aqc_sw_rules_elem *s_rule;
3401 	enum ice_status status;
3402 	u16 s_rule_size;
3403 	u16 rule_type;
3404 	int i;
3405 
3406 	if (!num_vsi)
3407 		return ICE_ERR_PARAM;
3408 
3409 	if (lkup_type == ICE_SW_LKUP_MAC ||
3410 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
3411 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
3412 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
3413 	    lkup_type == ICE_SW_LKUP_PROMISC ||
3414 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
3415 	    lkup_type == ICE_SW_LKUP_LAST)
3416 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
3417 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
3418 	else if (lkup_type == ICE_SW_LKUP_VLAN)
3419 		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
3420 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
3421 	else
3422 		return ICE_ERR_PARAM;
3423 
3424 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
3425 	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3426 	if (!s_rule)
3427 		return ICE_ERR_NO_MEMORY;
3428 	for (i = 0; i < num_vsi; i++) {
3429 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
3430 			status = ICE_ERR_PARAM;
3431 			goto exit;
3432 		}
3433 		/* AQ call requires hw_vsi_id(s) */
3434 		s_rule->pdata.vsi_list.vsi[i] =
3435 			CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
3436 	}
3437 
3438 	s_rule->type = CPU_TO_LE16(rule_type);
3439 	s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
3440 	s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
3441 
3442 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
3443 
3444 exit:
3445 	ice_free(hw, s_rule);
3446 	return status;
3447 }
3448 
3449 /**
3450  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
3451  * @hw: pointer to the HW struct
3452  * @vsi_handle_arr: array of VSI handles to form a VSI list
3453  * @num_vsi: number of VSI handles in the array
3454  * @vsi_list_id: stores the ID of the VSI list to be created
3455  * @lkup_type: switch rule filter's lookup type
3456  */
3457 static enum ice_status
ice_create_vsi_list_rule(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 * vsi_list_id,enum ice_sw_lkup_type lkup_type)3458 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
3459 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
3460 {
3461 	enum ice_status status;
3462 
3463 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
3464 					    ice_aqc_opc_alloc_res);
3465 	if (status)
3466 		return status;
3467 
3468 	/* Update the newly created VSI list to include the specified VSIs */
3469 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
3470 					*vsi_list_id, false,
3471 					ice_aqc_opc_add_sw_rules, lkup_type);
3472 }
3473 
3474 /**
3475  * ice_create_pkt_fwd_rule
3476  * @hw: pointer to the hardware structure
3477  * @recp_list: corresponding filter management list
3478  * @f_entry: entry containing packet forwarding information
3479  *
3480  * Create switch rule with given filter information and add an entry
3481  * to the corresponding filter management list to track this switch rule
3482  * and VSI mapping
3483  */
3484 static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw * hw,struct ice_sw_recipe * recp_list,struct ice_fltr_list_entry * f_entry)3485 ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3486 			struct ice_fltr_list_entry *f_entry)
3487 {
3488 	struct ice_fltr_mgmt_list_entry *fm_entry;
3489 	struct ice_aqc_sw_rules_elem *s_rule;
3490 	enum ice_status status;
3491 
3492 	s_rule = (struct ice_aqc_sw_rules_elem *)
3493 		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3494 	if (!s_rule)
3495 		return ICE_ERR_NO_MEMORY;
3496 	fm_entry = (struct ice_fltr_mgmt_list_entry *)
3497 		   ice_malloc(hw, sizeof(*fm_entry));
3498 	if (!fm_entry) {
3499 		status = ICE_ERR_NO_MEMORY;
3500 		goto ice_create_pkt_fwd_rule_exit;
3501 	}
3502 
3503 	fm_entry->fltr_info = f_entry->fltr_info;
3504 
3505 	/* Initialize all the fields for the management entry */
3506 	fm_entry->vsi_count = 1;
3507 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
3508 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
3509 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
3510 
3511 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
3512 			 ice_aqc_opc_add_sw_rules);
3513 
3514 	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3515 				 ice_aqc_opc_add_sw_rules, NULL);
3516 	if (status) {
3517 		ice_free(hw, fm_entry);
3518 		goto ice_create_pkt_fwd_rule_exit;
3519 	}
3520 
3521 	f_entry->fltr_info.fltr_rule_id =
3522 		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3523 	fm_entry->fltr_info.fltr_rule_id =
3524 		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3525 
3526 	/* The book keeping entries will get removed when base driver
3527 	 * calls remove filter AQ command
3528 	 */
3529 	LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
3530 
3531 ice_create_pkt_fwd_rule_exit:
3532 	ice_free(hw, s_rule);
3533 	return status;
3534 }
3535 
3536 /**
3537  * ice_update_pkt_fwd_rule
3538  * @hw: pointer to the hardware structure
3539  * @f_info: filter information for switch rule
3540  *
3541  * Call AQ command to update a previously created switch rule with a
3542  * VSI list ID
3543  */
3544 static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw * hw,struct ice_fltr_info * f_info)3545 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
3546 {
3547 	struct ice_aqc_sw_rules_elem *s_rule;
3548 	enum ice_status status;
3549 
3550 	s_rule = (struct ice_aqc_sw_rules_elem *)
3551 		ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
3552 	if (!s_rule)
3553 		return ICE_ERR_NO_MEMORY;
3554 
3555 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
3556 
3557 	s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
3558 
3559 	/* Update switch rule with new rule set to forward VSI list */
3560 	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
3561 				 ice_aqc_opc_update_sw_rules, NULL);
3562 
3563 	ice_free(hw, s_rule);
3564 	return status;
3565 }
3566 
3567 /**
3568  * ice_update_sw_rule_bridge_mode
3569  * @hw: pointer to the HW struct
3570  *
3571  * Updates unicast switch filter rules based on VEB/VEPA mode
3572  */
ice_update_sw_rule_bridge_mode(struct ice_hw * hw)3573 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
3574 {
3575 	struct ice_switch_info *sw = hw->switch_info;
3576 	struct ice_fltr_mgmt_list_entry *fm_entry;
3577 	enum ice_status status = ICE_SUCCESS;
3578 	struct LIST_HEAD_TYPE *rule_head;
3579 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3580 
3581 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3582 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3583 
3584 	ice_acquire_lock(rule_lock);
3585 	LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
3586 			    list_entry) {
3587 		struct ice_fltr_info *fi = &fm_entry->fltr_info;
3588 		u8 *addr = fi->l_data.mac.mac_addr;
3589 
3590 		/* Update unicast Tx rules to reflect the selected
3591 		 * VEB/VEPA mode
3592 		 */
3593 		if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
3594 		    (fi->fltr_act == ICE_FWD_TO_VSI ||
3595 		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
3596 		     fi->fltr_act == ICE_FWD_TO_Q ||
3597 		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
3598 			status = ice_update_pkt_fwd_rule(hw, fi);
3599 			if (status)
3600 				break;
3601 		}
3602 	}
3603 
3604 	ice_release_lock(rule_lock);
3605 
3606 	return status;
3607 }
3608 
3609 /**
3610  * ice_add_update_vsi_list
3611  * @hw: pointer to the hardware structure
3612  * @m_entry: pointer to current filter management list entry
3613  * @cur_fltr: filter information from the book keeping entry
3614  * @new_fltr: filter information with the new VSI to be added
3615  *
3616  * Call AQ command to add or update previously created VSI list with new VSI.
3617  *
3618  * Helper function to do book keeping associated with adding filter information
3619  * The algorithm to do the book keeping is described below :
3620  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
3621  *	if only one VSI has been added till now
3622  *		Allocate a new VSI list and add two VSIs
3623  *		to this list using switch rule command
3624  *		Update the previously created switch rule with the
3625  *		newly created VSI list ID
3626  *	if a VSI list was previously created
3627  *		Add the new VSI to the previously created VSI list set
3628  *		using the update switch rule command
3629  */
3630 static enum ice_status
ice_add_update_vsi_list(struct ice_hw * hw,struct ice_fltr_mgmt_list_entry * m_entry,struct ice_fltr_info * cur_fltr,struct ice_fltr_info * new_fltr)3631 ice_add_update_vsi_list(struct ice_hw *hw,
3632 			struct ice_fltr_mgmt_list_entry *m_entry,
3633 			struct ice_fltr_info *cur_fltr,
3634 			struct ice_fltr_info *new_fltr)
3635 {
3636 	enum ice_status status = ICE_SUCCESS;
3637 	u16 vsi_list_id = 0;
3638 
3639 	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
3640 	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
3641 		return ICE_ERR_NOT_IMPL;
3642 
3643 	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3644 	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3645 	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3646 	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3647 		return ICE_ERR_NOT_IMPL;
3648 
3649 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3650 		/* Only one entry existed in the mapping and it was not already
3651 		 * a part of a VSI list. So, create a VSI list with the old and
3652 		 * new VSIs.
3653 		 */
3654 		struct ice_fltr_info tmp_fltr;
3655 		u16 vsi_handle_arr[2];
3656 
3657 		/* A rule already exists with the new VSI being added */
3658 		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3659 			return ICE_ERR_ALREADY_EXISTS;
3660 
3661 		vsi_handle_arr[0] = cur_fltr->vsi_handle;
3662 		vsi_handle_arr[1] = new_fltr->vsi_handle;
3663 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3664 						  &vsi_list_id,
3665 						  new_fltr->lkup_type);
3666 		if (status)
3667 			return status;
3668 
3669 		tmp_fltr = *new_fltr;
3670 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3671 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3672 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3673 		/* Update the previous switch rule of "MAC forward to VSI" to
3674 		 * "MAC fwd to VSI list"
3675 		 */
3676 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3677 		if (status)
3678 			return status;
3679 
3680 		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3681 		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3682 		m_entry->vsi_list_info =
3683 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3684 						vsi_list_id);
3685 
3686 		/* If this entry was large action then the large action needs
3687 		 * to be updated to point to FWD to VSI list
3688 		 */
3689 		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3690 			status =
3691 			    ice_add_marker_act(hw, m_entry,
3692 					       m_entry->sw_marker_id,
3693 					       m_entry->lg_act_idx);
3694 	} else {
3695 		u16 vsi_handle = new_fltr->vsi_handle;
3696 		enum ice_adminq_opc opcode;
3697 
3698 		if (!m_entry->vsi_list_info)
3699 			return ICE_ERR_CFG;
3700 
3701 		/* A rule already exists with the new VSI being added */
3702 		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
3703 			return ICE_SUCCESS;
3704 
3705 		/* Update the previously created VSI list set with
3706 		 * the new VSI ID passed in
3707 		 */
3708 		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3709 		opcode = ice_aqc_opc_update_sw_rules;
3710 
3711 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3712 						  vsi_list_id, false, opcode,
3713 						  new_fltr->lkup_type);
3714 		/* update VSI list mapping info with new VSI ID */
3715 		if (!status)
3716 			ice_set_bit(vsi_handle,
3717 				    m_entry->vsi_list_info->vsi_map);
3718 	}
3719 	if (!status)
3720 		m_entry->vsi_count++;
3721 	return status;
3722 }
3723 
3724 /**
3725  * ice_find_rule_entry - Search a rule entry
3726  * @list_head: head of rule list
3727  * @f_info: rule information
3728  *
3729  * Helper function to search for a given rule entry
3730  * Returns pointer to entry storing the rule if found
3731  */
3732 static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct LIST_HEAD_TYPE * list_head,struct ice_fltr_info * f_info)3733 ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
3734 		    struct ice_fltr_info *f_info)
3735 {
3736 	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3737 
3738 	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3739 			    list_entry) {
3740 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3741 			    sizeof(f_info->l_data)) &&
3742 		    f_info->flag == list_itr->fltr_info.flag) {
3743 			ret = list_itr;
3744 			break;
3745 		}
3746 	}
3747 	return ret;
3748 }
3749 
3750 /**
3751  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3752  * @recp_list: VSI lists needs to be searched
3753  * @vsi_handle: VSI handle to be found in VSI list
3754  * @vsi_list_id: VSI list ID found containing vsi_handle
3755  *
3756  * Helper function to search a VSI list with single entry containing given VSI
3757  * handle element. This can be extended further to search VSI list with more
3758  * than 1 vsi_count. Returns pointer to VSI list entry if found.
3759  */
3760 static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_sw_recipe * recp_list,u16 vsi_handle,u16 * vsi_list_id)3761 ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
3762 			u16 *vsi_list_id)
3763 {
3764 	struct ice_vsi_list_map_info *map_info = NULL;
3765 	struct LIST_HEAD_TYPE *list_head;
3766 
3767 	list_head = &recp_list->filt_rules;
3768 	if (recp_list->adv_rule) {
3769 		struct ice_adv_fltr_mgmt_list_entry *list_itr;
3770 
3771 		LIST_FOR_EACH_ENTRY(list_itr, list_head,
3772 				    ice_adv_fltr_mgmt_list_entry,
3773 				    list_entry) {
3774 			if (list_itr->vsi_list_info) {
3775 				map_info = list_itr->vsi_list_info;
3776 				if (ice_is_bit_set(map_info->vsi_map,
3777 						   vsi_handle)) {
3778 					*vsi_list_id = map_info->vsi_list_id;
3779 					return map_info;
3780 				}
3781 			}
3782 		}
3783 	} else {
3784 		struct ice_fltr_mgmt_list_entry *list_itr;
3785 
3786 		LIST_FOR_EACH_ENTRY(list_itr, list_head,
3787 				    ice_fltr_mgmt_list_entry,
3788 				    list_entry) {
3789 			if (list_itr->vsi_count == 1 &&
3790 			    list_itr->vsi_list_info) {
3791 				map_info = list_itr->vsi_list_info;
3792 				if (ice_is_bit_set(map_info->vsi_map,
3793 						   vsi_handle)) {
3794 					*vsi_list_id = map_info->vsi_list_id;
3795 					return map_info;
3796 				}
3797 			}
3798 		}
3799 	}
3800 	return NULL;
3801 }
3802 
3803 /**
3804  * ice_add_rule_internal - add rule for a given lookup type
3805  * @hw: pointer to the hardware structure
3806  * @recp_list: recipe list for which rule has to be added
3807  * @lport: logic port number on which function add rule
3808  * @f_entry: structure containing MAC forwarding information
3809  *
3810  * Adds or updates the rule lists for a given recipe
3811  */
3812 static enum ice_status
ice_add_rule_internal(struct ice_hw * hw,struct ice_sw_recipe * recp_list,u8 lport,struct ice_fltr_list_entry * f_entry)3813 ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3814 		      u8 lport, struct ice_fltr_list_entry *f_entry)
3815 {
3816 	struct ice_fltr_info *new_fltr, *cur_fltr;
3817 	struct ice_fltr_mgmt_list_entry *m_entry;
3818 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3819 	enum ice_status status = ICE_SUCCESS;
3820 
3821 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3822 		return ICE_ERR_PARAM;
3823 
3824 	/* Load the hw_vsi_id only if the fwd action is fwd to VSI */
3825 	if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
3826 		f_entry->fltr_info.fwd_id.hw_vsi_id =
3827 			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3828 
3829 	rule_lock = &recp_list->filt_rule_lock;
3830 
3831 	ice_acquire_lock(rule_lock);
3832 	new_fltr = &f_entry->fltr_info;
3833 	if (new_fltr->flag & ICE_FLTR_RX)
3834 		new_fltr->src = lport;
3835 	else if (new_fltr->flag & ICE_FLTR_TX)
3836 		new_fltr->src =
3837 			ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3838 
3839 	m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
3840 	if (!m_entry) {
3841 		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
3842 		goto exit_add_rule_internal;
3843 	}
3844 
3845 	cur_fltr = &m_entry->fltr_info;
3846 	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3847 
3848 exit_add_rule_internal:
3849 	ice_release_lock(rule_lock);
3850 	return status;
3851 }
3852 
3853 /**
3854  * ice_remove_vsi_list_rule
3855  * @hw: pointer to the hardware structure
3856  * @vsi_list_id: VSI list ID generated as part of allocate resource
3857  * @lkup_type: switch rule filter lookup type
3858  *
3859  * The VSI list should be emptied before this function is called to remove the
3860  * VSI list.
3861  */
3862 static enum ice_status
ice_remove_vsi_list_rule(struct ice_hw * hw,u16 vsi_list_id,enum ice_sw_lkup_type lkup_type)3863 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3864 			 enum ice_sw_lkup_type lkup_type)
3865 {
3866 	/* Free the vsi_list resource that we allocated. It is assumed that the
3867 	 * list is empty at this point.
3868 	 */
3869 	return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3870 					    ice_aqc_opc_free_res);
3871 }
3872 
3873 /**
3874  * ice_rem_update_vsi_list
3875  * @hw: pointer to the hardware structure
3876  * @vsi_handle: VSI handle of the VSI to remove
3877  * @fm_list: filter management entry for which the VSI list management needs to
3878  *	     be done
3879  */
3880 static enum ice_status
ice_rem_update_vsi_list(struct ice_hw * hw,u16 vsi_handle,struct ice_fltr_mgmt_list_entry * fm_list)3881 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3882 			struct ice_fltr_mgmt_list_entry *fm_list)
3883 {
3884 	enum ice_sw_lkup_type lkup_type;
3885 	enum ice_status status = ICE_SUCCESS;
3886 	u16 vsi_list_id;
3887 
3888 	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3889 	    fm_list->vsi_count == 0)
3890 		return ICE_ERR_PARAM;
3891 
3892 	/* A rule with the VSI being removed does not exist */
3893 	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
3894 		return ICE_ERR_DOES_NOT_EXIST;
3895 
3896 	lkup_type = fm_list->fltr_info.lkup_type;
3897 	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3898 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3899 					  ice_aqc_opc_update_sw_rules,
3900 					  lkup_type);
3901 	if (status)
3902 		return status;
3903 
3904 	fm_list->vsi_count--;
3905 	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
3906 
3907 	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3908 		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3909 		struct ice_vsi_list_map_info *vsi_list_info =
3910 			fm_list->vsi_list_info;
3911 		u16 rem_vsi_handle;
3912 
3913 		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
3914 						    ICE_MAX_VSI);
3915 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3916 			return ICE_ERR_OUT_OF_RANGE;
3917 
3918 		/* Make sure VSI list is empty before removing it below */
3919 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3920 						  vsi_list_id, true,
3921 						  ice_aqc_opc_update_sw_rules,
3922 						  lkup_type);
3923 		if (status)
3924 			return status;
3925 
3926 		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3927 		tmp_fltr_info.fwd_id.hw_vsi_id =
3928 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
3929 		tmp_fltr_info.vsi_handle = rem_vsi_handle;
3930 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3931 		if (status) {
3932 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3933 				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
3934 			return status;
3935 		}
3936 
3937 		fm_list->fltr_info = tmp_fltr_info;
3938 	}
3939 
3940 	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3941 	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3942 		struct ice_vsi_list_map_info *vsi_list_info =
3943 			fm_list->vsi_list_info;
3944 
3945 		/* Remove the VSI list since it is no longer used */
3946 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3947 		if (status) {
3948 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3949 				  vsi_list_id, status);
3950 			return status;
3951 		}
3952 
3953 		LIST_DEL(&vsi_list_info->list_entry);
3954 		ice_free(hw, vsi_list_info);
3955 		fm_list->vsi_list_info = NULL;
3956 	}
3957 
3958 	return status;
3959 }
3960 
3961 /**
3962  * ice_remove_rule_internal - Remove a filter rule of a given type
3963  *
3964  * @hw: pointer to the hardware structure
3965  * @recp_list: recipe list for which the rule needs to removed
3966  * @f_entry: rule entry containing filter information
3967  */
3968 static enum ice_status
ice_remove_rule_internal(struct ice_hw * hw,struct ice_sw_recipe * recp_list,struct ice_fltr_list_entry * f_entry)3969 ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
3970 			 struct ice_fltr_list_entry *f_entry)
3971 {
3972 	struct ice_fltr_mgmt_list_entry *list_elem;
3973 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3974 	enum ice_status status = ICE_SUCCESS;
3975 	bool remove_rule = false;
3976 	u16 vsi_handle;
3977 
3978 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3979 		return ICE_ERR_PARAM;
3980 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3981 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3982 
3983 	rule_lock = &recp_list->filt_rule_lock;
3984 	ice_acquire_lock(rule_lock);
3985 	list_elem = ice_find_rule_entry(&recp_list->filt_rules,
3986 					&f_entry->fltr_info);
3987 	if (!list_elem) {
3988 		status = ICE_ERR_DOES_NOT_EXIST;
3989 		goto exit;
3990 	}
3991 
3992 	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3993 		remove_rule = true;
3994 	} else if (!list_elem->vsi_list_info) {
3995 		status = ICE_ERR_DOES_NOT_EXIST;
3996 		goto exit;
3997 	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
3998 		/* a ref_cnt > 1 indicates that the vsi_list is being
3999 		 * shared by multiple rules. Decrement the ref_cnt and
4000 		 * remove this rule, but do not modify the list, as it
4001 		 * is in-use by other rules.
4002 		 */
4003 		list_elem->vsi_list_info->ref_cnt--;
4004 		remove_rule = true;
4005 	} else {
4006 		/* a ref_cnt of 1 indicates the vsi_list is only used
4007 		 * by one rule. However, the original removal request is only
4008 		 * for a single VSI. Update the vsi_list first, and only
4009 		 * remove the rule if there are no further VSIs in this list.
4010 		 */
4011 		vsi_handle = f_entry->fltr_info.vsi_handle;
4012 		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
4013 		if (status)
4014 			goto exit;
4015 		/* if VSI count goes to zero after updating the VSI list */
4016 		if (list_elem->vsi_count == 0)
4017 			remove_rule = true;
4018 	}
4019 
4020 	if (remove_rule) {
4021 		/* Remove the lookup rule */
4022 		struct ice_aqc_sw_rules_elem *s_rule;
4023 
4024 		s_rule = (struct ice_aqc_sw_rules_elem *)
4025 			ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
4026 		if (!s_rule) {
4027 			status = ICE_ERR_NO_MEMORY;
4028 			goto exit;
4029 		}
4030 
4031 		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
4032 				 ice_aqc_opc_remove_sw_rules);
4033 
4034 		status = ice_aq_sw_rules(hw, s_rule,
4035 					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
4036 					 ice_aqc_opc_remove_sw_rules, NULL);
4037 
4038 		/* Remove a book keeping from the list */
4039 		ice_free(hw, s_rule);
4040 
4041 		if (status)
4042 			goto exit;
4043 
4044 		LIST_DEL(&list_elem->list_entry);
4045 		ice_free(hw, list_elem);
4046 	}
4047 exit:
4048 	ice_release_lock(rule_lock);
4049 	return status;
4050 }
4051 
4052 /**
4053  * ice_aq_get_res_alloc - get allocated resources
4054  * @hw: pointer to the HW struct
4055  * @num_entries: pointer to u16 to store the number of resource entries returned
4056  * @buf: pointer to buffer
4057  * @buf_size: size of buf
4058  * @cd: pointer to command details structure or NULL
4059  *
4060  * The caller-supplied buffer must be large enough to store the resource
4061  * information for all resource types. Each resource type is an
4062  * ice_aqc_get_res_resp_elem structure.
4063  */
4064 enum ice_status
ice_aq_get_res_alloc(struct ice_hw * hw,u16 * num_entries,struct ice_aqc_get_res_resp_elem * buf,u16 buf_size,struct ice_sq_cd * cd)4065 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
4066 		     struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
4067 		     struct ice_sq_cd *cd)
4068 {
4069 	struct ice_aqc_get_res_alloc *resp;
4070 	enum ice_status status;
4071 	struct ice_aq_desc desc;
4072 
4073 	if (!buf)
4074 		return ICE_ERR_BAD_PTR;
4075 
4076 	if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
4077 		return ICE_ERR_INVAL_SIZE;
4078 
4079 	resp = &desc.params.get_res;
4080 
4081 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
4082 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4083 
4084 	if (!status && num_entries)
4085 		*num_entries = LE16_TO_CPU(resp->resp_elem_num);
4086 
4087 	return status;
4088 }
4089 
4090 /**
4091  * ice_aq_get_res_descs - get allocated resource descriptors
4092  * @hw: pointer to the hardware structure
4093  * @num_entries: number of resource entries in buffer
4094  * @buf: structure to hold response data buffer
4095  * @buf_size: size of buffer
4096  * @res_type: resource type
4097  * @res_shared: is resource shared
4098  * @desc_id: input - first desc ID to start; output - next desc ID
4099  * @cd: pointer to command details structure or NULL
4100  */
4101 enum ice_status
ice_aq_get_res_descs(struct ice_hw * hw,u16 num_entries,struct ice_aqc_res_elem * buf,u16 buf_size,u16 res_type,bool res_shared,u16 * desc_id,struct ice_sq_cd * cd)4102 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
4103 		     struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
4104 		     bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
4105 {
4106 	struct ice_aqc_get_allocd_res_desc *cmd;
4107 	struct ice_aq_desc desc;
4108 	enum ice_status status;
4109 
4110 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4111 
4112 	cmd = &desc.params.get_res_desc;
4113 
4114 	if (!buf)
4115 		return ICE_ERR_PARAM;
4116 
4117 	if (buf_size != (num_entries * sizeof(*buf)))
4118 		return ICE_ERR_PARAM;
4119 
4120 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
4121 
4122 	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
4123 					 ICE_AQC_RES_TYPE_M) | (res_shared ?
4124 					ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
4125 	cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
4126 
4127 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4128 	if (!status)
4129 		*desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
4130 
4131 	return status;
4132 }
4133 
4134 /**
4135  * ice_add_mac_rule - Add a MAC address based filter rule
4136  * @hw: pointer to the hardware structure
4137  * @m_list: list of MAC addresses and forwarding information
4138  * @sw: pointer to switch info struct for which function add rule
4139  * @lport: logic port number on which function add rule
4140  *
4141  * IMPORTANT: When the ucast_shared flag is set to false and m_list has
4142  * multiple unicast addresses, the function assumes that all the
4143  * addresses are unique in a given add_mac call. It doesn't
4144  * check for duplicates in this case, removing duplicates from a given
4145  * list should be taken care of in the caller of this function.
4146  */
4147 static enum ice_status
ice_add_mac_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * m_list,struct ice_switch_info * sw,u8 lport)4148 ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4149 		 struct ice_switch_info *sw, u8 lport)
4150 {
4151 	struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
4152 	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
4153 	struct ice_fltr_list_entry *m_list_itr;
4154 	struct LIST_HEAD_TYPE *rule_head;
4155 	u16 total_elem_left, s_rule_size;
4156 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4157 	enum ice_status status = ICE_SUCCESS;
4158 	u16 num_unicast = 0;
4159 	u8 elem_sent;
4160 
4161 	s_rule = NULL;
4162 	rule_lock = &recp_list->filt_rule_lock;
4163 	rule_head = &recp_list->filt_rules;
4164 
4165 	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4166 			    list_entry) {
4167 		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
4168 		u16 vsi_handle;
4169 		u16 hw_vsi_id;
4170 
4171 		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
4172 		vsi_handle = m_list_itr->fltr_info.vsi_handle;
4173 		if (!ice_is_vsi_valid(hw, vsi_handle))
4174 			return ICE_ERR_PARAM;
4175 		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4176 		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
4177 		/* update the src in case it is VSI num */
4178 		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
4179 			return ICE_ERR_PARAM;
4180 		m_list_itr->fltr_info.src = hw_vsi_id;
4181 		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
4182 		    IS_ZERO_ETHER_ADDR(add))
4183 			return ICE_ERR_PARAM;
4184 		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4185 			/* Don't overwrite the unicast address */
4186 			ice_acquire_lock(rule_lock);
4187 			if (ice_find_rule_entry(rule_head,
4188 						&m_list_itr->fltr_info)) {
4189 				ice_release_lock(rule_lock);
4190 				return ICE_ERR_ALREADY_EXISTS;
4191 			}
4192 			ice_release_lock(rule_lock);
4193 			num_unicast++;
4194 		} else if (IS_MULTICAST_ETHER_ADDR(add) ||
4195 			   (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
4196 			m_list_itr->status =
4197 				ice_add_rule_internal(hw, recp_list, lport,
4198 						      m_list_itr);
4199 			if (m_list_itr->status)
4200 				return m_list_itr->status;
4201 		}
4202 	}
4203 
4204 	ice_acquire_lock(rule_lock);
4205 	/* Exit if no suitable entries were found for adding bulk switch rule */
4206 	if (!num_unicast) {
4207 		status = ICE_SUCCESS;
4208 		goto ice_add_mac_exit;
4209 	}
4210 
4211 	/* Allocate switch rule buffer for the bulk update for unicast */
4212 	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
4213 	s_rule = (struct ice_aqc_sw_rules_elem *)
4214 		ice_calloc(hw, num_unicast, s_rule_size);
4215 	if (!s_rule) {
4216 		status = ICE_ERR_NO_MEMORY;
4217 		goto ice_add_mac_exit;
4218 	}
4219 
4220 	r_iter = s_rule;
4221 	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4222 			    list_entry) {
4223 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4224 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4225 
4226 		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4227 			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
4228 					 ice_aqc_opc_add_sw_rules);
4229 			r_iter = (struct ice_aqc_sw_rules_elem *)
4230 				((u8 *)r_iter + s_rule_size);
4231 		}
4232 	}
4233 
4234 	/* Call AQ bulk switch rule update for all unicast addresses */
4235 	r_iter = s_rule;
4236 	/* Call AQ switch rule in AQ_MAX chunk */
4237 	for (total_elem_left = num_unicast; total_elem_left > 0;
4238 	     total_elem_left -= elem_sent) {
4239 		struct ice_aqc_sw_rules_elem *entry = r_iter;
4240 
4241 		elem_sent = MIN_T(u8, total_elem_left,
4242 				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
4243 		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
4244 					 elem_sent, ice_aqc_opc_add_sw_rules,
4245 					 NULL);
4246 		if (status)
4247 			goto ice_add_mac_exit;
4248 		r_iter = (struct ice_aqc_sw_rules_elem *)
4249 			((u8 *)r_iter + (elem_sent * s_rule_size));
4250 	}
4251 
4252 	/* Fill up rule ID based on the value returned from FW */
4253 	r_iter = s_rule;
4254 	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
4255 			    list_entry) {
4256 		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
4257 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
4258 		struct ice_fltr_mgmt_list_entry *fm_entry;
4259 
4260 		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
4261 			f_info->fltr_rule_id =
4262 				LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
4263 			f_info->fltr_act = ICE_FWD_TO_VSI;
4264 			/* Create an entry to track this MAC address */
4265 			fm_entry = (struct ice_fltr_mgmt_list_entry *)
4266 				ice_malloc(hw, sizeof(*fm_entry));
4267 			if (!fm_entry) {
4268 				status = ICE_ERR_NO_MEMORY;
4269 				goto ice_add_mac_exit;
4270 			}
4271 			fm_entry->fltr_info = *f_info;
4272 			fm_entry->vsi_count = 1;
4273 			/* The book keeping entries will get removed when
4274 			 * base driver calls remove filter AQ command
4275 			 */
4276 
4277 			LIST_ADD(&fm_entry->list_entry, rule_head);
4278 			r_iter = (struct ice_aqc_sw_rules_elem *)
4279 				((u8 *)r_iter + s_rule_size);
4280 		}
4281 	}
4282 
4283 ice_add_mac_exit:
4284 	ice_release_lock(rule_lock);
4285 	if (s_rule)
4286 		ice_free(hw, s_rule);
4287 	return status;
4288 }
4289 
4290 /**
4291  * ice_add_mac - Add a MAC address based filter rule
4292  * @hw: pointer to the hardware structure
4293  * @m_list: list of MAC addresses and forwarding information
4294  *
4295  * Function add MAC rule for logical port from HW struct
4296  */
ice_add_mac(struct ice_hw * hw,struct LIST_HEAD_TYPE * m_list)4297 enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4298 {
4299 	if (!m_list || !hw)
4300 		return ICE_ERR_PARAM;
4301 
4302 	return ice_add_mac_rule(hw, m_list, hw->switch_info,
4303 				hw->port_info->lport);
4304 }
4305 
4306 /**
4307  * ice_add_vlan_internal - Add one VLAN based filter rule
4308  * @hw: pointer to the hardware structure
4309  * @recp_list: recipe list for which rule has to be added
4310  * @f_entry: filter entry containing one VLAN information
4311  */
4312 static enum ice_status
ice_add_vlan_internal(struct ice_hw * hw,struct ice_sw_recipe * recp_list,struct ice_fltr_list_entry * f_entry)4313 ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
4314 		      struct ice_fltr_list_entry *f_entry)
4315 {
4316 	struct ice_fltr_mgmt_list_entry *v_list_itr;
4317 	struct ice_fltr_info *new_fltr, *cur_fltr;
4318 	enum ice_sw_lkup_type lkup_type;
4319 	u16 vsi_list_id = 0, vsi_handle;
4320 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4321 	enum ice_status status = ICE_SUCCESS;
4322 
4323 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
4324 		return ICE_ERR_PARAM;
4325 
4326 	f_entry->fltr_info.fwd_id.hw_vsi_id =
4327 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
4328 	new_fltr = &f_entry->fltr_info;
4329 
4330 	/* VLAN ID should only be 12 bits */
4331 	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
4332 		return ICE_ERR_PARAM;
4333 
4334 	if (new_fltr->src_id != ICE_SRC_ID_VSI)
4335 		return ICE_ERR_PARAM;
4336 
4337 	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
4338 	lkup_type = new_fltr->lkup_type;
4339 	vsi_handle = new_fltr->vsi_handle;
4340 	rule_lock = &recp_list->filt_rule_lock;
4341 	ice_acquire_lock(rule_lock);
4342 	v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
4343 	if (!v_list_itr) {
4344 		struct ice_vsi_list_map_info *map_info = NULL;
4345 
4346 		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
4347 			/* All VLAN pruning rules use a VSI list. Check if
4348 			 * there is already a VSI list containing VSI that we
4349 			 * want to add. If found, use the same vsi_list_id for
4350 			 * this new VLAN rule or else create a new list.
4351 			 */
4352 			map_info = ice_find_vsi_list_entry(recp_list,
4353 							   vsi_handle,
4354 							   &vsi_list_id);
4355 			if (!map_info) {
4356 				status = ice_create_vsi_list_rule(hw,
4357 								  &vsi_handle,
4358 								  1,
4359 								  &vsi_list_id,
4360 								  lkup_type);
4361 				if (status)
4362 					goto exit;
4363 			}
4364 			/* Convert the action to forwarding to a VSI list. */
4365 			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
4366 			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
4367 		}
4368 
4369 		status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
4370 		if (!status) {
4371 			v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
4372 							 new_fltr);
4373 			if (!v_list_itr) {
4374 				status = ICE_ERR_DOES_NOT_EXIST;
4375 				goto exit;
4376 			}
4377 			/* reuse VSI list for new rule and increment ref_cnt */
4378 			if (map_info) {
4379 				v_list_itr->vsi_list_info = map_info;
4380 				map_info->ref_cnt++;
4381 			} else {
4382 				v_list_itr->vsi_list_info =
4383 					ice_create_vsi_list_map(hw, &vsi_handle,
4384 								1, vsi_list_id);
4385 			}
4386 		}
4387 	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
4388 		/* Update existing VSI list to add new VSI ID only if it used
4389 		 * by one VLAN rule.
4390 		 */
4391 		cur_fltr = &v_list_itr->fltr_info;
4392 		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
4393 						 new_fltr);
4394 	} else {
4395 		/* If VLAN rule exists and VSI list being used by this rule is
4396 		 * referenced by more than 1 VLAN rule. Then create a new VSI
4397 		 * list appending previous VSI with new VSI and update existing
4398 		 * VLAN rule to point to new VSI list ID
4399 		 */
4400 		struct ice_fltr_info tmp_fltr;
4401 		u16 vsi_handle_arr[2];
4402 		u16 cur_handle;
4403 
4404 		/* Current implementation only supports reusing VSI list with
4405 		 * one VSI count. We should never hit below condition
4406 		 */
4407 		if (v_list_itr->vsi_count > 1 &&
4408 		    v_list_itr->vsi_list_info->ref_cnt > 1) {
4409 			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
4410 			status = ICE_ERR_CFG;
4411 			goto exit;
4412 		}
4413 
4414 		cur_handle =
4415 			ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
4416 					   ICE_MAX_VSI);
4417 
4418 		/* A rule already exists with the new VSI being added */
4419 		if (cur_handle == vsi_handle) {
4420 			status = ICE_ERR_ALREADY_EXISTS;
4421 			goto exit;
4422 		}
4423 
4424 		vsi_handle_arr[0] = cur_handle;
4425 		vsi_handle_arr[1] = vsi_handle;
4426 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
4427 						  &vsi_list_id, lkup_type);
4428 		if (status)
4429 			goto exit;
4430 
4431 		tmp_fltr = v_list_itr->fltr_info;
4432 		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
4433 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
4434 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
4435 		/* Update the previous switch rule to a new VSI list which
4436 		 * includes current VSI that is requested
4437 		 */
4438 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
4439 		if (status)
4440 			goto exit;
4441 
4442 		/* before overriding VSI list map info. decrement ref_cnt of
4443 		 * previous VSI list
4444 		 */
4445 		v_list_itr->vsi_list_info->ref_cnt--;
4446 
4447 		/* now update to newly created list */
4448 		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
4449 		v_list_itr->vsi_list_info =
4450 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
4451 						vsi_list_id);
4452 		v_list_itr->vsi_count++;
4453 	}
4454 
4455 exit:
4456 	ice_release_lock(rule_lock);
4457 	return status;
4458 }
4459 
4460 /**
4461  * ice_add_vlan_rule - Add VLAN based filter rule
4462  * @hw: pointer to the hardware structure
4463  * @v_list: list of VLAN entries and forwarding information
4464  * @sw: pointer to switch info struct for which function add rule
4465  */
4466 static enum ice_status
ice_add_vlan_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * v_list,struct ice_switch_info * sw)4467 ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4468 		  struct ice_switch_info *sw)
4469 {
4470 	struct ice_fltr_list_entry *v_list_itr;
4471 	struct ice_sw_recipe *recp_list;
4472 
4473 	recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
4474 	LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
4475 			    list_entry) {
4476 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
4477 			return ICE_ERR_PARAM;
4478 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
4479 		v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
4480 							   v_list_itr);
4481 		if (v_list_itr->status)
4482 			return v_list_itr->status;
4483 	}
4484 	return ICE_SUCCESS;
4485 }
4486 
4487 /**
4488  * ice_add_vlan - Add a VLAN based filter rule
4489  * @hw: pointer to the hardware structure
4490  * @v_list: list of VLAN and forwarding information
4491  *
4492  * Function add VLAN rule for logical port from HW struct
4493  */
ice_add_vlan(struct ice_hw * hw,struct LIST_HEAD_TYPE * v_list)4494 enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4495 {
4496 	if (!v_list || !hw)
4497 		return ICE_ERR_PARAM;
4498 
4499 	return ice_add_vlan_rule(hw, v_list, hw->switch_info);
4500 }
4501 
4502 /**
4503  * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
4504  * @hw: pointer to the hardware structure
4505  * @mv_list: list of MAC and VLAN filters
4506  * @sw: pointer to switch info struct for which function add rule
4507  * @lport: logic port number on which function add rule
4508  *
4509  * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
4510  * pruning bits enabled, then it is the responsibility of the caller to make
4511  * sure to add a VLAN only filter on the same VSI. Packets belonging to that
4512  * VLAN won't be received on that VSI otherwise.
4513  */
4514 static enum ice_status
ice_add_mac_vlan_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * mv_list,struct ice_switch_info * sw,u8 lport)4515 ice_add_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list,
4516 		      struct ice_switch_info *sw, u8 lport)
4517 {
4518 	struct ice_fltr_list_entry *mv_list_itr;
4519 	struct ice_sw_recipe *recp_list;
4520 
4521 	if (!mv_list || !hw)
4522 		return ICE_ERR_PARAM;
4523 
4524 	recp_list = &sw->recp_list[ICE_SW_LKUP_MAC_VLAN];
4525 	LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
4526 			    list_entry) {
4527 		enum ice_sw_lkup_type l_type =
4528 			mv_list_itr->fltr_info.lkup_type;
4529 
4530 		if (l_type != ICE_SW_LKUP_MAC_VLAN)
4531 			return ICE_ERR_PARAM;
4532 		mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
4533 		mv_list_itr->status =
4534 			ice_add_rule_internal(hw, recp_list, lport,
4535 					      mv_list_itr);
4536 		if (mv_list_itr->status)
4537 			return mv_list_itr->status;
4538 	}
4539 	return ICE_SUCCESS;
4540 }
4541 
4542 /**
4543  * ice_add_mac_vlan - Add a MAC VLAN address based filter rule
4544  * @hw: pointer to the hardware structure
4545  * @mv_list: list of MAC VLAN addresses and forwarding information
4546  *
4547  * Function add MAC VLAN rule for logical port from HW struct
4548  */
4549 enum ice_status
ice_add_mac_vlan(struct ice_hw * hw,struct LIST_HEAD_TYPE * mv_list)4550 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4551 {
4552 	if (!mv_list || !hw)
4553 		return ICE_ERR_PARAM;
4554 
4555 	return ice_add_mac_vlan_rule(hw, mv_list, hw->switch_info,
4556 				     hw->port_info->lport);
4557 }
4558 
4559 /**
4560  * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
4561  * @hw: pointer to the hardware structure
4562  * @em_list: list of ether type MAC filter, MAC is optional
4563  * @sw: pointer to switch info struct for which function add rule
4564  * @lport: logic port number on which function add rule
4565  *
4566  * This function requires the caller to populate the entries in
4567  * the filter list with the necessary fields (including flags to
4568  * indicate Tx or Rx rules).
4569  */
4570 static enum ice_status
ice_add_eth_mac_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * em_list,struct ice_switch_info * sw,u8 lport)4571 ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4572 		     struct ice_switch_info *sw, u8 lport)
4573 {
4574 	struct ice_fltr_list_entry *em_list_itr;
4575 
4576 	LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
4577 			    list_entry) {
4578 		struct ice_sw_recipe *recp_list;
4579 		enum ice_sw_lkup_type l_type;
4580 
4581 		l_type = em_list_itr->fltr_info.lkup_type;
4582 		recp_list = &sw->recp_list[l_type];
4583 
4584 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4585 		    l_type != ICE_SW_LKUP_ETHERTYPE)
4586 			return ICE_ERR_PARAM;
4587 
4588 		em_list_itr->status = ice_add_rule_internal(hw, recp_list,
4589 							    lport,
4590 							    em_list_itr);
4591 		if (em_list_itr->status)
4592 			return em_list_itr->status;
4593 	}
4594 	return ICE_SUCCESS;
4595 }
4596 
4597 /**
4598  * ice_add_eth_mac - Add a ethertype based filter rule
4599  * @hw: pointer to the hardware structure
4600  * @em_list: list of ethertype and forwarding information
4601  *
4602  * Function add ethertype rule for logical port from HW struct
4603  */
4604 enum ice_status
ice_add_eth_mac(struct ice_hw * hw,struct LIST_HEAD_TYPE * em_list)4605 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4606 {
4607 	if (!em_list || !hw)
4608 		return ICE_ERR_PARAM;
4609 
4610 	return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
4611 				    hw->port_info->lport);
4612 }
4613 
4614 /**
4615  * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
4616  * @hw: pointer to the hardware structure
4617  * @em_list: list of ethertype or ethertype MAC entries
4618  * @sw: pointer to switch info struct for which function add rule
4619  */
4620 static enum ice_status
ice_remove_eth_mac_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * em_list,struct ice_switch_info * sw)4621 ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
4622 			struct ice_switch_info *sw)
4623 {
4624 	struct ice_fltr_list_entry *em_list_itr, *tmp;
4625 
4626 	LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
4627 				 list_entry) {
4628 		struct ice_sw_recipe *recp_list;
4629 		enum ice_sw_lkup_type l_type;
4630 
4631 		l_type = em_list_itr->fltr_info.lkup_type;
4632 
4633 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
4634 		    l_type != ICE_SW_LKUP_ETHERTYPE)
4635 			return ICE_ERR_PARAM;
4636 
4637 		recp_list = &sw->recp_list[l_type];
4638 		em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4639 							       em_list_itr);
4640 		if (em_list_itr->status)
4641 			return em_list_itr->status;
4642 	}
4643 	return ICE_SUCCESS;
4644 }
4645 
4646 /**
4647  * ice_remove_eth_mac - remove a ethertype based filter rule
4648  * @hw: pointer to the hardware structure
4649  * @em_list: list of ethertype and forwarding information
4650  *
4651  */
4652 enum ice_status
ice_remove_eth_mac(struct ice_hw * hw,struct LIST_HEAD_TYPE * em_list)4653 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
4654 {
4655 	if (!em_list || !hw)
4656 		return ICE_ERR_PARAM;
4657 
4658 	return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
4659 }
4660 
4661 /**
4662  * ice_rem_sw_rule_info
4663  * @hw: pointer to the hardware structure
4664  * @rule_head: pointer to the switch list structure that we want to delete
4665  */
4666 static void
ice_rem_sw_rule_info(struct ice_hw * hw,struct LIST_HEAD_TYPE * rule_head)4667 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4668 {
4669 	if (!LIST_EMPTY(rule_head)) {
4670 		struct ice_fltr_mgmt_list_entry *entry;
4671 		struct ice_fltr_mgmt_list_entry *tmp;
4672 
4673 		LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
4674 					 ice_fltr_mgmt_list_entry, list_entry) {
4675 			LIST_DEL(&entry->list_entry);
4676 			ice_free(hw, entry);
4677 		}
4678 	}
4679 }
4680 
4681 /**
4682  * ice_rem_adv_rule_info
4683  * @hw: pointer to the hardware structure
4684  * @rule_head: pointer to the switch list structure that we want to delete
4685  */
4686 static void
ice_rem_adv_rule_info(struct ice_hw * hw,struct LIST_HEAD_TYPE * rule_head)4687 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
4688 {
4689 	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
4690 	struct ice_adv_fltr_mgmt_list_entry *lst_itr;
4691 
4692 	if (LIST_EMPTY(rule_head))
4693 		return;
4694 
4695 	LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
4696 				 ice_adv_fltr_mgmt_list_entry, list_entry) {
4697 		LIST_DEL(&lst_itr->list_entry);
4698 		ice_free(hw, lst_itr->lkups);
4699 		ice_free(hw, lst_itr);
4700 	}
4701 }
4702 
4703 /**
4704  * ice_rem_all_sw_rules_info
4705  * @hw: pointer to the hardware structure
4706  */
ice_rem_all_sw_rules_info(struct ice_hw * hw)4707 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
4708 {
4709 	struct ice_switch_info *sw = hw->switch_info;
4710 	u8 i;
4711 
4712 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4713 		struct LIST_HEAD_TYPE *rule_head;
4714 
4715 		rule_head = &sw->recp_list[i].filt_rules;
4716 		if (!sw->recp_list[i].adv_rule)
4717 			ice_rem_sw_rule_info(hw, rule_head);
4718 		else
4719 			ice_rem_adv_rule_info(hw, rule_head);
4720 		if (sw->recp_list[i].adv_rule &&
4721 		    LIST_EMPTY(&sw->recp_list[i].filt_rules))
4722 			sw->recp_list[i].adv_rule = false;
4723 	}
4724 }
4725 
4726 /**
4727  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
4728  * @pi: pointer to the port_info structure
4729  * @vsi_handle: VSI handle to set as default
4730  * @set: true to add the above mentioned switch rule, false to remove it
4731  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
4732  *
4733  * add filter rule to set/unset given VSI as default VSI for the switch
4734  * (represented by swid)
4735  */
4736 enum ice_status
ice_cfg_dflt_vsi(struct ice_port_info * pi,u16 vsi_handle,bool set,u8 direction)4737 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
4738 		 u8 direction)
4739 {
4740 	struct ice_aqc_sw_rules_elem *s_rule;
4741 	struct ice_fltr_info f_info;
4742 	struct ice_hw *hw = pi->hw;
4743 	enum ice_adminq_opc opcode;
4744 	enum ice_status status;
4745 	u16 s_rule_size;
4746 	u16 hw_vsi_id;
4747 
4748 	if (!ice_is_vsi_valid(hw, vsi_handle))
4749 		return ICE_ERR_PARAM;
4750 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4751 
4752 	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
4753 		ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
4754 
4755 	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
4756 	if (!s_rule)
4757 		return ICE_ERR_NO_MEMORY;
4758 
4759 	ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
4760 
4761 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
4762 	f_info.flag = direction;
4763 	f_info.fltr_act = ICE_FWD_TO_VSI;
4764 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
4765 
4766 	if (f_info.flag & ICE_FLTR_RX) {
4767 		f_info.src = pi->lport;
4768 		f_info.src_id = ICE_SRC_ID_LPORT;
4769 		if (!set)
4770 			f_info.fltr_rule_id =
4771 				pi->dflt_rx_vsi_rule_id;
4772 	} else if (f_info.flag & ICE_FLTR_TX) {
4773 		f_info.src_id = ICE_SRC_ID_VSI;
4774 		f_info.src = hw_vsi_id;
4775 		if (!set)
4776 			f_info.fltr_rule_id =
4777 				pi->dflt_tx_vsi_rule_id;
4778 	}
4779 
4780 	if (set)
4781 		opcode = ice_aqc_opc_add_sw_rules;
4782 	else
4783 		opcode = ice_aqc_opc_remove_sw_rules;
4784 
4785 	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
4786 
4787 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
4788 	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
4789 		goto out;
4790 	if (set) {
4791 		u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
4792 
4793 		if (f_info.flag & ICE_FLTR_TX) {
4794 			pi->dflt_tx_vsi_num = hw_vsi_id;
4795 			pi->dflt_tx_vsi_rule_id = index;
4796 		} else if (f_info.flag & ICE_FLTR_RX) {
4797 			pi->dflt_rx_vsi_num = hw_vsi_id;
4798 			pi->dflt_rx_vsi_rule_id = index;
4799 		}
4800 	} else {
4801 		if (f_info.flag & ICE_FLTR_TX) {
4802 			pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
4803 			pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
4804 		} else if (f_info.flag & ICE_FLTR_RX) {
4805 			pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
4806 			pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
4807 		}
4808 	}
4809 
4810 out:
4811 	ice_free(hw, s_rule);
4812 	return status;
4813 }
4814 
4815 /**
4816  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
4817  * @list_head: head of rule list
4818  * @f_info: rule information
4819  *
4820  * Helper function to search for a unicast rule entry - this is to be used
4821  * to remove unicast MAC filter that is not shared with other VSIs on the
4822  * PF switch.
4823  *
4824  * Returns pointer to entry storing the rule if found
4825  */
4826 static struct ice_fltr_mgmt_list_entry *
ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE * list_head,struct ice_fltr_info * f_info)4827 ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
4828 			  struct ice_fltr_info *f_info)
4829 {
4830 	struct ice_fltr_mgmt_list_entry *list_itr;
4831 
4832 	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
4833 			    list_entry) {
4834 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
4835 			    sizeof(f_info->l_data)) &&
4836 		    f_info->fwd_id.hw_vsi_id ==
4837 		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
4838 		    f_info->flag == list_itr->fltr_info.flag)
4839 			return list_itr;
4840 	}
4841 	return NULL;
4842 }
4843 
4844 /**
4845  * ice_remove_mac_rule - remove a MAC based filter rule
4846  * @hw: pointer to the hardware structure
4847  * @m_list: list of MAC addresses and forwarding information
4848  * @recp_list: list from which function remove MAC address
4849  *
4850  * This function removes either a MAC filter rule or a specific VSI from a
4851  * VSI list for a multicast MAC address.
4852  *
4853  * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
4854  * ice_add_mac. Caller should be aware that this call will only work if all
4855  * the entries passed into m_list were added previously. It will not attempt to
4856  * do a partial remove of entries that were found.
4857  */
4858 static enum ice_status
ice_remove_mac_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * m_list,struct ice_sw_recipe * recp_list)4859 ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
4860 		    struct ice_sw_recipe *recp_list)
4861 {
4862 	struct ice_fltr_list_entry *list_itr, *tmp;
4863 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4864 
4865 	if (!m_list)
4866 		return ICE_ERR_PARAM;
4867 
4868 	rule_lock = &recp_list->filt_rule_lock;
4869 	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
4870 				 list_entry) {
4871 		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
4872 		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4873 		u16 vsi_handle;
4874 
4875 		if (l_type != ICE_SW_LKUP_MAC)
4876 			return ICE_ERR_PARAM;
4877 
4878 		vsi_handle = list_itr->fltr_info.vsi_handle;
4879 		if (!ice_is_vsi_valid(hw, vsi_handle))
4880 			return ICE_ERR_PARAM;
4881 
4882 		list_itr->fltr_info.fwd_id.hw_vsi_id =
4883 					ice_get_hw_vsi_num(hw, vsi_handle);
4884 		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
4885 			/* Don't remove the unicast address that belongs to
4886 			 * another VSI on the switch, since it is not being
4887 			 * shared...
4888 			 */
4889 			ice_acquire_lock(rule_lock);
4890 			if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
4891 						       &list_itr->fltr_info)) {
4892 				ice_release_lock(rule_lock);
4893 				return ICE_ERR_DOES_NOT_EXIST;
4894 			}
4895 			ice_release_lock(rule_lock);
4896 		}
4897 		list_itr->status = ice_remove_rule_internal(hw, recp_list,
4898 							    list_itr);
4899 		if (list_itr->status)
4900 			return list_itr->status;
4901 	}
4902 	return ICE_SUCCESS;
4903 }
4904 
4905 /**
4906  * ice_remove_mac - remove a MAC address based filter rule
4907  * @hw: pointer to the hardware structure
4908  * @m_list: list of MAC addresses and forwarding information
4909  *
4910  */
ice_remove_mac(struct ice_hw * hw,struct LIST_HEAD_TYPE * m_list)4911 enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
4912 {
4913 	struct ice_sw_recipe *recp_list;
4914 
4915 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
4916 	return ice_remove_mac_rule(hw, m_list, recp_list);
4917 }
4918 
4919 /**
4920  * ice_remove_vlan_rule - Remove VLAN based filter rule
4921  * @hw: pointer to the hardware structure
4922  * @v_list: list of VLAN entries and forwarding information
4923  * @recp_list: list from which function remove VLAN
4924  */
4925 static enum ice_status
ice_remove_vlan_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * v_list,struct ice_sw_recipe * recp_list)4926 ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4927 		     struct ice_sw_recipe *recp_list)
4928 {
4929 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4930 
4931 	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4932 				 list_entry) {
4933 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4934 
4935 		if (l_type != ICE_SW_LKUP_VLAN)
4936 			return ICE_ERR_PARAM;
4937 		v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
4938 							      v_list_itr);
4939 		if (v_list_itr->status)
4940 			return v_list_itr->status;
4941 	}
4942 	return ICE_SUCCESS;
4943 }
4944 
4945 /**
4946  * ice_remove_vlan - remove a VLAN address based filter rule
4947  * @hw: pointer to the hardware structure
4948  * @v_list: list of VLAN and forwarding information
4949  *
4950  */
4951 enum ice_status
ice_remove_vlan(struct ice_hw * hw,struct LIST_HEAD_TYPE * v_list)4952 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
4953 {
4954 	struct ice_sw_recipe *recp_list;
4955 
4956 	if (!v_list || !hw)
4957 		return ICE_ERR_PARAM;
4958 
4959 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
4960 	return ice_remove_vlan_rule(hw, v_list, recp_list);
4961 }
4962 
4963 /**
4964  * ice_remove_mac_vlan_rule - Remove MAC VLAN based filter rule
4965  * @hw: pointer to the hardware structure
4966  * @v_list: list of MAC VLAN entries and forwarding information
4967  * @recp_list: list from which function remove MAC VLAN
4968  */
4969 static enum ice_status
ice_remove_mac_vlan_rule(struct ice_hw * hw,struct LIST_HEAD_TYPE * v_list,struct ice_sw_recipe * recp_list)4970 ice_remove_mac_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
4971 			 struct ice_sw_recipe *recp_list)
4972 {
4973 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4974 
4975 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
4976 	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
4977 				 list_entry) {
4978 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4979 
4980 		if (l_type != ICE_SW_LKUP_MAC_VLAN)
4981 			return ICE_ERR_PARAM;
4982 		v_list_itr->status =
4983 			ice_remove_rule_internal(hw, recp_list,
4984 						 v_list_itr);
4985 		if (v_list_itr->status)
4986 			return v_list_itr->status;
4987 	}
4988 	return ICE_SUCCESS;
4989 }
4990 
4991 /**
4992  * ice_remove_mac_vlan - remove a MAC VLAN address based filter rule
4993  * @hw: pointer to the hardware structure
4994  * @mv_list: list of MAC VLAN and forwarding information
4995  */
4996 enum ice_status
ice_remove_mac_vlan(struct ice_hw * hw,struct LIST_HEAD_TYPE * mv_list)4997 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
4998 {
4999 	struct ice_sw_recipe *recp_list;
5000 
5001 	if (!mv_list || !hw)
5002 		return ICE_ERR_PARAM;
5003 
5004 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC_VLAN];
5005 	return ice_remove_mac_vlan_rule(hw, mv_list, recp_list);
5006 }
5007 
5008 /**
5009  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
5010  * @fm_entry: filter entry to inspect
5011  * @vsi_handle: VSI handle to compare with filter info
5012  */
5013 static bool
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry * fm_entry,u16 vsi_handle)5014 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
5015 {
5016 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
5017 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
5018 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
5019 		 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
5020 				 vsi_handle))));
5021 }
5022 
5023 /**
5024  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
5025  * @hw: pointer to the hardware structure
5026  * @vsi_handle: VSI handle to remove filters from
5027  * @vsi_list_head: pointer to the list to add entry to
5028  * @fi: pointer to fltr_info of filter entry to copy & add
5029  *
5030  * Helper function, used when creating a list of filters to remove from
5031  * a specific VSI. The entry added to vsi_list_head is a COPY of the
5032  * original filter entry, with the exception of fltr_info.fltr_act and
5033  * fltr_info.fwd_id fields. These are set such that later logic can
5034  * extract which VSI to remove the fltr from, and pass on that information.
5035  */
5036 static enum ice_status
ice_add_entry_to_vsi_fltr_list(struct ice_hw * hw,u16 vsi_handle,struct LIST_HEAD_TYPE * vsi_list_head,struct ice_fltr_info * fi)5037 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5038 			       struct LIST_HEAD_TYPE *vsi_list_head,
5039 			       struct ice_fltr_info *fi)
5040 {
5041 	struct ice_fltr_list_entry *tmp;
5042 
5043 	/* this memory is freed up in the caller function
5044 	 * once filters for this VSI are removed
5045 	 */
5046 	tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
5047 	if (!tmp)
5048 		return ICE_ERR_NO_MEMORY;
5049 
5050 	tmp->fltr_info = *fi;
5051 
5052 	/* Overwrite these fields to indicate which VSI to remove filter from,
5053 	 * so find and remove logic can extract the information from the
5054 	 * list entries. Note that original entries will still have proper
5055 	 * values.
5056 	 */
5057 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
5058 	tmp->fltr_info.vsi_handle = vsi_handle;
5059 	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5060 
5061 	LIST_ADD(&tmp->list_entry, vsi_list_head);
5062 
5063 	return ICE_SUCCESS;
5064 }
5065 
5066 /**
5067  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
5068  * @hw: pointer to the hardware structure
5069  * @vsi_handle: VSI handle to remove filters from
5070  * @lkup_list_head: pointer to the list that has certain lookup type filters
5071  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
5072  *
5073  * Locates all filters in lkup_list_head that are used by the given VSI,
5074  * and adds COPIES of those entries to vsi_list_head (intended to be used
5075  * to remove the listed filters).
5076  * Note that this means all entries in vsi_list_head must be explicitly
5077  * deallocated by the caller when done with list.
5078  */
5079 static enum ice_status
ice_add_to_vsi_fltr_list(struct ice_hw * hw,u16 vsi_handle,struct LIST_HEAD_TYPE * lkup_list_head,struct LIST_HEAD_TYPE * vsi_list_head)5080 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
5081 			 struct LIST_HEAD_TYPE *lkup_list_head,
5082 			 struct LIST_HEAD_TYPE *vsi_list_head)
5083 {
5084 	struct ice_fltr_mgmt_list_entry *fm_entry;
5085 	enum ice_status status = ICE_SUCCESS;
5086 
5087 	/* check to make sure VSI ID is valid and within boundary */
5088 	if (!ice_is_vsi_valid(hw, vsi_handle))
5089 		return ICE_ERR_PARAM;
5090 
5091 	LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
5092 			    ice_fltr_mgmt_list_entry, list_entry) {
5093 		struct ice_fltr_info *fi;
5094 
5095 		fi = &fm_entry->fltr_info;
5096 		if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
5097 			continue;
5098 
5099 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5100 							vsi_list_head, fi);
5101 		if (status)
5102 			return status;
5103 	}
5104 	return status;
5105 }
5106 
5107 /**
5108  * ice_determine_promisc_mask
5109  * @fi: filter info to parse
5110  *
5111  * Helper function to determine which ICE_PROMISC_ mask corresponds
5112  * to given filter into.
5113  */
ice_determine_promisc_mask(struct ice_fltr_info * fi)5114 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
5115 {
5116 	u16 vid = fi->l_data.mac_vlan.vlan_id;
5117 	u8 *macaddr = fi->l_data.mac.mac_addr;
5118 	bool is_tx_fltr = false;
5119 	u8 promisc_mask = 0;
5120 
5121 	if (fi->flag == ICE_FLTR_TX)
5122 		is_tx_fltr = true;
5123 
5124 	if (IS_BROADCAST_ETHER_ADDR(macaddr))
5125 		promisc_mask |= is_tx_fltr ?
5126 			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
5127 	else if (IS_MULTICAST_ETHER_ADDR(macaddr))
5128 		promisc_mask |= is_tx_fltr ?
5129 			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
5130 	else if (IS_UNICAST_ETHER_ADDR(macaddr))
5131 		promisc_mask |= is_tx_fltr ?
5132 			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
5133 	if (vid)
5134 		promisc_mask |= is_tx_fltr ?
5135 			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
5136 
5137 	return promisc_mask;
5138 }
5139 
5140 /**
5141  * _ice_get_vsi_promisc - get promiscuous mode of given VSI
5142  * @hw: pointer to the hardware structure
5143  * @vsi_handle: VSI handle to retrieve info from
5144  * @promisc_mask: pointer to mask to be filled in
5145  * @vid: VLAN ID of promisc VLAN VSI
5146  * @sw: pointer to switch info struct for which function add rule
5147  */
5148 static enum ice_status
_ice_get_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 * promisc_mask,u16 * vid,struct ice_switch_info * sw)5149 _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5150 		     u16 *vid, struct ice_switch_info *sw)
5151 {
5152 	struct ice_fltr_mgmt_list_entry *itr;
5153 	struct LIST_HEAD_TYPE *rule_head;
5154 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
5155 
5156 	if (!ice_is_vsi_valid(hw, vsi_handle))
5157 		return ICE_ERR_PARAM;
5158 
5159 	*vid = 0;
5160 	*promisc_mask = 0;
5161 	rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
5162 	rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
5163 
5164 	ice_acquire_lock(rule_lock);
5165 	LIST_FOR_EACH_ENTRY(itr, rule_head,
5166 			    ice_fltr_mgmt_list_entry, list_entry) {
5167 		/* Continue if this filter doesn't apply to this VSI or the
5168 		 * VSI ID is not in the VSI map for this filter
5169 		 */
5170 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
5171 			continue;
5172 
5173 		*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5174 	}
5175 	ice_release_lock(rule_lock);
5176 
5177 	return ICE_SUCCESS;
5178 }
5179 
5180 /**
5181  * ice_get_vsi_promisc - get promiscuous mode of given VSI
5182  * @hw: pointer to the hardware structure
5183  * @vsi_handle: VSI handle to retrieve info from
5184  * @promisc_mask: pointer to mask to be filled in
5185  * @vid: VLAN ID of promisc VLAN VSI
5186  */
5187 enum ice_status
ice_get_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 * promisc_mask,u16 * vid)5188 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5189 		    u16 *vid)
5190 {
5191 	return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
5192 				    vid, hw->switch_info);
5193 }
5194 
5195 /**
5196  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5197  * @hw: pointer to the hardware structure
5198  * @vsi_handle: VSI handle to retrieve info from
5199  * @promisc_mask: pointer to mask to be filled in
5200  * @vid: VLAN ID of promisc VLAN VSI
5201  * @sw: pointer to switch info struct for which function add rule
5202  */
5203 static enum ice_status
_ice_get_vsi_vlan_promisc(struct ice_hw * hw,u16 vsi_handle,u8 * promisc_mask,u16 * vid,struct ice_switch_info * sw)5204 _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5205 			  u16 *vid, struct ice_switch_info *sw)
5206 {
5207 	struct ice_fltr_mgmt_list_entry *itr;
5208 	struct LIST_HEAD_TYPE *rule_head;
5209 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
5210 
5211 	if (!ice_is_vsi_valid(hw, vsi_handle))
5212 		return ICE_ERR_PARAM;
5213 
5214 	*vid = 0;
5215 	*promisc_mask = 0;
5216 	rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
5217 	rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
5218 
5219 	ice_acquire_lock(rule_lock);
5220 	LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
5221 			    list_entry) {
5222 		/* Continue if this filter doesn't apply to this VSI or the
5223 		 * VSI ID is not in the VSI map for this filter
5224 		 */
5225 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
5226 			continue;
5227 
5228 		*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
5229 	}
5230 	ice_release_lock(rule_lock);
5231 
5232 	return ICE_SUCCESS;
5233 }
5234 
5235 /**
5236  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
5237  * @hw: pointer to the hardware structure
5238  * @vsi_handle: VSI handle to retrieve info from
5239  * @promisc_mask: pointer to mask to be filled in
5240  * @vid: VLAN ID of promisc VLAN VSI
5241  */
5242 enum ice_status
ice_get_vsi_vlan_promisc(struct ice_hw * hw,u16 vsi_handle,u8 * promisc_mask,u16 * vid)5243 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
5244 			 u16 *vid)
5245 {
5246 	return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
5247 					 vid, hw->switch_info);
5248 }
5249 
5250 /**
5251  * ice_remove_promisc - Remove promisc based filter rules
5252  * @hw: pointer to the hardware structure
5253  * @recp_id: recipe ID for which the rule needs to removed
5254  * @v_list: list of promisc entries
5255  */
5256 static enum ice_status
ice_remove_promisc(struct ice_hw * hw,u8 recp_id,struct LIST_HEAD_TYPE * v_list)5257 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
5258 		   struct LIST_HEAD_TYPE *v_list)
5259 {
5260 	struct ice_fltr_list_entry *v_list_itr, *tmp;
5261 	struct ice_sw_recipe *recp_list;
5262 
5263 	recp_list = &hw->switch_info->recp_list[recp_id];
5264 	LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
5265 				 list_entry) {
5266 		v_list_itr->status =
5267 			ice_remove_rule_internal(hw, recp_list, v_list_itr);
5268 		if (v_list_itr->status)
5269 			return v_list_itr->status;
5270 	}
5271 	return ICE_SUCCESS;
5272 }
5273 
5274 /**
5275  * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
5276  * @hw: pointer to the hardware structure
5277  * @vsi_handle: VSI handle to clear mode
5278  * @promisc_mask: mask of promiscuous config bits to clear
5279  * @vid: VLAN ID to clear VLAN promiscuous
5280  * @sw: pointer to switch info struct for which function add rule
5281  */
5282 static enum ice_status
_ice_clear_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,u16 vid,struct ice_switch_info * sw)5283 _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5284 		       u16 vid, struct ice_switch_info *sw)
5285 {
5286 	struct ice_fltr_list_entry *fm_entry, *tmp;
5287 	struct LIST_HEAD_TYPE remove_list_head;
5288 	struct ice_fltr_mgmt_list_entry *itr;
5289 	struct LIST_HEAD_TYPE *rule_head;
5290 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
5291 	enum ice_status status = ICE_SUCCESS;
5292 	u8 recipe_id;
5293 
5294 	if (!ice_is_vsi_valid(hw, vsi_handle))
5295 		return ICE_ERR_PARAM;
5296 
5297 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
5298 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5299 	else
5300 		recipe_id = ICE_SW_LKUP_PROMISC;
5301 
5302 	rule_head = &sw->recp_list[recipe_id].filt_rules;
5303 	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
5304 
5305 	INIT_LIST_HEAD(&remove_list_head);
5306 
5307 	ice_acquire_lock(rule_lock);
5308 	LIST_FOR_EACH_ENTRY(itr, rule_head,
5309 			    ice_fltr_mgmt_list_entry, list_entry) {
5310 		struct ice_fltr_info *fltr_info;
5311 		u8 fltr_promisc_mask = 0;
5312 
5313 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
5314 			continue;
5315 		fltr_info = &itr->fltr_info;
5316 
5317 		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
5318 		    vid != fltr_info->l_data.mac_vlan.vlan_id)
5319 			continue;
5320 
5321 		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
5322 
5323 		/* Skip if filter is not completely specified by given mask */
5324 		if (fltr_promisc_mask & ~promisc_mask)
5325 			continue;
5326 
5327 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
5328 							&remove_list_head,
5329 							fltr_info);
5330 		if (status) {
5331 			ice_release_lock(rule_lock);
5332 			goto free_fltr_list;
5333 		}
5334 	}
5335 	ice_release_lock(rule_lock);
5336 
5337 	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
5338 
5339 free_fltr_list:
5340 	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5341 				 ice_fltr_list_entry, list_entry) {
5342 		LIST_DEL(&fm_entry->list_entry);
5343 		ice_free(hw, fm_entry);
5344 	}
5345 
5346 	return status;
5347 }
5348 
5349 /**
5350  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
5351  * @hw: pointer to the hardware structure
5352  * @vsi_handle: VSI handle to clear mode
5353  * @promisc_mask: mask of promiscuous config bits to clear
5354  * @vid: VLAN ID to clear VLAN promiscuous
5355  */
5356 enum ice_status
ice_clear_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,u16 vid)5357 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
5358 		      u8 promisc_mask, u16 vid)
5359 {
5360 	return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
5361 				      vid, hw->switch_info);
5362 }
5363 
5364 /**
5365  * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5366  * @hw: pointer to the hardware structure
5367  * @vsi_handle: VSI handle to configure
5368  * @promisc_mask: mask of promiscuous config bits
5369  * @vid: VLAN ID to set VLAN promiscuous
5370  * @lport: logical port number to configure promisc mode
5371  * @sw: pointer to switch info struct for which function add rule
5372  */
5373 static enum ice_status
_ice_set_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,u16 vid,u8 lport,struct ice_switch_info * sw)5374 _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5375 		     u16 vid, u8 lport, struct ice_switch_info *sw)
5376 {
5377 	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
5378 	struct ice_fltr_list_entry f_list_entry;
5379 	struct ice_fltr_info new_fltr;
5380 	enum ice_status status = ICE_SUCCESS;
5381 	bool is_tx_fltr;
5382 	u16 hw_vsi_id;
5383 	int pkt_type;
5384 	u8 recipe_id;
5385 
5386 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5387 
5388 	if (!ice_is_vsi_valid(hw, vsi_handle))
5389 		return ICE_ERR_PARAM;
5390 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5391 
5392 	ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
5393 
5394 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
5395 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
5396 		new_fltr.l_data.mac_vlan.vlan_id = vid;
5397 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
5398 	} else {
5399 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
5400 		recipe_id = ICE_SW_LKUP_PROMISC;
5401 	}
5402 
5403 	/* Separate filters must be set for each direction/packet type
5404 	 * combination, so we will loop over the mask value, store the
5405 	 * individual type, and clear it out in the input mask as it
5406 	 * is found.
5407 	 */
5408 	while (promisc_mask) {
5409 		struct ice_sw_recipe *recp_list;
5410 		u8 *mac_addr;
5411 
5412 		pkt_type = 0;
5413 		is_tx_fltr = false;
5414 
5415 		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
5416 			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
5417 			pkt_type = UCAST_FLTR;
5418 		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
5419 			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
5420 			pkt_type = UCAST_FLTR;
5421 			is_tx_fltr = true;
5422 		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
5423 			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
5424 			pkt_type = MCAST_FLTR;
5425 		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
5426 			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
5427 			pkt_type = MCAST_FLTR;
5428 			is_tx_fltr = true;
5429 		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
5430 			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
5431 			pkt_type = BCAST_FLTR;
5432 		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
5433 			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
5434 			pkt_type = BCAST_FLTR;
5435 			is_tx_fltr = true;
5436 		}
5437 
5438 		/* Check for VLAN promiscuous flag */
5439 		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
5440 			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
5441 		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
5442 			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
5443 			is_tx_fltr = true;
5444 		}
5445 
5446 		/* Set filter DA based on packet type */
5447 		mac_addr = new_fltr.l_data.mac.mac_addr;
5448 		if (pkt_type == BCAST_FLTR) {
5449 			ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
5450 		} else if (pkt_type == MCAST_FLTR ||
5451 			   pkt_type == UCAST_FLTR) {
5452 			/* Use the dummy ether header DA */
5453 			ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
5454 				   ICE_NONDMA_TO_NONDMA);
5455 			if (pkt_type == MCAST_FLTR)
5456 				mac_addr[0] |= 0x1;	/* Set multicast bit */
5457 		}
5458 
5459 		/* Need to reset this to zero for all iterations */
5460 		new_fltr.flag = 0;
5461 		if (is_tx_fltr) {
5462 			new_fltr.flag |= ICE_FLTR_TX;
5463 			new_fltr.src = hw_vsi_id;
5464 		} else {
5465 			new_fltr.flag |= ICE_FLTR_RX;
5466 			new_fltr.src = lport;
5467 		}
5468 
5469 		new_fltr.fltr_act = ICE_FWD_TO_VSI;
5470 		new_fltr.vsi_handle = vsi_handle;
5471 		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
5472 		f_list_entry.fltr_info = new_fltr;
5473 		recp_list = &sw->recp_list[recipe_id];
5474 
5475 		status = ice_add_rule_internal(hw, recp_list, lport,
5476 					       &f_list_entry);
5477 		if (status != ICE_SUCCESS)
5478 			goto set_promisc_exit;
5479 	}
5480 
5481 set_promisc_exit:
5482 	return status;
5483 }
5484 
5485 /**
5486  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
5487  * @hw: pointer to the hardware structure
5488  * @vsi_handle: VSI handle to configure
5489  * @promisc_mask: mask of promiscuous config bits
5490  * @vid: VLAN ID to set VLAN promiscuous
5491  */
5492 enum ice_status
ice_set_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,u16 vid)5493 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5494 		    u16 vid)
5495 {
5496 	return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
5497 				    hw->port_info->lport,
5498 				    hw->switch_info);
5499 }
5500 
5501 /**
5502  * _ice_set_vlan_vsi_promisc
5503  * @hw: pointer to the hardware structure
5504  * @vsi_handle: VSI handle to configure
5505  * @promisc_mask: mask of promiscuous config bits
5506  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5507  * @lport: logical port number to configure promisc mode
5508  * @sw: pointer to switch info struct for which function add rule
5509  *
5510  * Configure VSI with all associated VLANs to given promiscuous mode(s)
5511  */
5512 static enum ice_status
_ice_set_vlan_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,bool rm_vlan_promisc,u8 lport,struct ice_switch_info * sw)5513 _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5514 			  bool rm_vlan_promisc, u8 lport,
5515 			  struct ice_switch_info *sw)
5516 {
5517 	struct ice_fltr_list_entry *list_itr, *tmp;
5518 	struct LIST_HEAD_TYPE vsi_list_head;
5519 	struct LIST_HEAD_TYPE *vlan_head;
5520 	struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
5521 	enum ice_status status;
5522 	u16 vlan_id;
5523 
5524 	INIT_LIST_HEAD(&vsi_list_head);
5525 	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
5526 	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
5527 	ice_acquire_lock(vlan_lock);
5528 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
5529 					  &vsi_list_head);
5530 	ice_release_lock(vlan_lock);
5531 	if (status)
5532 		goto free_fltr_list;
5533 
5534 	LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
5535 			    list_entry) {
5536 		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
5537 		if (rm_vlan_promisc)
5538 			status =  _ice_clear_vsi_promisc(hw, vsi_handle,
5539 							 promisc_mask,
5540 							 vlan_id, sw);
5541 		else
5542 			status =  _ice_set_vsi_promisc(hw, vsi_handle,
5543 						       promisc_mask, vlan_id,
5544 						       lport, sw);
5545 		if (status)
5546 			break;
5547 	}
5548 
5549 free_fltr_list:
5550 	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
5551 				 ice_fltr_list_entry, list_entry) {
5552 		LIST_DEL(&list_itr->list_entry);
5553 		ice_free(hw, list_itr);
5554 	}
5555 	return status;
5556 }
5557 
5558 /**
5559  * ice_set_vlan_vsi_promisc
5560  * @hw: pointer to the hardware structure
5561  * @vsi_handle: VSI handle to configure
5562  * @promisc_mask: mask of promiscuous config bits
5563  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
5564  *
5565  * Configure VSI with all associated VLANs to given promiscuous mode(s)
5566  */
5567 enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,bool rm_vlan_promisc)5568 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
5569 			 bool rm_vlan_promisc)
5570 {
5571 	return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
5572 					 rm_vlan_promisc, hw->port_info->lport,
5573 					 hw->switch_info);
5574 }
5575 
5576 /**
5577  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
5578  * @hw: pointer to the hardware structure
5579  * @vsi_handle: VSI handle to remove filters from
5580  * @recp_list: recipe list from which function remove fltr
5581  * @lkup: switch rule filter lookup type
5582  */
5583 static void
ice_remove_vsi_lkup_fltr(struct ice_hw * hw,u16 vsi_handle,struct ice_sw_recipe * recp_list,enum ice_sw_lkup_type lkup)5584 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
5585 			 struct ice_sw_recipe *recp_list,
5586 			 enum ice_sw_lkup_type lkup)
5587 {
5588 	struct ice_fltr_list_entry *fm_entry;
5589 	struct LIST_HEAD_TYPE remove_list_head;
5590 	struct LIST_HEAD_TYPE *rule_head;
5591 	struct ice_fltr_list_entry *tmp;
5592 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
5593 	enum ice_status status;
5594 
5595 	INIT_LIST_HEAD(&remove_list_head);
5596 	rule_lock = &recp_list[lkup].filt_rule_lock;
5597 	rule_head = &recp_list[lkup].filt_rules;
5598 	ice_acquire_lock(rule_lock);
5599 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
5600 					  &remove_list_head);
5601 	ice_release_lock(rule_lock);
5602 	if (status)
5603 		return;
5604 
5605 	switch (lkup) {
5606 	case ICE_SW_LKUP_MAC:
5607 		ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
5608 		break;
5609 	case ICE_SW_LKUP_VLAN:
5610 		ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
5611 		break;
5612 	case ICE_SW_LKUP_PROMISC:
5613 	case ICE_SW_LKUP_PROMISC_VLAN:
5614 		ice_remove_promisc(hw, lkup, &remove_list_head);
5615 		break;
5616 	case ICE_SW_LKUP_MAC_VLAN:
5617 		ice_remove_mac_vlan(hw, &remove_list_head);
5618 		break;
5619 	case ICE_SW_LKUP_ETHERTYPE:
5620 	case ICE_SW_LKUP_ETHERTYPE_MAC:
5621 		ice_remove_eth_mac(hw, &remove_list_head);
5622 		break;
5623 	case ICE_SW_LKUP_DFLT:
5624 		ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
5625 		break;
5626 	case ICE_SW_LKUP_LAST:
5627 		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
5628 		break;
5629 	}
5630 
5631 	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
5632 				 ice_fltr_list_entry, list_entry) {
5633 		LIST_DEL(&fm_entry->list_entry);
5634 		ice_free(hw, fm_entry);
5635 	}
5636 }
5637 
5638 /**
5639  * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
5640  * @hw: pointer to the hardware structure
5641  * @vsi_handle: VSI handle to remove filters from
5642  * @sw: pointer to switch info struct
5643  */
5644 static void
ice_remove_vsi_fltr_rule(struct ice_hw * hw,u16 vsi_handle,struct ice_switch_info * sw)5645 ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
5646 			 struct ice_switch_info *sw)
5647 {
5648 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
5649 
5650 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5651 				 sw->recp_list, ICE_SW_LKUP_MAC);
5652 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5653 				 sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
5654 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5655 				 sw->recp_list, ICE_SW_LKUP_PROMISC);
5656 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5657 				 sw->recp_list, ICE_SW_LKUP_VLAN);
5658 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5659 				 sw->recp_list, ICE_SW_LKUP_DFLT);
5660 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5661 				 sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
5662 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5663 				 sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
5664 	ice_remove_vsi_lkup_fltr(hw, vsi_handle,
5665 				 sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
5666 }
5667 
5668 /**
5669  * ice_remove_vsi_fltr - Remove all filters for a VSI
5670  * @hw: pointer to the hardware structure
5671  * @vsi_handle: VSI handle to remove filters from
5672  */
ice_remove_vsi_fltr(struct ice_hw * hw,u16 vsi_handle)5673 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
5674 {
5675 	ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
5676 }
5677 
5678 /**
5679  * ice_alloc_res_cntr - allocating resource counter
5680  * @hw: pointer to the hardware structure
5681  * @type: type of resource
5682  * @alloc_shared: if set it is shared else dedicated
5683  * @num_items: number of entries requested for FD resource type
5684  * @counter_id: counter index returned by AQ call
5685  */
5686 enum ice_status
ice_alloc_res_cntr(struct ice_hw * hw,u8 type,u8 alloc_shared,u16 num_items,u16 * counter_id)5687 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5688 		   u16 *counter_id)
5689 {
5690 	struct ice_aqc_alloc_free_res_elem *buf;
5691 	enum ice_status status;
5692 	u16 buf_len;
5693 
5694 	/* Allocate resource */
5695 	buf_len = ice_struct_size(buf, elem, 1);
5696 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5697 	if (!buf)
5698 		return ICE_ERR_NO_MEMORY;
5699 
5700 	buf->num_elems = CPU_TO_LE16(num_items);
5701 	buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5702 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
5703 
5704 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5705 				       ice_aqc_opc_alloc_res, NULL);
5706 	if (status)
5707 		goto exit;
5708 
5709 	*counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
5710 
5711 exit:
5712 	ice_free(hw, buf);
5713 	return status;
5714 }
5715 
5716 /**
5717  * ice_free_res_cntr - free resource counter
5718  * @hw: pointer to the hardware structure
5719  * @type: type of resource
5720  * @alloc_shared: if set it is shared else dedicated
5721  * @num_items: number of entries to be freed for FD resource type
5722  * @counter_id: counter ID resource which needs to be freed
5723  */
5724 enum ice_status
ice_free_res_cntr(struct ice_hw * hw,u8 type,u8 alloc_shared,u16 num_items,u16 counter_id)5725 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
5726 		  u16 counter_id)
5727 {
5728 	struct ice_aqc_alloc_free_res_elem *buf;
5729 	enum ice_status status;
5730 	u16 buf_len;
5731 
5732 	/* Free resource */
5733 	buf_len = ice_struct_size(buf, elem, 1);
5734 	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5735 	if (!buf)
5736 		return ICE_ERR_NO_MEMORY;
5737 
5738 	buf->num_elems = CPU_TO_LE16(num_items);
5739 	buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
5740 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
5741 	buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
5742 
5743 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
5744 				       ice_aqc_opc_free_res, NULL);
5745 	if (status)
5746 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
5747 
5748 	ice_free(hw, buf);
5749 	return status;
5750 }
5751 
5752 /**
5753  * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
5754  * @hw: pointer to the hardware structure
5755  * @counter_id: returns counter index
5756  */
ice_alloc_vlan_res_counter(struct ice_hw * hw,u16 * counter_id)5757 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
5758 {
5759 	return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5760 				  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5761 				  counter_id);
5762 }
5763 
5764 /**
5765  * ice_free_vlan_res_counter - Free counter resource for VLAN type
5766  * @hw: pointer to the hardware structure
5767  * @counter_id: counter index to be freed
5768  */
ice_free_vlan_res_counter(struct ice_hw * hw,u16 counter_id)5769 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
5770 {
5771 	return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
5772 				 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
5773 				 counter_id);
5774 }
5775 
5776 /**
5777  * ice_alloc_res_lg_act - add large action resource
5778  * @hw: pointer to the hardware structure
5779  * @l_id: large action ID to fill it in
5780  * @num_acts: number of actions to hold with a large action entry
5781  */
5782 static enum ice_status
ice_alloc_res_lg_act(struct ice_hw * hw,u16 * l_id,u16 num_acts)5783 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
5784 {
5785 	struct ice_aqc_alloc_free_res_elem *sw_buf;
5786 	enum ice_status status;
5787 	u16 buf_len;
5788 
5789 	if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
5790 		return ICE_ERR_PARAM;
5791 
5792 	/* Allocate resource for large action */
5793 	buf_len = ice_struct_size(sw_buf, elem, 1);
5794 	sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
5795 	if (!sw_buf)
5796 		return ICE_ERR_NO_MEMORY;
5797 
5798 	sw_buf->num_elems = CPU_TO_LE16(1);
5799 
5800 	/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
5801 	 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
5802 	 * If num_acts is greater than 2, then use
5803 	 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
5804 	 * The num_acts cannot exceed 4. This was ensured at the
5805 	 * beginning of the function.
5806 	 */
5807 	if (num_acts == 1)
5808 		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
5809 	else if (num_acts == 2)
5810 		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
5811 	else
5812 		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
5813 
5814 	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
5815 				       ice_aqc_opc_alloc_res, NULL);
5816 	if (!status)
5817 		*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
5818 
5819 	ice_free(hw, sw_buf);
5820 	return status;
5821 }
5822 
5823 /**
5824  * ice_add_mac_with_sw_marker - add filter with sw marker
5825  * @hw: pointer to the hardware structure
5826  * @f_info: filter info structure containing the MAC filter information
5827  * @sw_marker: sw marker to tag the Rx descriptor with
5828  */
5829 enum ice_status
ice_add_mac_with_sw_marker(struct ice_hw * hw,struct ice_fltr_info * f_info,u16 sw_marker)5830 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
5831 			   u16 sw_marker)
5832 {
5833 	struct ice_fltr_mgmt_list_entry *m_entry;
5834 	struct ice_fltr_list_entry fl_info;
5835 	struct ice_sw_recipe *recp_list;
5836 	struct LIST_HEAD_TYPE l_head;
5837 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
5838 	enum ice_status ret;
5839 	bool entry_exists;
5840 	u16 lg_act_id;
5841 
5842 	if (f_info->fltr_act != ICE_FWD_TO_VSI)
5843 		return ICE_ERR_PARAM;
5844 
5845 	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5846 		return ICE_ERR_PARAM;
5847 
5848 	if (sw_marker == ICE_INVAL_SW_MARKER_ID)
5849 		return ICE_ERR_PARAM;
5850 
5851 	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5852 		return ICE_ERR_PARAM;
5853 	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5854 
5855 	/* Add filter if it doesn't exist so then the adding of large
5856 	 * action always results in update
5857 	 */
5858 
5859 	INIT_LIST_HEAD(&l_head);
5860 	fl_info.fltr_info = *f_info;
5861 	LIST_ADD(&fl_info.list_entry, &l_head);
5862 
5863 	entry_exists = false;
5864 	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5865 			       hw->port_info->lport);
5866 	if (ret == ICE_ERR_ALREADY_EXISTS)
5867 		entry_exists = true;
5868 	else if (ret)
5869 		return ret;
5870 
5871 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5872 	rule_lock = &recp_list->filt_rule_lock;
5873 	ice_acquire_lock(rule_lock);
5874 	/* Get the book keeping entry for the filter */
5875 	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5876 	if (!m_entry)
5877 		goto exit_error;
5878 
5879 	/* If counter action was enabled for this rule then don't enable
5880 	 * sw marker large action
5881 	 */
5882 	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5883 		ret = ICE_ERR_PARAM;
5884 		goto exit_error;
5885 	}
5886 
5887 	/* if same marker was added before */
5888 	if (m_entry->sw_marker_id == sw_marker) {
5889 		ret = ICE_ERR_ALREADY_EXISTS;
5890 		goto exit_error;
5891 	}
5892 
5893 	/* Allocate a hardware table entry to hold large act. Three actions
5894 	 * for marker based large action
5895 	 */
5896 	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
5897 	if (ret)
5898 		goto exit_error;
5899 
5900 	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
5901 		goto exit_error;
5902 
5903 	/* Update the switch rule to add the marker action */
5904 	ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
5905 	if (!ret) {
5906 		ice_release_lock(rule_lock);
5907 		return ret;
5908 	}
5909 
5910 exit_error:
5911 	ice_release_lock(rule_lock);
5912 	/* only remove entry if it did not exist previously */
5913 	if (!entry_exists)
5914 		ret = ice_remove_mac(hw, &l_head);
5915 
5916 	return ret;
5917 }
5918 
5919 /**
5920  * ice_add_mac_with_counter - add filter with counter enabled
5921  * @hw: pointer to the hardware structure
5922  * @f_info: pointer to filter info structure containing the MAC filter
5923  *          information
5924  */
5925 enum ice_status
ice_add_mac_with_counter(struct ice_hw * hw,struct ice_fltr_info * f_info)5926 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
5927 {
5928 	struct ice_fltr_mgmt_list_entry *m_entry;
5929 	struct ice_fltr_list_entry fl_info;
5930 	struct ice_sw_recipe *recp_list;
5931 	struct LIST_HEAD_TYPE l_head;
5932 	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
5933 	enum ice_status ret;
5934 	bool entry_exist;
5935 	u16 counter_id;
5936 	u16 lg_act_id;
5937 
5938 	if (f_info->fltr_act != ICE_FWD_TO_VSI)
5939 		return ICE_ERR_PARAM;
5940 
5941 	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
5942 		return ICE_ERR_PARAM;
5943 
5944 	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
5945 		return ICE_ERR_PARAM;
5946 	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
5947 	recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
5948 
5949 	entry_exist = false;
5950 
5951 	rule_lock = &recp_list->filt_rule_lock;
5952 
5953 	/* Add filter if it doesn't exist so then the adding of large
5954 	 * action always results in update
5955 	 */
5956 	INIT_LIST_HEAD(&l_head);
5957 
5958 	fl_info.fltr_info = *f_info;
5959 	LIST_ADD(&fl_info.list_entry, &l_head);
5960 
5961 	ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
5962 			       hw->port_info->lport);
5963 	if (ret == ICE_ERR_ALREADY_EXISTS)
5964 		entry_exist = true;
5965 	else if (ret)
5966 		return ret;
5967 
5968 	ice_acquire_lock(rule_lock);
5969 	m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
5970 	if (!m_entry) {
5971 		ret = ICE_ERR_BAD_PTR;
5972 		goto exit_error;
5973 	}
5974 
5975 	/* Don't enable counter for a filter for which sw marker was enabled */
5976 	if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
5977 		ret = ICE_ERR_PARAM;
5978 		goto exit_error;
5979 	}
5980 
5981 	/* If a counter was already enabled then don't need to add again */
5982 	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
5983 		ret = ICE_ERR_ALREADY_EXISTS;
5984 		goto exit_error;
5985 	}
5986 
5987 	/* Allocate a hardware table entry to VLAN counter */
5988 	ret = ice_alloc_vlan_res_counter(hw, &counter_id);
5989 	if (ret)
5990 		goto exit_error;
5991 
5992 	/* Allocate a hardware table entry to hold large act. Two actions for
5993 	 * counter based large action
5994 	 */
5995 	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
5996 	if (ret)
5997 		goto exit_error;
5998 
5999 	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
6000 		goto exit_error;
6001 
6002 	/* Update the switch rule to add the counter action */
6003 	ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
6004 	if (!ret) {
6005 		ice_release_lock(rule_lock);
6006 		return ret;
6007 	}
6008 
6009 exit_error:
6010 	ice_release_lock(rule_lock);
6011 	/* only remove entry if it did not exist previously */
6012 	if (!entry_exist)
6013 		ret = ice_remove_mac(hw, &l_head);
6014 
6015 	return ret;
6016 }
6017 
6018 /* This is mapping table entry that maps every word within a given protocol
6019  * structure to the real byte offset as per the specification of that
6020  * protocol header.
6021  * for example dst address is 3 words in ethertype header and corresponding
6022  * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
6023  * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
6024  * matching entry describing its field. This needs to be updated if new
6025  * structure is added to that union.
6026  */
6027 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
6028 	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
6029 	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
6030 	{ ICE_ETYPE_OL,		{ 0 } },
6031 	{ ICE_VLAN_OFOS,	{ 0, 2 } },
6032 	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6033 	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
6034 	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6035 				 26, 28, 30, 32, 34, 36, 38 } },
6036 	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
6037 				 26, 28, 30, 32, 34, 36, 38 } },
6038 	{ ICE_TCP_IL,		{ 0, 2 } },
6039 	{ ICE_UDP_OF,		{ 0, 2 } },
6040 	{ ICE_UDP_ILOS,		{ 0, 2 } },
6041 	{ ICE_SCTP_IL,		{ 0, 2 } },
6042 	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
6043 	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
6044 	{ ICE_VXLAN_GPE,	{ 8, 10, 12, 14 } },
6045 	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
6046 	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20 } },
6047 	{ ICE_PPPOE,		{ 0, 2, 4, 6 } },
6048 	{ ICE_PFCP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
6049 	{ ICE_L2TPV3,		{ 0, 2, 4, 6, 8, 10 } },
6050 	{ ICE_ESP,		{ 0, 2, 4, 6 } },
6051 	{ ICE_AH,		{ 0, 2, 4, 6, 8, 10 } },
6052 	{ ICE_NAT_T,		{ 8, 10, 12, 14 } },
6053 	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
6054 	{ ICE_VLAN_EX,		{ 0, 2 } },
6055 };
6056 
6057 /* The following table describes preferred grouping of recipes.
6058  * If a recipe that needs to be programmed is a superset or matches one of the
6059  * following combinations, then the recipe needs to be chained as per the
6060  * following policy.
6061  */
6062 
6063 static const struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
6064 	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
6065 	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
6066 	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
6067 	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
6068 	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
6069 	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
6070 	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
6071 	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
6072 	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
6073 	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
6074 	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
6075 	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
6076 	{ ICE_VXLAN,		ICE_UDP_OF_HW },
6077 	{ ICE_GENEVE,		ICE_UDP_OF_HW },
6078 	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
6079 	{ ICE_NVGRE,		ICE_GRE_OF_HW },
6080 	{ ICE_GTP,		ICE_UDP_OF_HW },
6081 	{ ICE_PPPOE,		ICE_PPPOE_HW },
6082 	{ ICE_PFCP,		ICE_UDP_ILOS_HW },
6083 	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
6084 	{ ICE_ESP,		ICE_ESP_HW },
6085 	{ ICE_AH,		ICE_AH_HW },
6086 	{ ICE_NAT_T,		ICE_UDP_ILOS_HW },
6087 	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
6088 	{ ICE_VLAN_EX,		ICE_VLAN_OF_HW },
6089 };
6090 
6091 /**
6092  * ice_find_recp - find a recipe
6093  * @hw: pointer to the hardware structure
6094  * @lkup_exts: extension sequence to match
6095  *
6096  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
6097  */
ice_find_recp(struct ice_hw * hw,struct ice_prot_lkup_ext * lkup_exts,enum ice_sw_tunnel_type tun_type)6098 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
6099 			 enum ice_sw_tunnel_type tun_type)
6100 {
6101 	bool refresh_required = true;
6102 	struct ice_sw_recipe *recp;
6103 	u8 i;
6104 
6105 	/* Walk through existing recipes to find a match */
6106 	recp = hw->switch_info->recp_list;
6107 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6108 		/* If recipe was not created for this ID, in SW bookkeeping,
6109 		 * check if FW has an entry for this recipe. If the FW has an
6110 		 * entry update it in our SW bookkeeping and continue with the
6111 		 * matching.
6112 		 */
6113 		if (!recp[i].recp_created)
6114 			if (ice_get_recp_frm_fw(hw,
6115 						hw->switch_info->recp_list, i,
6116 						&refresh_required))
6117 				continue;
6118 
6119 		/* Skip inverse action recipes */
6120 		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
6121 		    ICE_AQ_RECIPE_ACT_INV_ACT)
6122 			continue;
6123 
6124 		/* if number of words we are looking for match */
6125 		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
6126 			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
6127 			struct ice_fv_word *be = lkup_exts->fv_words;
6128 			u16 *cr = recp[i].lkup_exts.field_mask;
6129 			u16 *de = lkup_exts->field_mask;
6130 			bool found = true;
6131 			u8 pe, qr;
6132 
6133 			/* ar, cr, and qr are related to the recipe words, while
6134 			 * be, de, and pe are related to the lookup words
6135 			 */
6136 			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
6137 				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
6138 				     qr++) {
6139 					if (ar[qr].off == be[pe].off &&
6140 					    ar[qr].prot_id == be[pe].prot_id &&
6141 					    cr[qr] == de[pe])
6142 						/* Found the "pe"th word in the
6143 						 * given recipe
6144 						 */
6145 						break;
6146 				}
6147 				/* After walking through all the words in the
6148 				 * "i"th recipe if "p"th word was not found then
6149 				 * this recipe is not what we are looking for.
6150 				 * So break out from this loop and try the next
6151 				 * recipe
6152 				 */
6153 				if (qr >= recp[i].lkup_exts.n_val_words) {
6154 					found = false;
6155 					break;
6156 				}
6157 			}
6158 			/* If for "i"th recipe the found was never set to false
6159 			 * then it means we found our match
6160 			 */
6161 			if (tun_type == recp[i].tun_type && found)
6162 				return i; /* Return the recipe ID */
6163 		}
6164 	}
6165 	return ICE_MAX_NUM_RECIPES;
6166 }
6167 
6168 /**
6169  * ice_prot_type_to_id - get protocol ID from protocol type
6170  * @type: protocol type
6171  * @id: pointer to variable that will receive the ID
6172  *
6173  * Returns true if found, false otherwise
6174  */
ice_prot_type_to_id(enum ice_protocol_type type,u8 * id)6175 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
6176 {
6177 	u8 i;
6178 
6179 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
6180 		if (ice_prot_id_tbl[i].type == type) {
6181 			*id = ice_prot_id_tbl[i].protocol_id;
6182 			return true;
6183 		}
6184 	return false;
6185 }
6186 
6187 /**
6188  * ice_find_valid_words - count valid words
6189  * @rule: advanced rule with lookup information
6190  * @lkup_exts: byte offset extractions of the words that are valid
6191  *
6192  * calculate valid words in a lookup rule using mask value
6193  */
6194 static u8
ice_fill_valid_words(struct ice_adv_lkup_elem * rule,struct ice_prot_lkup_ext * lkup_exts)6195 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
6196 		     struct ice_prot_lkup_ext *lkup_exts)
6197 {
6198 	u8 j, word, prot_id, ret_val;
6199 
6200 	if (!ice_prot_type_to_id(rule->type, &prot_id))
6201 		return 0;
6202 
6203 	word = lkup_exts->n_val_words;
6204 
6205 	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
6206 		if (((u16 *)&rule->m_u)[j] &&
6207 		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
6208 			/* No more space to accommodate */
6209 			if (word >= ICE_MAX_CHAIN_WORDS)
6210 				return 0;
6211 			lkup_exts->fv_words[word].off =
6212 				ice_prot_ext[rule->type].offs[j];
6213 			lkup_exts->fv_words[word].prot_id =
6214 				ice_prot_id_tbl[rule->type].protocol_id;
6215 			lkup_exts->field_mask[word] =
6216 				BE16_TO_CPU(((_FORCE_ __be16 *)&rule->m_u)[j]);
6217 			word++;
6218 		}
6219 
6220 	ret_val = word - lkup_exts->n_val_words;
6221 	lkup_exts->n_val_words = word;
6222 
6223 	return ret_val;
6224 }
6225 
6226 /**
6227  * ice_create_first_fit_recp_def - Create a recipe grouping
6228  * @hw: pointer to the hardware structure
6229  * @lkup_exts: an array of protocol header extractions
6230  * @rg_list: pointer to a list that stores new recipe groups
6231  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
6232  *
6233  * Using first fit algorithm, take all the words that are still not done
6234  * and start grouping them in 4-word groups. Each group makes up one
6235  * recipe.
6236  */
6237 static enum ice_status
ice_create_first_fit_recp_def(struct ice_hw * hw,struct ice_prot_lkup_ext * lkup_exts,struct LIST_HEAD_TYPE * rg_list,u8 * recp_cnt)6238 ice_create_first_fit_recp_def(struct ice_hw *hw,
6239 			      struct ice_prot_lkup_ext *lkup_exts,
6240 			      struct LIST_HEAD_TYPE *rg_list,
6241 			      u8 *recp_cnt)
6242 {
6243 	struct ice_pref_recipe_group *grp = NULL;
6244 	u8 j;
6245 
6246 	*recp_cnt = 0;
6247 
6248 	if (!lkup_exts->n_val_words) {
6249 		struct ice_recp_grp_entry *entry;
6250 
6251 		entry = (struct ice_recp_grp_entry *)
6252 			ice_malloc(hw, sizeof(*entry));
6253 		if (!entry)
6254 			return ICE_ERR_NO_MEMORY;
6255 		LIST_ADD(&entry->l_entry, rg_list);
6256 		grp = &entry->r_group;
6257 		(*recp_cnt)++;
6258 		grp->n_val_pairs = 0;
6259 	}
6260 
6261 	/* Walk through every word in the rule to check if it is not done. If so
6262 	 * then this word needs to be part of a new recipe.
6263 	 */
6264 	for (j = 0; j < lkup_exts->n_val_words; j++)
6265 		if (!ice_is_bit_set(lkup_exts->done, j)) {
6266 			if (!grp ||
6267 			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
6268 				struct ice_recp_grp_entry *entry;
6269 
6270 				entry = (struct ice_recp_grp_entry *)
6271 					ice_malloc(hw, sizeof(*entry));
6272 				if (!entry)
6273 					return ICE_ERR_NO_MEMORY;
6274 				LIST_ADD(&entry->l_entry, rg_list);
6275 				grp = &entry->r_group;
6276 				(*recp_cnt)++;
6277 			}
6278 
6279 			grp->pairs[grp->n_val_pairs].prot_id =
6280 				lkup_exts->fv_words[j].prot_id;
6281 			grp->pairs[grp->n_val_pairs].off =
6282 				lkup_exts->fv_words[j].off;
6283 			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
6284 			grp->n_val_pairs++;
6285 		}
6286 
6287 	return ICE_SUCCESS;
6288 }
6289 
6290 /**
6291  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
6292  * @hw: pointer to the hardware structure
6293  * @fv_list: field vector with the extraction sequence information
6294  * @rg_list: recipe groupings with protocol-offset pairs
6295  *
6296  * Helper function to fill in the field vector indices for protocol-offset
6297  * pairs. These indexes are then ultimately programmed into a recipe.
6298  */
6299 static enum ice_status
ice_fill_fv_word_index(struct ice_hw * hw,struct LIST_HEAD_TYPE * fv_list,struct LIST_HEAD_TYPE * rg_list)6300 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
6301 		       struct LIST_HEAD_TYPE *rg_list)
6302 {
6303 	struct ice_sw_fv_list_entry *fv;
6304 	struct ice_recp_grp_entry *rg;
6305 	struct ice_fv_word *fv_ext;
6306 
6307 	if (LIST_EMPTY(fv_list))
6308 		return ICE_SUCCESS;
6309 
6310 	fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
6311 	fv_ext = fv->fv_ptr->ew;
6312 
6313 	LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
6314 		u8 i;
6315 
6316 		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
6317 			struct ice_fv_word *pr;
6318 			bool found = false;
6319 			u16 mask;
6320 			u8 j;
6321 
6322 			pr = &rg->r_group.pairs[i];
6323 			mask = rg->r_group.mask[i];
6324 
6325 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
6326 				if (fv_ext[j].prot_id == pr->prot_id &&
6327 				    fv_ext[j].off == pr->off) {
6328 					found = true;
6329 
6330 					/* Store index of field vector */
6331 					rg->fv_idx[i] = j;
6332 					rg->fv_mask[i] = mask;
6333 					break;
6334 				}
6335 
6336 			/* Protocol/offset could not be found, caller gave an
6337 			 * invalid pair
6338 			 */
6339 			if (!found)
6340 				return ICE_ERR_PARAM;
6341 		}
6342 	}
6343 
6344 	return ICE_SUCCESS;
6345 }
6346 
6347 /**
6348  * ice_find_free_recp_res_idx - find free result indexes for recipe
6349  * @hw: pointer to hardware structure
6350  * @profiles: bitmap of profiles that will be associated with the new recipe
6351  * @free_idx: pointer to variable to receive the free index bitmap
6352  *
6353  * The algorithm used here is:
6354  *	1. When creating a new recipe, create a set P which contains all
6355  *	   Profiles that will be associated with our new recipe
6356  *
6357  *	2. For each Profile p in set P:
6358  *	    a. Add all recipes associated with Profile p into set R
6359  *	    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
6360  *		[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
6361  *		i. Or just assume they all have the same possible indexes:
6362  *			44, 45, 46, 47
6363  *			i.e., PossibleIndexes = 0x0000F00000000000
6364  *
6365  *	3. For each Recipe r in set R:
6366  *	    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
6367  *	    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
6368  *
6369  *	FreeIndexes will contain the bits indicating the indexes free for use,
6370  *      then the code needs to update the recipe[r].used_result_idx_bits to
6371  *      indicate which indexes were selected for use by this recipe.
6372  */
6373 static u16
ice_find_free_recp_res_idx(struct ice_hw * hw,const ice_bitmap_t * profiles,ice_bitmap_t * free_idx)6374 ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
6375 			   ice_bitmap_t *free_idx)
6376 {
6377 	ice_declare_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6378 	ice_declare_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6379 	ice_declare_bitmap(used_idx, ICE_MAX_FV_WORDS);
6380 	u16 bit;
6381 
6382 	ice_zero_bitmap(possible_idx, ICE_MAX_FV_WORDS);
6383 	ice_zero_bitmap(recipes, ICE_MAX_NUM_RECIPES);
6384 	ice_zero_bitmap(used_idx, ICE_MAX_FV_WORDS);
6385 	ice_zero_bitmap(free_idx, ICE_MAX_FV_WORDS);
6386 
6387 	ice_bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
6388 
6389 	/* For each profile we are going to associate the recipe with, add the
6390 	 * recipes that are associated with that profile. This will give us
6391 	 * the set of recipes that our recipe may collide with. Also, determine
6392 	 * what possible result indexes are usable given this set of profiles.
6393 	 */
6394 	ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
6395 		ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
6396 			      ICE_MAX_NUM_RECIPES);
6397 		ice_and_bitmap(possible_idx, possible_idx,
6398 			       hw->switch_info->prof_res_bm[bit],
6399 			       ICE_MAX_FV_WORDS);
6400 	}
6401 
6402 	/* For each recipe that our new recipe may collide with, determine
6403 	 * which indexes have been used.
6404 	 */
6405 	ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
6406 		ice_or_bitmap(used_idx, used_idx,
6407 			      hw->switch_info->recp_list[bit].res_idxs,
6408 			      ICE_MAX_FV_WORDS);
6409 
6410 	ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
6411 
6412 	/* return number of free indexes */
6413 	return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS);
6414 }
6415 
6416 /**
6417  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
6418  * @hw: pointer to hardware structure
6419  * @rm: recipe management list entry
6420  * @profiles: bitmap of profiles that will be associated.
6421  */
6422 static enum ice_status
ice_add_sw_recipe(struct ice_hw * hw,struct ice_sw_recipe * rm,ice_bitmap_t * profiles)6423 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
6424 		  ice_bitmap_t *profiles)
6425 {
6426 	ice_declare_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6427 	struct ice_aqc_recipe_data_elem *tmp;
6428 	struct ice_aqc_recipe_data_elem *buf;
6429 	struct ice_recp_grp_entry *entry;
6430 	enum ice_status status;
6431 	u16 free_res_idx;
6432 	u16 recipe_count;
6433 	u8 chain_idx;
6434 	u8 recps = 0;
6435 
6436 	/* When more than one recipe are required, another recipe is needed to
6437 	 * chain them together. Matching a tunnel metadata ID takes up one of
6438 	 * the match fields in the chaining recipe reducing the number of
6439 	 * chained recipes by one.
6440 	 */
6441 	 /* check number of free result indices */
6442 	ice_zero_bitmap(result_idx_bm, ICE_MAX_FV_WORDS);
6443 	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
6444 
6445 	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
6446 		  free_res_idx, rm->n_grp_count);
6447 
6448 	if (rm->n_grp_count > 1) {
6449 		if (rm->n_grp_count > free_res_idx)
6450 			return ICE_ERR_MAX_LIMIT;
6451 
6452 		rm->n_grp_count++;
6453 	}
6454 
6455 	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
6456 		return ICE_ERR_MAX_LIMIT;
6457 
6458 	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
6459 							    ICE_MAX_NUM_RECIPES,
6460 							    sizeof(*tmp));
6461 	if (!tmp)
6462 		return ICE_ERR_NO_MEMORY;
6463 
6464 	buf = (struct ice_aqc_recipe_data_elem *)
6465 		ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
6466 	if (!buf) {
6467 		status = ICE_ERR_NO_MEMORY;
6468 		goto err_mem;
6469 	}
6470 
6471 	ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
6472 	recipe_count = ICE_MAX_NUM_RECIPES;
6473 	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
6474 				   NULL);
6475 	if (status || recipe_count == 0)
6476 		goto err_unroll;
6477 
6478 	/* Allocate the recipe resources, and configure them according to the
6479 	 * match fields from protocol headers and extracted field vectors.
6480 	 */
6481 	chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
6482 	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6483 		u8 i;
6484 
6485 		status = ice_alloc_recipe(hw, &entry->rid);
6486 		if (status)
6487 			goto err_unroll;
6488 
6489 		/* Clear the result index of the located recipe, as this will be
6490 		 * updated, if needed, later in the recipe creation process.
6491 		 */
6492 		tmp[0].content.result_indx = 0;
6493 
6494 		buf[recps] = tmp[0];
6495 		buf[recps].recipe_indx = (u8)entry->rid;
6496 		/* if the recipe is a non-root recipe RID should be programmed
6497 		 * as 0 for the rules to be applied correctly.
6498 		 */
6499 		buf[recps].content.rid = 0;
6500 		ice_memset(&buf[recps].content.lkup_indx, 0,
6501 			   sizeof(buf[recps].content.lkup_indx),
6502 			   ICE_NONDMA_MEM);
6503 
6504 		/* All recipes use look-up index 0 to match switch ID. */
6505 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6506 		buf[recps].content.mask[0] =
6507 			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6508 		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
6509 		 * to be 0
6510 		 */
6511 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6512 			buf[recps].content.lkup_indx[i] = 0x80;
6513 			buf[recps].content.mask[i] = 0;
6514 		}
6515 
6516 		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
6517 			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
6518 			buf[recps].content.mask[i + 1] =
6519 				CPU_TO_LE16(entry->fv_mask[i]);
6520 		}
6521 
6522 		if (rm->n_grp_count > 1) {
6523 			/* Checks to see if there really is a valid result index
6524 			 * that can be used.
6525 			 */
6526 			if (chain_idx >= ICE_MAX_FV_WORDS) {
6527 				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
6528 				status = ICE_ERR_MAX_LIMIT;
6529 				goto err_unroll;
6530 			}
6531 
6532 			entry->chain_idx = chain_idx;
6533 			buf[recps].content.result_indx =
6534 				ICE_AQ_RECIPE_RESULT_EN |
6535 				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
6536 				 ICE_AQ_RECIPE_RESULT_DATA_M);
6537 			ice_clear_bit(chain_idx, result_idx_bm);
6538 			chain_idx = ice_find_first_bit(result_idx_bm,
6539 						       ICE_MAX_FV_WORDS);
6540 		}
6541 
6542 		/* fill recipe dependencies */
6543 		ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
6544 				ICE_MAX_NUM_RECIPES);
6545 		ice_set_bit(buf[recps].recipe_indx,
6546 			    (ice_bitmap_t *)buf[recps].recipe_bitmap);
6547 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6548 		recps++;
6549 	}
6550 
6551 	if (rm->n_grp_count == 1) {
6552 		rm->root_rid = buf[0].recipe_indx;
6553 		ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
6554 		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
6555 		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
6556 			ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
6557 				   sizeof(buf[0].recipe_bitmap),
6558 				   ICE_NONDMA_TO_NONDMA);
6559 		} else {
6560 			status = ICE_ERR_BAD_PTR;
6561 			goto err_unroll;
6562 		}
6563 		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
6564 		 * the recipe which is getting created if specified
6565 		 * by user. Usually any advanced switch filter, which results
6566 		 * into new extraction sequence, ended up creating a new recipe
6567 		 * of type ROOT and usually recipes are associated with profiles
6568 		 * Switch rule referreing newly created recipe, needs to have
6569 		 * either/or 'fwd' or 'join' priority, otherwise switch rule
6570 		 * evaluation will not happen correctly. In other words, if
6571 		 * switch rule to be evaluated on priority basis, then recipe
6572 		 * needs to have priority, otherwise it will be evaluated last.
6573 		 */
6574 		buf[0].content.act_ctrl_fwd_priority = rm->priority;
6575 	} else {
6576 		struct ice_recp_grp_entry *last_chain_entry;
6577 		u16 rid, i;
6578 
6579 		/* Allocate the last recipe that will chain the outcomes of the
6580 		 * other recipes together
6581 		 */
6582 		status = ice_alloc_recipe(hw, &rid);
6583 		if (status)
6584 			goto err_unroll;
6585 
6586 		buf[recps].recipe_indx = (u8)rid;
6587 		buf[recps].content.rid = (u8)rid;
6588 		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
6589 		/* the new entry created should also be part of rg_list to
6590 		 * make sure we have complete recipe
6591 		 */
6592 		last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
6593 			sizeof(*last_chain_entry));
6594 		if (!last_chain_entry) {
6595 			status = ICE_ERR_NO_MEMORY;
6596 			goto err_unroll;
6597 		}
6598 		last_chain_entry->rid = rid;
6599 		ice_memset(&buf[recps].content.lkup_indx, 0,
6600 			   sizeof(buf[recps].content.lkup_indx),
6601 			   ICE_NONDMA_MEM);
6602 		/* All recipes use look-up index 0 to match switch ID. */
6603 		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
6604 		buf[recps].content.mask[0] =
6605 			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
6606 		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
6607 			buf[recps].content.lkup_indx[i] =
6608 				ICE_AQ_RECIPE_LKUP_IGNORE;
6609 			buf[recps].content.mask[i] = 0;
6610 		}
6611 
6612 		i = 1;
6613 		/* update r_bitmap with the recp that is used for chaining */
6614 		ice_set_bit(rid, rm->r_bitmap);
6615 		/* this is the recipe that chains all the other recipes so it
6616 		 * should not have a chaining ID to indicate the same
6617 		 */
6618 		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
6619 		LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
6620 				    l_entry) {
6621 			last_chain_entry->fv_idx[i] = entry->chain_idx;
6622 			buf[recps].content.lkup_indx[i] = entry->chain_idx;
6623 			buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
6624 			ice_set_bit(entry->rid, rm->r_bitmap);
6625 		}
6626 		LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
6627 		if (sizeof(buf[recps].recipe_bitmap) >=
6628 		    sizeof(rm->r_bitmap)) {
6629 			ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
6630 				   sizeof(buf[recps].recipe_bitmap),
6631 				   ICE_NONDMA_TO_NONDMA);
6632 		} else {
6633 			status = ICE_ERR_BAD_PTR;
6634 			goto err_unroll;
6635 		}
6636 		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
6637 
6638 		recps++;
6639 		rm->root_rid = (u8)rid;
6640 	}
6641 	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
6642 	if (status)
6643 		goto err_unroll;
6644 
6645 	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
6646 	ice_release_change_lock(hw);
6647 	if (status)
6648 		goto err_unroll;
6649 
6650 	/* Every recipe that just got created add it to the recipe
6651 	 * book keeping list
6652 	 */
6653 	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
6654 		struct ice_switch_info *sw = hw->switch_info;
6655 		bool is_root, idx_found = false;
6656 		struct ice_sw_recipe *recp;
6657 		u16 idx, buf_idx = 0;
6658 
6659 		/* find buffer index for copying some data */
6660 		for (idx = 0; idx < rm->n_grp_count; idx++)
6661 			if (buf[idx].recipe_indx == entry->rid) {
6662 				buf_idx = idx;
6663 				idx_found = true;
6664 			}
6665 
6666 		if (!idx_found) {
6667 			status = ICE_ERR_OUT_OF_RANGE;
6668 			goto err_unroll;
6669 		}
6670 
6671 		recp = &sw->recp_list[entry->rid];
6672 		is_root = (rm->root_rid == entry->rid);
6673 		recp->is_root = is_root;
6674 
6675 		recp->root_rid = entry->rid;
6676 		recp->big_recp = (is_root && rm->n_grp_count > 1);
6677 
6678 		ice_memcpy(&recp->ext_words, entry->r_group.pairs,
6679 			   entry->r_group.n_val_pairs *
6680 			   sizeof(struct ice_fv_word),
6681 			   ICE_NONDMA_TO_NONDMA);
6682 
6683 		ice_memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
6684 			   sizeof(recp->r_bitmap), ICE_NONDMA_TO_NONDMA);
6685 
6686 		/* Copy non-result fv index values and masks to recipe. This
6687 		 * call will also update the result recipe bitmask.
6688 		 */
6689 		ice_collect_result_idx(&buf[buf_idx], recp);
6690 
6691 		/* for non-root recipes, also copy to the root, this allows
6692 		 * easier matching of a complete chained recipe
6693 		 */
6694 		if (!is_root)
6695 			ice_collect_result_idx(&buf[buf_idx],
6696 					       &sw->recp_list[rm->root_rid]);
6697 
6698 		recp->n_ext_words = entry->r_group.n_val_pairs;
6699 		recp->chain_idx = entry->chain_idx;
6700 		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
6701 		recp->n_grp_count = rm->n_grp_count;
6702 		recp->tun_type = rm->tun_type;
6703 		recp->recp_created = true;
6704 	}
6705 	rm->root_buf = buf;
6706 	ice_free(hw, tmp);
6707 	return status;
6708 
6709 err_unroll:
6710 err_mem:
6711 	ice_free(hw, tmp);
6712 	ice_free(hw, buf);
6713 	return status;
6714 }
6715 
6716 /**
6717  * ice_create_recipe_group - creates recipe group
6718  * @hw: pointer to hardware structure
6719  * @rm: recipe management list entry
6720  * @lkup_exts: lookup elements
6721  */
6722 static enum ice_status
ice_create_recipe_group(struct ice_hw * hw,struct ice_sw_recipe * rm,struct ice_prot_lkup_ext * lkup_exts)6723 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
6724 			struct ice_prot_lkup_ext *lkup_exts)
6725 {
6726 	enum ice_status status;
6727 	u8 recp_count = 0;
6728 
6729 	rm->n_grp_count = 0;
6730 
6731 	/* Create recipes for words that are marked not done by packing them
6732 	 * as best fit.
6733 	 */
6734 	status = ice_create_first_fit_recp_def(hw, lkup_exts,
6735 					       &rm->rg_list, &recp_count);
6736 	if (!status) {
6737 		rm->n_grp_count += recp_count;
6738 		rm->n_ext_words = lkup_exts->n_val_words;
6739 		ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
6740 			   sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
6741 		ice_memcpy(rm->word_masks, lkup_exts->field_mask,
6742 			   sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
6743 	}
6744 
6745 	return status;
6746 }
6747 
6748 /**
6749  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
6750  * @hw: pointer to hardware structure
6751  * @lkups: lookup elements or match criteria for the advanced recipe, one
6752  *	   structure per protocol header
6753  * @lkups_cnt: number of protocols
6754  * @bm: bitmap of field vectors to consider
6755  * @fv_list: pointer to a list that holds the returned field vectors
6756  */
6757 static enum ice_status
ice_get_fv(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,ice_bitmap_t * bm,struct LIST_HEAD_TYPE * fv_list)6758 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
6759 	   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
6760 {
6761 	enum ice_status status;
6762 	u8 *prot_ids;
6763 	u16 i;
6764 
6765 	if (!lkups_cnt)
6766 		return ICE_SUCCESS;
6767 
6768 	prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
6769 	if (!prot_ids)
6770 		return ICE_ERR_NO_MEMORY;
6771 
6772 	for (i = 0; i < lkups_cnt; i++)
6773 		if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
6774 			status = ICE_ERR_CFG;
6775 			goto free_mem;
6776 		}
6777 
6778 	/* Find field vectors that include all specified protocol types */
6779 	status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
6780 
6781 free_mem:
6782 	ice_free(hw, prot_ids);
6783 	return status;
6784 }
6785 
6786 /**
6787  * ice_tun_type_match_mask - determine if tun type needs a match mask
6788  * @tun_type: tunnel type
6789  * @mask: mask to be used for the tunnel
6790  */
ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type,u16 * mask)6791 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
6792 {
6793 	switch (tun_type) {
6794 	case ICE_SW_TUN_VXLAN_GPE:
6795 	case ICE_SW_TUN_GENEVE:
6796 	case ICE_SW_TUN_VXLAN:
6797 	case ICE_SW_TUN_NVGRE:
6798 	case ICE_SW_TUN_UDP:
6799 	case ICE_ALL_TUNNELS:
6800 	case ICE_SW_TUN_AND_NON_TUN_QINQ:
6801 	case ICE_NON_TUN_QINQ:
6802 	case ICE_SW_TUN_PPPOE_QINQ:
6803 	case ICE_SW_TUN_PPPOE_PAY_QINQ:
6804 	case ICE_SW_TUN_PPPOE_IPV4_QINQ:
6805 	case ICE_SW_TUN_PPPOE_IPV6_QINQ:
6806 		*mask = ICE_TUN_FLAG_MASK;
6807 		return true;
6808 
6809 	case ICE_SW_TUN_GENEVE_VLAN:
6810 	case ICE_SW_TUN_VXLAN_VLAN:
6811 		*mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK;
6812 		return true;
6813 
6814 	default:
6815 		*mask = 0;
6816 		return false;
6817 	}
6818 }
6819 
6820 /**
6821  * ice_add_special_words - Add words that are not protocols, such as metadata
6822  * @rinfo: other information regarding the rule e.g. priority and action info
6823  * @lkup_exts: lookup word structure
6824  */
6825 static enum ice_status
ice_add_special_words(struct ice_adv_rule_info * rinfo,struct ice_prot_lkup_ext * lkup_exts)6826 ice_add_special_words(struct ice_adv_rule_info *rinfo,
6827 		      struct ice_prot_lkup_ext *lkup_exts)
6828 {
6829 	u16 mask;
6830 
6831 	/* If this is a tunneled packet, then add recipe index to match the
6832 	 * tunnel bit in the packet metadata flags.
6833 	 */
6834 	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
6835 		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
6836 			u8 word = lkup_exts->n_val_words++;
6837 
6838 			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
6839 			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
6840 			lkup_exts->field_mask[word] = mask;
6841 		} else {
6842 			return ICE_ERR_MAX_LIMIT;
6843 		}
6844 	}
6845 
6846 	return ICE_SUCCESS;
6847 }
6848 
6849 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
6850  * @hw: pointer to hardware structure
6851  * @rinfo: other information regarding the rule e.g. priority and action info
6852  * @bm: pointer to memory for returning the bitmap of field vectors
6853  */
6854 static void
ice_get_compat_fv_bitmap(struct ice_hw * hw,struct ice_adv_rule_info * rinfo,ice_bitmap_t * bm)6855 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
6856 			 ice_bitmap_t *bm)
6857 {
6858 	enum ice_prof_type prof_type;
6859 
6860 	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
6861 
6862 	switch (rinfo->tun_type) {
6863 	case ICE_NON_TUN:
6864 	case ICE_NON_TUN_QINQ:
6865 		prof_type = ICE_PROF_NON_TUN;
6866 		break;
6867 	case ICE_ALL_TUNNELS:
6868 		prof_type = ICE_PROF_TUN_ALL;
6869 		break;
6870 	case ICE_SW_TUN_VXLAN_GPE:
6871 	case ICE_SW_TUN_GENEVE:
6872 	case ICE_SW_TUN_GENEVE_VLAN:
6873 	case ICE_SW_TUN_VXLAN:
6874 	case ICE_SW_TUN_VXLAN_VLAN:
6875 	case ICE_SW_TUN_UDP:
6876 	case ICE_SW_TUN_GTP:
6877 		prof_type = ICE_PROF_TUN_UDP;
6878 		break;
6879 	case ICE_SW_TUN_NVGRE:
6880 		prof_type = ICE_PROF_TUN_GRE;
6881 		break;
6882 	case ICE_SW_TUN_PPPOE:
6883 	case ICE_SW_TUN_PPPOE_QINQ:
6884 		prof_type = ICE_PROF_TUN_PPPOE;
6885 		break;
6886 	case ICE_SW_TUN_PPPOE_PAY:
6887 	case ICE_SW_TUN_PPPOE_PAY_QINQ:
6888 		ice_set_bit(ICE_PROFID_PPPOE_PAY, bm);
6889 		return;
6890 	case ICE_SW_TUN_PPPOE_IPV4:
6891 	case ICE_SW_TUN_PPPOE_IPV4_QINQ:
6892 		ice_set_bit(ICE_PROFID_PPPOE_IPV4_OTHER, bm);
6893 		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6894 		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6895 		return;
6896 	case ICE_SW_TUN_PPPOE_IPV4_TCP:
6897 		ice_set_bit(ICE_PROFID_PPPOE_IPV4_TCP, bm);
6898 		return;
6899 	case ICE_SW_TUN_PPPOE_IPV4_UDP:
6900 		ice_set_bit(ICE_PROFID_PPPOE_IPV4_UDP, bm);
6901 		return;
6902 	case ICE_SW_TUN_PPPOE_IPV6:
6903 	case ICE_SW_TUN_PPPOE_IPV6_QINQ:
6904 		ice_set_bit(ICE_PROFID_PPPOE_IPV6_OTHER, bm);
6905 		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6906 		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6907 		return;
6908 	case ICE_SW_TUN_PPPOE_IPV6_TCP:
6909 		ice_set_bit(ICE_PROFID_PPPOE_IPV6_TCP, bm);
6910 		return;
6911 	case ICE_SW_TUN_PPPOE_IPV6_UDP:
6912 		ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm);
6913 		return;
6914 	case ICE_SW_TUN_PROFID_IPV6_ESP:
6915 	case ICE_SW_TUN_IPV6_ESP:
6916 		ice_set_bit(ICE_PROFID_IPV6_ESP, bm);
6917 		return;
6918 	case ICE_SW_TUN_PROFID_IPV6_AH:
6919 	case ICE_SW_TUN_IPV6_AH:
6920 		ice_set_bit(ICE_PROFID_IPV6_AH, bm);
6921 		return;
6922 	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
6923 	case ICE_SW_TUN_IPV6_L2TPV3:
6924 		ice_set_bit(ICE_PROFID_MAC_IPV6_L2TPV3, bm);
6925 		return;
6926 	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
6927 	case ICE_SW_TUN_IPV6_NAT_T:
6928 		ice_set_bit(ICE_PROFID_IPV6_NAT_T, bm);
6929 		return;
6930 	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
6931 		ice_set_bit(ICE_PROFID_IPV4_PFCP_NODE, bm);
6932 		return;
6933 	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
6934 		ice_set_bit(ICE_PROFID_IPV4_PFCP_SESSION, bm);
6935 		return;
6936 	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
6937 		ice_set_bit(ICE_PROFID_IPV6_PFCP_NODE, bm);
6938 		return;
6939 	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
6940 		ice_set_bit(ICE_PROFID_IPV6_PFCP_SESSION, bm);
6941 		return;
6942 	case ICE_SW_TUN_IPV4_NAT_T:
6943 		ice_set_bit(ICE_PROFID_IPV4_NAT_T, bm);
6944 		return;
6945 	case ICE_SW_TUN_IPV4_L2TPV3:
6946 		ice_set_bit(ICE_PROFID_MAC_IPV4_L2TPV3, bm);
6947 		return;
6948 	case ICE_SW_TUN_IPV4_ESP:
6949 		ice_set_bit(ICE_PROFID_IPV4_ESP, bm);
6950 		return;
6951 	case ICE_SW_TUN_IPV4_AH:
6952 		ice_set_bit(ICE_PROFID_IPV4_AH, bm);
6953 		return;
6954 	case ICE_SW_IPV4_TCP:
6955 		ice_set_bit(ICE_PROFID_IPV4_TCP, bm);
6956 		return;
6957 	case ICE_SW_IPV4_UDP:
6958 		ice_set_bit(ICE_PROFID_IPV4_UDP, bm);
6959 		return;
6960 	case ICE_SW_IPV6_TCP:
6961 		ice_set_bit(ICE_PROFID_IPV6_TCP, bm);
6962 		return;
6963 	case ICE_SW_IPV6_UDP:
6964 		ice_set_bit(ICE_PROFID_IPV6_UDP, bm);
6965 		return;
6966 	case ICE_SW_TUN_IPV4_GTPU_IPV4:
6967 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER, bm);
6968 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_OTHER, bm);
6969 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_UDP, bm);
6970 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_UDP, bm);
6971 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV4_TCP, bm);
6972 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV4_TCP, bm);
6973 		return;
6974 	case ICE_SW_TUN_IPV6_GTPU_IPV4:
6975 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_OTHER, bm);
6976 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_OTHER, bm);
6977 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_UDP, bm);
6978 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_UDP, bm);
6979 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV4_TCP, bm);
6980 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV4_TCP, bm);
6981 		return;
6982 	case ICE_SW_TUN_IPV4_GTPU_IPV6:
6983 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_OTHER, bm);
6984 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_OTHER, bm);
6985 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_UDP, bm);
6986 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_UDP, bm);
6987 		ice_set_bit(ICE_PROFID_IPV4_GTPU_EH_IPV6_TCP, bm);
6988 		ice_set_bit(ICE_PROFID_IPV4_GTPU_IPV6_TCP, bm);
6989 		return;
6990 	case ICE_SW_TUN_IPV6_GTPU_IPV6:
6991 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_OTHER, bm);
6992 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_OTHER, bm);
6993 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_UDP, bm);
6994 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_UDP, bm);
6995 		ice_set_bit(ICE_PROFID_IPV6_GTPU_EH_IPV6_TCP, bm);
6996 		ice_set_bit(ICE_PROFID_IPV6_GTPU_IPV6_TCP, bm);
6997 		return;
6998 	case ICE_SW_TUN_AND_NON_TUN:
6999 	case ICE_SW_TUN_AND_NON_TUN_QINQ:
7000 	default:
7001 		prof_type = ICE_PROF_ALL;
7002 		break;
7003 	}
7004 
7005 	ice_get_sw_fv_bitmap(hw, prof_type, bm);
7006 }
7007 
7008 /**
7009  * ice_is_prof_rule - determine if rule type is a profile rule
7010  * @type: the rule type
7011  *
7012  * if the rule type is a profile rule, that means that there no field value
7013  * match required, in this case just a profile hit is required.
7014  */
ice_is_prof_rule(enum ice_sw_tunnel_type type)7015 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
7016 {
7017 	switch (type) {
7018 	case ICE_SW_TUN_PROFID_IPV6_ESP:
7019 	case ICE_SW_TUN_PROFID_IPV6_AH:
7020 	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
7021 	case ICE_SW_TUN_PROFID_IPV6_NAT_T:
7022 	case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
7023 	case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
7024 	case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
7025 	case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
7026 		return true;
7027 	default:
7028 		break;
7029 	}
7030 
7031 	return false;
7032 }
7033 
7034 /**
7035  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
7036  * @hw: pointer to hardware structure
7037  * @lkups: lookup elements or match criteria for the advanced recipe, one
7038  *  structure per protocol header
7039  * @lkups_cnt: number of protocols
7040  * @rinfo: other information regarding the rule e.g. priority and action info
7041  * @rid: return the recipe ID of the recipe created
7042  */
7043 static enum ice_status
ice_add_adv_recipe(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,struct ice_adv_rule_info * rinfo,u16 * rid)7044 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7045 		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
7046 {
7047 	ice_declare_bitmap(fv_bitmap, ICE_MAX_NUM_PROFILES);
7048 	ice_declare_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7049 	struct ice_prot_lkup_ext *lkup_exts;
7050 	struct ice_recp_grp_entry *r_entry;
7051 	struct ice_sw_fv_list_entry *fvit;
7052 	struct ice_recp_grp_entry *r_tmp;
7053 	struct ice_sw_fv_list_entry *tmp;
7054 	enum ice_status status = ICE_SUCCESS;
7055 	struct ice_sw_recipe *rm;
7056 	u8 i;
7057 
7058 	if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt)
7059 		return ICE_ERR_PARAM;
7060 
7061 	lkup_exts = (struct ice_prot_lkup_ext *)
7062 		ice_malloc(hw, sizeof(*lkup_exts));
7063 	if (!lkup_exts)
7064 		return ICE_ERR_NO_MEMORY;
7065 
7066 	/* Determine the number of words to be matched and if it exceeds a
7067 	 * recipe's restrictions
7068 	 */
7069 	for (i = 0; i < lkups_cnt; i++) {
7070 		u16 count;
7071 
7072 		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
7073 			status = ICE_ERR_CFG;
7074 			goto err_free_lkup_exts;
7075 		}
7076 
7077 		count = ice_fill_valid_words(&lkups[i], lkup_exts);
7078 		if (!count) {
7079 			status = ICE_ERR_CFG;
7080 			goto err_free_lkup_exts;
7081 		}
7082 	}
7083 
7084 	rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
7085 	if (!rm) {
7086 		status = ICE_ERR_NO_MEMORY;
7087 		goto err_free_lkup_exts;
7088 	}
7089 
7090 	/* Get field vectors that contain fields extracted from all the protocol
7091 	 * headers being programmed.
7092 	 */
7093 	INIT_LIST_HEAD(&rm->fv_list);
7094 	INIT_LIST_HEAD(&rm->rg_list);
7095 
7096 	/* Get bitmap of field vectors (profiles) that are compatible with the
7097 	 * rule request; only these will be searched in the subsequent call to
7098 	 * ice_get_fv.
7099 	 */
7100 	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
7101 
7102 	status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
7103 	if (status)
7104 		goto err_unroll;
7105 
7106 	/* Create any special protocol/offset pairs, such as looking at tunnel
7107 	 * bits by extracting metadata
7108 	 */
7109 	status = ice_add_special_words(rinfo, lkup_exts);
7110 	if (status)
7111 		goto err_free_lkup_exts;
7112 
7113 	/* Group match words into recipes using preferred recipe grouping
7114 	 * criteria.
7115 	 */
7116 	status = ice_create_recipe_group(hw, rm, lkup_exts);
7117 	if (status)
7118 		goto err_unroll;
7119 
7120 	/* set the recipe priority if specified */
7121 	rm->priority = (u8)rinfo->priority;
7122 
7123 	/* Find offsets from the field vector. Pick the first one for all the
7124 	 * recipes.
7125 	 */
7126 	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
7127 	if (status)
7128 		goto err_unroll;
7129 
7130 	/* An empty FV list means to use all the profiles returned in the
7131 	 * profile bitmap
7132 	 */
7133 	if (LIST_EMPTY(&rm->fv_list)) {
7134 		u16 j;
7135 
7136 		ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
7137 			struct ice_sw_fv_list_entry *fvl;
7138 
7139 			fvl = (struct ice_sw_fv_list_entry *)
7140 				ice_malloc(hw, sizeof(*fvl));
7141 			if (!fvl)
7142 				goto err_unroll;
7143 			fvl->fv_ptr = NULL;
7144 			fvl->profile_id = j;
7145 			LIST_ADD(&fvl->list_entry, &rm->fv_list);
7146 		}
7147 	}
7148 
7149 	/* get bitmap of all profiles the recipe will be associated with */
7150 	ice_zero_bitmap(profiles, ICE_MAX_NUM_PROFILES);
7151 	LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7152 			    list_entry) {
7153 		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
7154 		ice_set_bit((u16)fvit->profile_id, profiles);
7155 	}
7156 
7157 	/* Look for a recipe which matches our requested fv / mask list */
7158 	*rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
7159 	if (*rid < ICE_MAX_NUM_RECIPES)
7160 		/* Success if found a recipe that match the existing criteria */
7161 		goto err_unroll;
7162 
7163 	rm->tun_type = rinfo->tun_type;
7164 	/* Recipe we need does not exist, add a recipe */
7165 	status = ice_add_sw_recipe(hw, rm, profiles);
7166 	if (status)
7167 		goto err_unroll;
7168 
7169 	/* Associate all the recipes created with all the profiles in the
7170 	 * common field vector.
7171 	 */
7172 	LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
7173 			    list_entry) {
7174 		ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
7175 		u16 j;
7176 
7177 		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
7178 						      (u8 *)r_bitmap, NULL);
7179 		if (status)
7180 			goto err_unroll;
7181 
7182 		ice_or_bitmap(r_bitmap, r_bitmap, rm->r_bitmap,
7183 			      ICE_MAX_NUM_RECIPES);
7184 		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
7185 		if (status)
7186 			goto err_unroll;
7187 
7188 		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
7189 						      (u8 *)r_bitmap,
7190 						      NULL);
7191 		ice_release_change_lock(hw);
7192 
7193 		if (status)
7194 			goto err_unroll;
7195 
7196 		/* Update profile to recipe bitmap array */
7197 		ice_cp_bitmap(profile_to_recipe[fvit->profile_id], r_bitmap,
7198 			      ICE_MAX_NUM_RECIPES);
7199 
7200 		/* Update recipe to profile bitmap array */
7201 		ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
7202 			ice_set_bit((u16)fvit->profile_id,
7203 				    recipe_to_profile[j]);
7204 	}
7205 
7206 	*rid = rm->root_rid;
7207 	ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
7208 		   lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
7209 err_unroll:
7210 	LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
7211 				 ice_recp_grp_entry, l_entry) {
7212 		LIST_DEL(&r_entry->l_entry);
7213 		ice_free(hw, r_entry);
7214 	}
7215 
7216 	LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
7217 				 list_entry) {
7218 		LIST_DEL(&fvit->list_entry);
7219 		ice_free(hw, fvit);
7220 	}
7221 
7222 	if (rm->root_buf)
7223 		ice_free(hw, rm->root_buf);
7224 
7225 	ice_free(hw, rm);
7226 
7227 err_free_lkup_exts:
7228 	ice_free(hw, lkup_exts);
7229 
7230 	return status;
7231 }
7232 
7233 /**
7234  * ice_find_dummy_packet - find dummy packet by tunnel type
7235  *
7236  * @lkups: lookup elements or match criteria for the advanced recipe, one
7237  *	   structure per protocol header
7238  * @lkups_cnt: number of protocols
7239  * @tun_type: tunnel type from the match criteria
7240  * @pkt: dummy packet to fill according to filter match criteria
7241  * @pkt_len: packet length of dummy packet
7242  * @offsets: pointer to receive the pointer to the offsets for the packet
7243  */
7244 static void
ice_find_dummy_packet(struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,enum ice_sw_tunnel_type tun_type,const u8 ** pkt,u16 * pkt_len,const struct ice_dummy_pkt_offsets ** offsets)7245 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7246 		      enum ice_sw_tunnel_type tun_type, const u8 **pkt,
7247 		      u16 *pkt_len,
7248 		      const struct ice_dummy_pkt_offsets **offsets)
7249 {
7250 	bool tcp = false, udp = false, ipv6 = false, vlan = false;
7251 	bool gre = false;
7252 	u16 i;
7253 
7254 	for (i = 0; i < lkups_cnt; i++) {
7255 		if (lkups[i].type == ICE_UDP_ILOS)
7256 			udp = true;
7257 		else if (lkups[i].type == ICE_TCP_IL)
7258 			tcp = true;
7259 		else if (lkups[i].type == ICE_IPV6_OFOS)
7260 			ipv6 = true;
7261 		else if (lkups[i].type == ICE_VLAN_OFOS)
7262 			vlan = true;
7263 		else if (lkups[i].type == ICE_IPV4_OFOS &&
7264 			 lkups[i].h_u.ipv4_hdr.protocol ==
7265 				ICE_IPV4_NVGRE_PROTO_ID &&
7266 			 lkups[i].m_u.ipv4_hdr.protocol ==
7267 				0xFF)
7268 			gre = true;
7269 		else if (lkups[i].type == ICE_PPPOE &&
7270 			 lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
7271 				CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
7272 			 lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
7273 				0xFFFF)
7274 			ipv6 = true;
7275 		else if (lkups[i].type == ICE_ETYPE_OL &&
7276 			 lkups[i].h_u.ethertype.ethtype_id ==
7277 				CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
7278 			 lkups[i].m_u.ethertype.ethtype_id ==
7279 					0xFFFF)
7280 			ipv6 = true;
7281 		else if (lkups[i].type == ICE_IPV4_IL &&
7282 			 lkups[i].h_u.ipv4_hdr.protocol ==
7283 				ICE_TCP_PROTO_ID &&
7284 			 lkups[i].m_u.ipv4_hdr.protocol ==
7285 				0xFF)
7286 			tcp = true;
7287 	}
7288 
7289 	if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7290 	     tun_type == ICE_NON_TUN_QINQ) && ipv6) {
7291 		*pkt = dummy_qinq_ipv6_pkt;
7292 		*pkt_len = sizeof(dummy_qinq_ipv6_pkt);
7293 		*offsets = dummy_qinq_ipv6_packet_offsets;
7294 		return;
7295 	} else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
7296 			   tun_type == ICE_NON_TUN_QINQ) {
7297 		*pkt = dummy_qinq_ipv4_pkt;
7298 		*pkt_len = sizeof(dummy_qinq_ipv4_pkt);
7299 		*offsets = dummy_qinq_ipv4_packet_offsets;
7300 		return;
7301 	}
7302 
7303 	if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
7304 		*pkt = dummy_qinq_pppoe_ipv6_packet;
7305 		*pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
7306 		*offsets = dummy_qinq_pppoe_packet_ipv6_offsets;
7307 		return;
7308 	} else if (tun_type == ICE_SW_TUN_PPPOE_IPV4_QINQ) {
7309 		*pkt = dummy_qinq_pppoe_ipv4_pkt;
7310 		*pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7311 		*offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
7312 		return;
7313 	} else if (tun_type == ICE_SW_TUN_PPPOE_QINQ ||
7314 			tun_type == ICE_SW_TUN_PPPOE_PAY_QINQ) {
7315 		*pkt = dummy_qinq_pppoe_ipv4_pkt;
7316 		*pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
7317 		*offsets = dummy_qinq_pppoe_packet_offsets;
7318 		return;
7319 	}
7320 
7321 	if (tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY) {
7322 		*pkt = dummy_ipv4_gtpu_ipv4_packet;
7323 		*pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7324 		*offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
7325 		return;
7326 	} else if (tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
7327 		*pkt = dummy_ipv6_gtpu_ipv6_packet;
7328 		*pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7329 		*offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
7330 		return;
7331 	} else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV4) {
7332 		*pkt = dummy_ipv4_gtpu_ipv4_packet;
7333 		*pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
7334 		*offsets = dummy_ipv4_gtpu_ipv4_packet_offsets;
7335 		return;
7336 	} else if (tun_type == ICE_SW_TUN_IPV4_GTPU_IPV6) {
7337 		*pkt = dummy_ipv4_gtpu_ipv6_packet;
7338 		*pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_packet);
7339 		*offsets = dummy_ipv4_gtpu_ipv6_packet_offsets;
7340 		return;
7341 	} else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV4) {
7342 		*pkt = dummy_ipv6_gtpu_ipv4_packet;
7343 		*pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_packet);
7344 		*offsets = dummy_ipv6_gtpu_ipv4_packet_offsets;
7345 		return;
7346 	} else if (tun_type == ICE_SW_TUN_IPV6_GTPU_IPV6) {
7347 		*pkt = dummy_ipv6_gtpu_ipv6_packet;
7348 		*pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_packet);
7349 		*offsets = dummy_ipv6_gtpu_ipv6_packet_offsets;
7350 		return;
7351 	}
7352 
7353 	if (tun_type == ICE_SW_TUN_IPV4_ESP) {
7354 		*pkt = dummy_ipv4_esp_pkt;
7355 		*pkt_len = sizeof(dummy_ipv4_esp_pkt);
7356 		*offsets = dummy_ipv4_esp_packet_offsets;
7357 		return;
7358 	}
7359 
7360 	if (tun_type == ICE_SW_TUN_IPV6_ESP) {
7361 		*pkt = dummy_ipv6_esp_pkt;
7362 		*pkt_len = sizeof(dummy_ipv6_esp_pkt);
7363 		*offsets = dummy_ipv6_esp_packet_offsets;
7364 		return;
7365 	}
7366 
7367 	if (tun_type == ICE_SW_TUN_IPV4_AH) {
7368 		*pkt = dummy_ipv4_ah_pkt;
7369 		*pkt_len = sizeof(dummy_ipv4_ah_pkt);
7370 		*offsets = dummy_ipv4_ah_packet_offsets;
7371 		return;
7372 	}
7373 
7374 	if (tun_type == ICE_SW_TUN_IPV6_AH) {
7375 		*pkt = dummy_ipv6_ah_pkt;
7376 		*pkt_len = sizeof(dummy_ipv6_ah_pkt);
7377 		*offsets = dummy_ipv6_ah_packet_offsets;
7378 		return;
7379 	}
7380 
7381 	if (tun_type == ICE_SW_TUN_IPV4_NAT_T) {
7382 		*pkt = dummy_ipv4_nat_pkt;
7383 		*pkt_len = sizeof(dummy_ipv4_nat_pkt);
7384 		*offsets = dummy_ipv4_nat_packet_offsets;
7385 		return;
7386 	}
7387 
7388 	if (tun_type == ICE_SW_TUN_IPV6_NAT_T) {
7389 		*pkt = dummy_ipv6_nat_pkt;
7390 		*pkt_len = sizeof(dummy_ipv6_nat_pkt);
7391 		*offsets = dummy_ipv6_nat_packet_offsets;
7392 		return;
7393 	}
7394 
7395 	if (tun_type == ICE_SW_TUN_IPV4_L2TPV3) {
7396 		*pkt = dummy_ipv4_l2tpv3_pkt;
7397 		*pkt_len = sizeof(dummy_ipv4_l2tpv3_pkt);
7398 		*offsets = dummy_ipv4_l2tpv3_packet_offsets;
7399 		return;
7400 	}
7401 
7402 	if (tun_type == ICE_SW_TUN_IPV6_L2TPV3) {
7403 		*pkt = dummy_ipv6_l2tpv3_pkt;
7404 		*pkt_len = sizeof(dummy_ipv6_l2tpv3_pkt);
7405 		*offsets = dummy_ipv6_l2tpv3_packet_offsets;
7406 		return;
7407 	}
7408 
7409 	if (tun_type == ICE_SW_TUN_GTP) {
7410 		*pkt = dummy_udp_gtp_packet;
7411 		*pkt_len = sizeof(dummy_udp_gtp_packet);
7412 		*offsets = dummy_udp_gtp_packet_offsets;
7413 		return;
7414 	}
7415 
7416 	if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
7417 		*pkt = dummy_pppoe_ipv6_packet;
7418 		*pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7419 		*offsets = dummy_pppoe_packet_offsets;
7420 		return;
7421 	} else if (tun_type == ICE_SW_TUN_PPPOE ||
7422 		tun_type == ICE_SW_TUN_PPPOE_PAY) {
7423 		*pkt = dummy_pppoe_ipv4_packet;
7424 		*pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7425 		*offsets = dummy_pppoe_packet_offsets;
7426 		return;
7427 	}
7428 
7429 	if (tun_type == ICE_SW_TUN_PPPOE_IPV4) {
7430 		*pkt = dummy_pppoe_ipv4_packet;
7431 		*pkt_len = sizeof(dummy_pppoe_ipv4_packet);
7432 		*offsets = dummy_pppoe_packet_ipv4_offsets;
7433 		return;
7434 	}
7435 
7436 	if (tun_type == ICE_SW_TUN_PPPOE_IPV4_TCP) {
7437 		*pkt = dummy_pppoe_ipv4_tcp_packet;
7438 		*pkt_len = sizeof(dummy_pppoe_ipv4_tcp_packet);
7439 		*offsets = dummy_pppoe_ipv4_tcp_packet_offsets;
7440 		return;
7441 	}
7442 
7443 	if (tun_type == ICE_SW_TUN_PPPOE_IPV4_UDP) {
7444 		*pkt = dummy_pppoe_ipv4_udp_packet;
7445 		*pkt_len = sizeof(dummy_pppoe_ipv4_udp_packet);
7446 		*offsets = dummy_pppoe_ipv4_udp_packet_offsets;
7447 		return;
7448 	}
7449 
7450 	if (tun_type == ICE_SW_TUN_PPPOE_IPV6) {
7451 		*pkt = dummy_pppoe_ipv6_packet;
7452 		*pkt_len = sizeof(dummy_pppoe_ipv6_packet);
7453 		*offsets = dummy_pppoe_packet_ipv6_offsets;
7454 		return;
7455 	}
7456 
7457 	if (tun_type == ICE_SW_TUN_PPPOE_IPV6_TCP) {
7458 		*pkt = dummy_pppoe_ipv6_tcp_packet;
7459 		*pkt_len = sizeof(dummy_pppoe_ipv6_tcp_packet);
7460 		*offsets = dummy_pppoe_packet_ipv6_tcp_offsets;
7461 		return;
7462 	}
7463 
7464 	if (tun_type == ICE_SW_TUN_PPPOE_IPV6_UDP) {
7465 		*pkt = dummy_pppoe_ipv6_udp_packet;
7466 		*pkt_len = sizeof(dummy_pppoe_ipv6_udp_packet);
7467 		*offsets = dummy_pppoe_packet_ipv6_udp_offsets;
7468 		return;
7469 	}
7470 
7471 	if (tun_type == ICE_SW_IPV4_TCP) {
7472 		*pkt = dummy_tcp_packet;
7473 		*pkt_len = sizeof(dummy_tcp_packet);
7474 		*offsets = dummy_tcp_packet_offsets;
7475 		return;
7476 	}
7477 
7478 	if (tun_type == ICE_SW_IPV4_UDP) {
7479 		*pkt = dummy_udp_packet;
7480 		*pkt_len = sizeof(dummy_udp_packet);
7481 		*offsets = dummy_udp_packet_offsets;
7482 		return;
7483 	}
7484 
7485 	if (tun_type == ICE_SW_IPV6_TCP) {
7486 		*pkt = dummy_tcp_ipv6_packet;
7487 		*pkt_len = sizeof(dummy_tcp_ipv6_packet);
7488 		*offsets = dummy_tcp_ipv6_packet_offsets;
7489 		return;
7490 	}
7491 
7492 	if (tun_type == ICE_SW_IPV6_UDP) {
7493 		*pkt = dummy_udp_ipv6_packet;
7494 		*pkt_len = sizeof(dummy_udp_ipv6_packet);
7495 		*offsets = dummy_udp_ipv6_packet_offsets;
7496 		return;
7497 	}
7498 
7499 	if (tun_type == ICE_ALL_TUNNELS) {
7500 		*pkt = dummy_gre_udp_packet;
7501 		*pkt_len = sizeof(dummy_gre_udp_packet);
7502 		*offsets = dummy_gre_udp_packet_offsets;
7503 		return;
7504 	}
7505 
7506 	if (tun_type == ICE_SW_TUN_NVGRE || gre) {
7507 		if (tcp) {
7508 			*pkt = dummy_gre_tcp_packet;
7509 			*pkt_len = sizeof(dummy_gre_tcp_packet);
7510 			*offsets = dummy_gre_tcp_packet_offsets;
7511 			return;
7512 		}
7513 
7514 		*pkt = dummy_gre_udp_packet;
7515 		*pkt_len = sizeof(dummy_gre_udp_packet);
7516 		*offsets = dummy_gre_udp_packet_offsets;
7517 		return;
7518 	}
7519 
7520 	if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
7521 	    tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP ||
7522 	    tun_type == ICE_SW_TUN_GENEVE_VLAN ||
7523 	    tun_type == ICE_SW_TUN_VXLAN_VLAN) {
7524 		if (tcp) {
7525 			*pkt = dummy_udp_tun_tcp_packet;
7526 			*pkt_len = sizeof(dummy_udp_tun_tcp_packet);
7527 			*offsets = dummy_udp_tun_tcp_packet_offsets;
7528 			return;
7529 		}
7530 
7531 		*pkt = dummy_udp_tun_udp_packet;
7532 		*pkt_len = sizeof(dummy_udp_tun_udp_packet);
7533 		*offsets = dummy_udp_tun_udp_packet_offsets;
7534 		return;
7535 	}
7536 
7537 	if (udp && !ipv6) {
7538 		if (vlan) {
7539 			*pkt = dummy_vlan_udp_packet;
7540 			*pkt_len = sizeof(dummy_vlan_udp_packet);
7541 			*offsets = dummy_vlan_udp_packet_offsets;
7542 			return;
7543 		}
7544 		*pkt = dummy_udp_packet;
7545 		*pkt_len = sizeof(dummy_udp_packet);
7546 		*offsets = dummy_udp_packet_offsets;
7547 		return;
7548 	} else if (udp && ipv6) {
7549 		if (vlan) {
7550 			*pkt = dummy_vlan_udp_ipv6_packet;
7551 			*pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
7552 			*offsets = dummy_vlan_udp_ipv6_packet_offsets;
7553 			return;
7554 		}
7555 		*pkt = dummy_udp_ipv6_packet;
7556 		*pkt_len = sizeof(dummy_udp_ipv6_packet);
7557 		*offsets = dummy_udp_ipv6_packet_offsets;
7558 		return;
7559 	} else if ((tcp && ipv6) || ipv6) {
7560 		if (vlan) {
7561 			*pkt = dummy_vlan_tcp_ipv6_packet;
7562 			*pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
7563 			*offsets = dummy_vlan_tcp_ipv6_packet_offsets;
7564 			return;
7565 		}
7566 		*pkt = dummy_tcp_ipv6_packet;
7567 		*pkt_len = sizeof(dummy_tcp_ipv6_packet);
7568 		*offsets = dummy_tcp_ipv6_packet_offsets;
7569 		return;
7570 	}
7571 
7572 	if (vlan) {
7573 		*pkt = dummy_vlan_tcp_packet;
7574 		*pkt_len = sizeof(dummy_vlan_tcp_packet);
7575 		*offsets = dummy_vlan_tcp_packet_offsets;
7576 	} else {
7577 		*pkt = dummy_tcp_packet;
7578 		*pkt_len = sizeof(dummy_tcp_packet);
7579 		*offsets = dummy_tcp_packet_offsets;
7580 	}
7581 }
7582 
7583 /**
7584  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
7585  *
7586  * @lkups: lookup elements or match criteria for the advanced recipe, one
7587  *	   structure per protocol header
7588  * @lkups_cnt: number of protocols
7589  * @s_rule: stores rule information from the match criteria
7590  * @dummy_pkt: dummy packet to fill according to filter match criteria
7591  * @pkt_len: packet length of dummy packet
7592  * @offsets: offset info for the dummy packet
7593  */
7594 static enum ice_status
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,struct ice_aqc_sw_rules_elem * s_rule,const u8 * dummy_pkt,u16 pkt_len,const struct ice_dummy_pkt_offsets * offsets)7595 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
7596 			  struct ice_aqc_sw_rules_elem *s_rule,
7597 			  const u8 *dummy_pkt, u16 pkt_len,
7598 			  const struct ice_dummy_pkt_offsets *offsets)
7599 {
7600 	u8 *pkt;
7601 	u16 i;
7602 
7603 	/* Start with a packet with a pre-defined/dummy content. Then, fill
7604 	 * in the header values to be looked up or matched.
7605 	 */
7606 	pkt = s_rule->pdata.lkup_tx_rx.hdr;
7607 
7608 	ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
7609 
7610 	for (i = 0; i < lkups_cnt; i++) {
7611 		enum ice_protocol_type type;
7612 		u16 offset = 0, len = 0, j;
7613 		bool found = false;
7614 
7615 		/* find the start of this layer; it should be found since this
7616 		 * was already checked when search for the dummy packet
7617 		 */
7618 		type = lkups[i].type;
7619 		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
7620 			if (type == offsets[j].type) {
7621 				offset = offsets[j].offset;
7622 				found = true;
7623 				break;
7624 			}
7625 		}
7626 		/* this should never happen in a correct calling sequence */
7627 		if (!found)
7628 			return ICE_ERR_PARAM;
7629 
7630 		switch (lkups[i].type) {
7631 		case ICE_MAC_OFOS:
7632 		case ICE_MAC_IL:
7633 			len = sizeof(struct ice_ether_hdr);
7634 			break;
7635 		case ICE_ETYPE_OL:
7636 			len = sizeof(struct ice_ethtype_hdr);
7637 			break;
7638 		case ICE_VLAN_OFOS:
7639 		case ICE_VLAN_EX:
7640 			len = sizeof(struct ice_vlan_hdr);
7641 			break;
7642 		case ICE_IPV4_OFOS:
7643 		case ICE_IPV4_IL:
7644 			len = sizeof(struct ice_ipv4_hdr);
7645 			break;
7646 		case ICE_IPV6_OFOS:
7647 		case ICE_IPV6_IL:
7648 			len = sizeof(struct ice_ipv6_hdr);
7649 			break;
7650 		case ICE_TCP_IL:
7651 		case ICE_UDP_OF:
7652 		case ICE_UDP_ILOS:
7653 			len = sizeof(struct ice_l4_hdr);
7654 			break;
7655 		case ICE_SCTP_IL:
7656 			len = sizeof(struct ice_sctp_hdr);
7657 			break;
7658 		case ICE_NVGRE:
7659 			len = sizeof(struct ice_nvgre);
7660 			break;
7661 		case ICE_VXLAN:
7662 		case ICE_GENEVE:
7663 		case ICE_VXLAN_GPE:
7664 			len = sizeof(struct ice_udp_tnl_hdr);
7665 			break;
7666 
7667 		case ICE_GTP:
7668 		case ICE_GTP_NO_PAY:
7669 			len = sizeof(struct ice_udp_gtp_hdr);
7670 			break;
7671 		case ICE_PPPOE:
7672 			len = sizeof(struct ice_pppoe_hdr);
7673 			break;
7674 		case ICE_ESP:
7675 			len = sizeof(struct ice_esp_hdr);
7676 			break;
7677 		case ICE_NAT_T:
7678 			len = sizeof(struct ice_nat_t_hdr);
7679 			break;
7680 		case ICE_AH:
7681 			len = sizeof(struct ice_ah_hdr);
7682 			break;
7683 		case ICE_L2TPV3:
7684 			len = sizeof(struct ice_l2tpv3_sess_hdr);
7685 			break;
7686 		default:
7687 			return ICE_ERR_PARAM;
7688 		}
7689 
7690 		/* the length should be a word multiple */
7691 		if (len % ICE_BYTES_PER_WORD)
7692 			return ICE_ERR_CFG;
7693 
7694 		/* We have the offset to the header start, the length, the
7695 		 * caller's header values and mask. Use this information to
7696 		 * copy the data into the dummy packet appropriately based on
7697 		 * the mask. Note that we need to only write the bits as
7698 		 * indicated by the mask to make sure we don't improperly write
7699 		 * over any significant packet data.
7700 		 */
7701 		for (j = 0; j < len / sizeof(u16); j++)
7702 			if (((u16 *)&lkups[i].m_u)[j])
7703 				((u16 *)(pkt + offset))[j] =
7704 					(((u16 *)(pkt + offset))[j] &
7705 					 ~((u16 *)&lkups[i].m_u)[j]) |
7706 					(((u16 *)&lkups[i].h_u)[j] &
7707 					 ((u16 *)&lkups[i].m_u)[j]);
7708 	}
7709 
7710 	s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
7711 
7712 	return ICE_SUCCESS;
7713 }
7714 
7715 /**
7716  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
7717  * @hw: pointer to the hardware structure
7718  * @tun_type: tunnel type
7719  * @pkt: dummy packet to fill in
7720  * @offsets: offset info for the dummy packet
7721  */
7722 static enum ice_status
ice_fill_adv_packet_tun(struct ice_hw * hw,enum ice_sw_tunnel_type tun_type,u8 * pkt,const struct ice_dummy_pkt_offsets * offsets)7723 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
7724 			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
7725 {
7726 	u16 open_port, i;
7727 
7728 	switch (tun_type) {
7729 	case ICE_SW_TUN_AND_NON_TUN:
7730 	case ICE_SW_TUN_VXLAN_GPE:
7731 	case ICE_SW_TUN_VXLAN:
7732 	case ICE_SW_TUN_VXLAN_VLAN:
7733 	case ICE_SW_TUN_UDP:
7734 		if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port))
7735 			return ICE_ERR_CFG;
7736 		break;
7737 
7738 	case ICE_SW_TUN_GENEVE:
7739 	case ICE_SW_TUN_GENEVE_VLAN:
7740 		if (!ice_get_open_tunnel_port(hw, TNL_GENEVE, &open_port))
7741 			return ICE_ERR_CFG;
7742 		break;
7743 
7744 	default:
7745 		/* Nothing needs to be done for this tunnel type */
7746 		return ICE_SUCCESS;
7747 	}
7748 
7749 	/* Find the outer UDP protocol header and insert the port number */
7750 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
7751 		if (offsets[i].type == ICE_UDP_OF) {
7752 			struct ice_l4_hdr *hdr;
7753 			u16 offset;
7754 
7755 			offset = offsets[i].offset;
7756 			hdr = (struct ice_l4_hdr *)&pkt[offset];
7757 			hdr->dst_port = CPU_TO_BE16(open_port);
7758 
7759 			return ICE_SUCCESS;
7760 		}
7761 	}
7762 
7763 	return ICE_ERR_CFG;
7764 }
7765 
7766 /**
7767  * ice_find_adv_rule_entry - Search a rule entry
7768  * @hw: pointer to the hardware structure
7769  * @lkups: lookup elements or match criteria for the advanced recipe, one
7770  *	   structure per protocol header
7771  * @lkups_cnt: number of protocols
7772  * @recp_id: recipe ID for which we are finding the rule
7773  * @rinfo: other information regarding the rule e.g. priority and action info
7774  *
7775  * Helper function to search for a given advance rule entry
7776  * Returns pointer to entry storing the rule if found
7777  */
7778 static struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,u16 recp_id,struct ice_adv_rule_info * rinfo)7779 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7780 			u16 lkups_cnt, u16 recp_id,
7781 			struct ice_adv_rule_info *rinfo)
7782 {
7783 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
7784 	struct ice_switch_info *sw = hw->switch_info;
7785 	int i;
7786 
7787 	LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
7788 			    ice_adv_fltr_mgmt_list_entry, list_entry) {
7789 		bool lkups_matched = true;
7790 
7791 		if (lkups_cnt != list_itr->lkups_cnt)
7792 			continue;
7793 		for (i = 0; i < list_itr->lkups_cnt; i++)
7794 			if (memcmp(&list_itr->lkups[i], &lkups[i],
7795 				   sizeof(*lkups))) {
7796 				lkups_matched = false;
7797 				break;
7798 			}
7799 		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
7800 		    rinfo->tun_type == list_itr->rule_info.tun_type &&
7801 		    lkups_matched)
7802 			return list_itr;
7803 	}
7804 	return NULL;
7805 }
7806 
7807 /**
7808  * ice_adv_add_update_vsi_list
7809  * @hw: pointer to the hardware structure
7810  * @m_entry: pointer to current adv filter management list entry
7811  * @cur_fltr: filter information from the book keeping entry
7812  * @new_fltr: filter information with the new VSI to be added
7813  *
7814  * Call AQ command to add or update previously created VSI list with new VSI.
7815  *
7816  * Helper function to do book keeping associated with adding filter information
7817  * The algorithm to do the booking keeping is described below :
7818  * When a VSI needs to subscribe to a given advanced filter
7819  *	if only one VSI has been added till now
7820  *		Allocate a new VSI list and add two VSIs
7821  *		to this list using switch rule command
7822  *		Update the previously created switch rule with the
7823  *		newly created VSI list ID
7824  *	if a VSI list was previously created
7825  *		Add the new VSI to the previously created VSI list set
7826  *		using the update switch rule command
7827  */
7828 static enum ice_status
ice_adv_add_update_vsi_list(struct ice_hw * hw,struct ice_adv_fltr_mgmt_list_entry * m_entry,struct ice_adv_rule_info * cur_fltr,struct ice_adv_rule_info * new_fltr)7829 ice_adv_add_update_vsi_list(struct ice_hw *hw,
7830 			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
7831 			    struct ice_adv_rule_info *cur_fltr,
7832 			    struct ice_adv_rule_info *new_fltr)
7833 {
7834 	enum ice_status status;
7835 	u16 vsi_list_id = 0;
7836 
7837 	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7838 	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7839 	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
7840 		return ICE_ERR_NOT_IMPL;
7841 
7842 	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
7843 	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
7844 	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7845 	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
7846 		return ICE_ERR_NOT_IMPL;
7847 
7848 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
7849 		 /* Only one entry existed in the mapping and it was not already
7850 		  * a part of a VSI list. So, create a VSI list with the old and
7851 		  * new VSIs.
7852 		  */
7853 		struct ice_fltr_info tmp_fltr;
7854 		u16 vsi_handle_arr[2];
7855 
7856 		/* A rule already exists with the new VSI being added */
7857 		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
7858 		    new_fltr->sw_act.fwd_id.hw_vsi_id)
7859 			return ICE_ERR_ALREADY_EXISTS;
7860 
7861 		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
7862 		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
7863 		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
7864 						  &vsi_list_id,
7865 						  ICE_SW_LKUP_LAST);
7866 		if (status)
7867 			return status;
7868 
7869 		ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
7870 		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
7871 		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
7872 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
7873 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
7874 		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
7875 
7876 		/* Update the previous switch rule of "forward to VSI" to
7877 		 * "fwd to VSI list"
7878 		 */
7879 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
7880 		if (status)
7881 			return status;
7882 
7883 		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
7884 		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
7885 		m_entry->vsi_list_info =
7886 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
7887 						vsi_list_id);
7888 	} else {
7889 		u16 vsi_handle = new_fltr->sw_act.vsi_handle;
7890 
7891 		if (!m_entry->vsi_list_info)
7892 			return ICE_ERR_CFG;
7893 
7894 		/* A rule already exists with the new VSI being added */
7895 		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
7896 			return ICE_SUCCESS;
7897 
7898 		/* Update the previously created VSI list set with
7899 		 * the new VSI ID passed in
7900 		 */
7901 		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
7902 
7903 		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
7904 						  vsi_list_id, false,
7905 						  ice_aqc_opc_update_sw_rules,
7906 						  ICE_SW_LKUP_LAST);
7907 		/* update VSI list mapping info with new VSI ID */
7908 		if (!status)
7909 			ice_set_bit(vsi_handle,
7910 				    m_entry->vsi_list_info->vsi_map);
7911 	}
7912 	if (!status)
7913 		m_entry->vsi_count++;
7914 	return status;
7915 }
7916 
7917 /**
7918  * ice_add_adv_rule - helper function to create an advanced switch rule
7919  * @hw: pointer to the hardware structure
7920  * @lkups: information on the words that needs to be looked up. All words
7921  * together makes one recipe
7922  * @lkups_cnt: num of entries in the lkups array
7923  * @rinfo: other information related to the rule that needs to be programmed
7924  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
7925  *               ignored is case of error.
7926  *
7927  * This function can program only 1 rule at a time. The lkups is used to
7928  * describe the all the words that forms the "lookup" portion of the recipe.
7929  * These words can span multiple protocols. Callers to this function need to
7930  * pass in a list of protocol headers with lookup information along and mask
7931  * that determines which words are valid from the given protocol header.
7932  * rinfo describes other information related to this rule such as forwarding
7933  * IDs, priority of this rule, etc.
7934  */
7935 enum ice_status
ice_add_adv_rule(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,struct ice_adv_rule_info * rinfo,struct ice_rule_query_data * added_entry)7936 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
7937 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
7938 		 struct ice_rule_query_data *added_entry)
7939 {
7940 	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
7941 	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
7942 	const struct ice_dummy_pkt_offsets *pkt_offsets;
7943 	struct ice_aqc_sw_rules_elem *s_rule = NULL;
7944 	struct LIST_HEAD_TYPE *rule_head;
7945 	struct ice_switch_info *sw;
7946 	enum ice_status status;
7947 	const u8 *pkt = NULL;
7948 	bool prof_rule;
7949 	u16 word_cnt;
7950 	u32 act = 0;
7951 	u8 q_rgn;
7952 
7953 	/* Initialize profile to result index bitmap */
7954 	if (!hw->switch_info->prof_res_bm_init) {
7955 		hw->switch_info->prof_res_bm_init = 1;
7956 		ice_init_prof_result_bm(hw);
7957 	}
7958 
7959 	prof_rule = ice_is_prof_rule(rinfo->tun_type);
7960 	if (!prof_rule && !lkups_cnt)
7961 		return ICE_ERR_PARAM;
7962 
7963 	/* get # of words we need to match */
7964 	word_cnt = 0;
7965 	for (i = 0; i < lkups_cnt; i++) {
7966 		u16 j, *ptr;
7967 
7968 		ptr = (u16 *)&lkups[i].m_u;
7969 		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
7970 			if (ptr[j] != 0)
7971 				word_cnt++;
7972 	}
7973 
7974 	if (prof_rule) {
7975 		if (word_cnt > ICE_MAX_CHAIN_WORDS)
7976 			return ICE_ERR_PARAM;
7977 	} else {
7978 		if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
7979 			return ICE_ERR_PARAM;
7980 	}
7981 
7982 	/* make sure that we can locate a dummy packet */
7983 	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
7984 			      &pkt_offsets);
7985 	if (!pkt) {
7986 		status = ICE_ERR_PARAM;
7987 		goto err_ice_add_adv_rule;
7988 	}
7989 
7990 	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
7991 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
7992 	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
7993 	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
7994 		return ICE_ERR_CFG;
7995 
7996 	vsi_handle = rinfo->sw_act.vsi_handle;
7997 	if (!ice_is_vsi_valid(hw, vsi_handle))
7998 		return ICE_ERR_PARAM;
7999 
8000 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8001 		rinfo->sw_act.fwd_id.hw_vsi_id =
8002 			ice_get_hw_vsi_num(hw, vsi_handle);
8003 	if (rinfo->sw_act.flag & ICE_FLTR_TX)
8004 		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
8005 
8006 	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
8007 	if (status)
8008 		return status;
8009 	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8010 	if (m_entry) {
8011 		/* we have to add VSI to VSI_LIST and increment vsi_count.
8012 		 * Also Update VSI list so that we can change forwarding rule
8013 		 * if the rule already exists, we will check if it exists with
8014 		 * same vsi_id, if not then add it to the VSI list if it already
8015 		 * exists if not then create a VSI list and add the existing VSI
8016 		 * ID and the new VSI ID to the list
8017 		 * We will add that VSI to the list
8018 		 */
8019 		status = ice_adv_add_update_vsi_list(hw, m_entry,
8020 						     &m_entry->rule_info,
8021 						     rinfo);
8022 		if (added_entry) {
8023 			added_entry->rid = rid;
8024 			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
8025 			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8026 		}
8027 		return status;
8028 	}
8029 	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
8030 	s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
8031 	if (!s_rule)
8032 		return ICE_ERR_NO_MEMORY;
8033 	act |= ICE_SINGLE_ACT_LAN_ENABLE;
8034 	switch (rinfo->sw_act.fltr_act) {
8035 	case ICE_FWD_TO_VSI:
8036 		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
8037 			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
8038 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
8039 		break;
8040 	case ICE_FWD_TO_Q:
8041 		act |= ICE_SINGLE_ACT_TO_Q;
8042 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8043 		       ICE_SINGLE_ACT_Q_INDEX_M;
8044 		break;
8045 	case ICE_FWD_TO_QGRP:
8046 		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
8047 			(u8)ice_ilog2(rinfo->sw_act.qgrp_size) : 0;
8048 		act |= ICE_SINGLE_ACT_TO_Q;
8049 		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
8050 		       ICE_SINGLE_ACT_Q_INDEX_M;
8051 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
8052 		       ICE_SINGLE_ACT_Q_REGION_M;
8053 		break;
8054 	case ICE_DROP_PACKET:
8055 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
8056 		       ICE_SINGLE_ACT_VALID_BIT;
8057 		break;
8058 	default:
8059 		status = ICE_ERR_CFG;
8060 		goto err_ice_add_adv_rule;
8061 	}
8062 
8063 	/* set the rule LOOKUP type based on caller specified 'RX'
8064 	 * instead of hardcoding it to be either LOOKUP_TX/RX
8065 	 *
8066 	 * for 'RX' set the source to be the port number
8067 	 * for 'TX' set the source to be the source HW VSI number (determined
8068 	 * by caller)
8069 	 */
8070 	if (rinfo->rx) {
8071 		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
8072 		s_rule->pdata.lkup_tx_rx.src =
8073 			CPU_TO_LE16(hw->port_info->lport);
8074 	} else {
8075 		s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
8076 		s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
8077 	}
8078 
8079 	s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
8080 	s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
8081 
8082 	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
8083 					   pkt_len, pkt_offsets);
8084 	if (status)
8085 		goto err_ice_add_adv_rule;
8086 
8087 	if (rinfo->tun_type != ICE_NON_TUN &&
8088 	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
8089 		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
8090 						 s_rule->pdata.lkup_tx_rx.hdr,
8091 						 pkt_offsets);
8092 		if (status)
8093 			goto err_ice_add_adv_rule;
8094 	}
8095 
8096 	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8097 				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
8098 				 NULL);
8099 	if (status)
8100 		goto err_ice_add_adv_rule;
8101 	adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
8102 		ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
8103 	if (!adv_fltr) {
8104 		status = ICE_ERR_NO_MEMORY;
8105 		goto err_ice_add_adv_rule;
8106 	}
8107 
8108 	adv_fltr->lkups = (struct ice_adv_lkup_elem *)
8109 		ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
8110 			   ICE_NONDMA_TO_NONDMA);
8111 	if (!adv_fltr->lkups && !prof_rule) {
8112 		status = ICE_ERR_NO_MEMORY;
8113 		goto err_ice_add_adv_rule;
8114 	}
8115 
8116 	adv_fltr->lkups_cnt = lkups_cnt;
8117 	adv_fltr->rule_info = *rinfo;
8118 	adv_fltr->rule_info.fltr_rule_id =
8119 		LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
8120 	sw = hw->switch_info;
8121 	sw->recp_list[rid].adv_rule = true;
8122 	rule_head = &sw->recp_list[rid].filt_rules;
8123 
8124 	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
8125 		adv_fltr->vsi_count = 1;
8126 
8127 	/* Add rule entry to book keeping list */
8128 	LIST_ADD(&adv_fltr->list_entry, rule_head);
8129 	if (added_entry) {
8130 		added_entry->rid = rid;
8131 		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
8132 		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
8133 	}
8134 err_ice_add_adv_rule:
8135 	if (status && adv_fltr) {
8136 		ice_free(hw, adv_fltr->lkups);
8137 		ice_free(hw, adv_fltr);
8138 	}
8139 
8140 	ice_free(hw, s_rule);
8141 
8142 	return status;
8143 }
8144 
8145 /**
8146  * ice_adv_rem_update_vsi_list
8147  * @hw: pointer to the hardware structure
8148  * @vsi_handle: VSI handle of the VSI to remove
8149  * @fm_list: filter management entry for which the VSI list management needs to
8150  *	     be done
8151  */
8152 static enum ice_status
ice_adv_rem_update_vsi_list(struct ice_hw * hw,u16 vsi_handle,struct ice_adv_fltr_mgmt_list_entry * fm_list)8153 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
8154 			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
8155 {
8156 	struct ice_vsi_list_map_info *vsi_list_info;
8157 	enum ice_sw_lkup_type lkup_type;
8158 	enum ice_status status;
8159 	u16 vsi_list_id;
8160 
8161 	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
8162 	    fm_list->vsi_count == 0)
8163 		return ICE_ERR_PARAM;
8164 
8165 	/* A rule with the VSI being removed does not exist */
8166 	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
8167 		return ICE_ERR_DOES_NOT_EXIST;
8168 
8169 	lkup_type = ICE_SW_LKUP_LAST;
8170 	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
8171 	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
8172 					  ice_aqc_opc_update_sw_rules,
8173 					  lkup_type);
8174 	if (status)
8175 		return status;
8176 
8177 	fm_list->vsi_count--;
8178 	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
8179 	vsi_list_info = fm_list->vsi_list_info;
8180 	if (fm_list->vsi_count == 1) {
8181 		struct ice_fltr_info tmp_fltr;
8182 		u16 rem_vsi_handle;
8183 
8184 		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
8185 						    ICE_MAX_VSI);
8186 		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
8187 			return ICE_ERR_OUT_OF_RANGE;
8188 
8189 		/* Make sure VSI list is empty before removing it below */
8190 		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
8191 						  vsi_list_id, true,
8192 						  ice_aqc_opc_update_sw_rules,
8193 						  lkup_type);
8194 		if (status)
8195 			return status;
8196 
8197 		ice_memset(&tmp_fltr, 0, sizeof(tmp_fltr), ICE_NONDMA_MEM);
8198 		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
8199 		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
8200 		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
8201 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
8202 		tmp_fltr.fwd_id.hw_vsi_id =
8203 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
8204 		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
8205 			ice_get_hw_vsi_num(hw, rem_vsi_handle);
8206 		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
8207 
8208 		/* Update the previous switch rule of "MAC forward to VSI" to
8209 		 * "MAC fwd to VSI list"
8210 		 */
8211 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
8212 		if (status) {
8213 			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
8214 				  tmp_fltr.fwd_id.hw_vsi_id, status);
8215 			return status;
8216 		}
8217 		fm_list->vsi_list_info->ref_cnt--;
8218 
8219 		/* Remove the VSI list since it is no longer used */
8220 		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
8221 		if (status) {
8222 			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
8223 				  vsi_list_id, status);
8224 			return status;
8225 		}
8226 
8227 		LIST_DEL(&vsi_list_info->list_entry);
8228 		ice_free(hw, vsi_list_info);
8229 		fm_list->vsi_list_info = NULL;
8230 	}
8231 
8232 	return status;
8233 }
8234 
8235 /**
8236  * ice_rem_adv_rule - removes existing advanced switch rule
8237  * @hw: pointer to the hardware structure
8238  * @lkups: information on the words that needs to be looked up. All words
8239  *         together makes one recipe
8240  * @lkups_cnt: num of entries in the lkups array
8241  * @rinfo: Its the pointer to the rule information for the rule
8242  *
8243  * This function can be used to remove 1 rule at a time. The lkups is
8244  * used to describe all the words that forms the "lookup" portion of the
8245  * rule. These words can span multiple protocols. Callers to this function
8246  * need to pass in a list of protocol headers with lookup information along
8247  * and mask that determines which words are valid from the given protocol
8248  * header. rinfo describes other information related to this rule such as
8249  * forwarding IDs, priority of this rule, etc.
8250  */
8251 enum ice_status
ice_rem_adv_rule(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,struct ice_adv_rule_info * rinfo)8252 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
8253 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
8254 {
8255 	struct ice_adv_fltr_mgmt_list_entry *list_elem;
8256 	struct ice_prot_lkup_ext lkup_exts;
8257 	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
8258 	enum ice_status status = ICE_SUCCESS;
8259 	bool remove_rule = false;
8260 	u16 i, rid, vsi_handle;
8261 
8262 	ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
8263 	for (i = 0; i < lkups_cnt; i++) {
8264 		u16 count;
8265 
8266 		if (lkups[i].type >= ICE_PROTOCOL_LAST)
8267 			return ICE_ERR_CFG;
8268 
8269 		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
8270 		if (!count)
8271 			return ICE_ERR_CFG;
8272 	}
8273 
8274 	/* Create any special protocol/offset pairs, such as looking at tunnel
8275 	 * bits by extracting metadata
8276 	 */
8277 	status = ice_add_special_words(rinfo, &lkup_exts);
8278 	if (status)
8279 		return status;
8280 
8281 	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
8282 	/* If did not find a recipe that match the existing criteria */
8283 	if (rid == ICE_MAX_NUM_RECIPES)
8284 		return ICE_ERR_PARAM;
8285 
8286 	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
8287 	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
8288 	/* the rule is already removed */
8289 	if (!list_elem)
8290 		return ICE_SUCCESS;
8291 	ice_acquire_lock(rule_lock);
8292 	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
8293 		remove_rule = true;
8294 	} else if (list_elem->vsi_count > 1) {
8295 		remove_rule = false;
8296 		vsi_handle = rinfo->sw_act.vsi_handle;
8297 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8298 	} else {
8299 		vsi_handle = rinfo->sw_act.vsi_handle;
8300 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
8301 		if (status) {
8302 			ice_release_lock(rule_lock);
8303 			return status;
8304 		}
8305 		if (list_elem->vsi_count == 0)
8306 			remove_rule = true;
8307 	}
8308 	ice_release_lock(rule_lock);
8309 	if (remove_rule) {
8310 		struct ice_aqc_sw_rules_elem *s_rule;
8311 		u16 rule_buf_sz;
8312 
8313 		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
8314 		s_rule = (struct ice_aqc_sw_rules_elem *)
8315 			ice_malloc(hw, rule_buf_sz);
8316 		if (!s_rule)
8317 			return ICE_ERR_NO_MEMORY;
8318 		s_rule->pdata.lkup_tx_rx.act = 0;
8319 		s_rule->pdata.lkup_tx_rx.index =
8320 			CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
8321 		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
8322 		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
8323 					 rule_buf_sz, 1,
8324 					 ice_aqc_opc_remove_sw_rules, NULL);
8325 		if (status == ICE_SUCCESS || status == ICE_ERR_DOES_NOT_EXIST) {
8326 			struct ice_switch_info *sw = hw->switch_info;
8327 
8328 			ice_acquire_lock(rule_lock);
8329 			LIST_DEL(&list_elem->list_entry);
8330 			ice_free(hw, list_elem->lkups);
8331 			ice_free(hw, list_elem);
8332 			ice_release_lock(rule_lock);
8333 			if (LIST_EMPTY(&sw->recp_list[rid].filt_rules))
8334 				sw->recp_list[rid].adv_rule = false;
8335 		}
8336 		ice_free(hw, s_rule);
8337 	}
8338 	return status;
8339 }
8340 
8341 /**
8342  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
8343  * @hw: pointer to the hardware structure
8344  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
8345  *
8346  * This function is used to remove 1 rule at a time. The removal is based on
8347  * the remove_entry parameter. This function will remove rule for a given
8348  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
8349  */
8350 enum ice_status
ice_rem_adv_rule_by_id(struct ice_hw * hw,struct ice_rule_query_data * remove_entry)8351 ice_rem_adv_rule_by_id(struct ice_hw *hw,
8352 		       struct ice_rule_query_data *remove_entry)
8353 {
8354 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
8355 	struct LIST_HEAD_TYPE *list_head;
8356 	struct ice_adv_rule_info rinfo;
8357 	struct ice_switch_info *sw;
8358 
8359 	sw = hw->switch_info;
8360 	if (!sw->recp_list[remove_entry->rid].recp_created)
8361 		return ICE_ERR_PARAM;
8362 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
8363 	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
8364 			    list_entry) {
8365 		if (list_itr->rule_info.fltr_rule_id ==
8366 		    remove_entry->rule_id) {
8367 			rinfo = list_itr->rule_info;
8368 			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
8369 			return ice_rem_adv_rule(hw, list_itr->lkups,
8370 						list_itr->lkups_cnt, &rinfo);
8371 		}
8372 	}
8373 	/* either list is empty or unable to find rule */
8374 	return ICE_ERR_DOES_NOT_EXIST;
8375 }
8376 
8377 /**
8378  * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
8379  *                       given VSI handle
8380  * @hw: pointer to the hardware structure
8381  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
8382  *
8383  * This function is used to remove all the rules for a given VSI and as soon
8384  * as removing a rule fails, it will return immediately with the error code,
8385  * else it will return ICE_SUCCESS
8386  */
ice_rem_adv_rule_for_vsi(struct ice_hw * hw,u16 vsi_handle)8387 enum ice_status ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
8388 {
8389 	struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
8390 	struct ice_vsi_list_map_info *map_info;
8391 	struct LIST_HEAD_TYPE *list_head;
8392 	struct ice_adv_rule_info rinfo;
8393 	struct ice_switch_info *sw;
8394 	enum ice_status status;
8395 	u8 rid;
8396 
8397 	sw = hw->switch_info;
8398 	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
8399 		if (!sw->recp_list[rid].recp_created)
8400 			continue;
8401 		if (!sw->recp_list[rid].adv_rule)
8402 			continue;
8403 
8404 		list_head = &sw->recp_list[rid].filt_rules;
8405 		LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp_entry, list_head,
8406 					 ice_adv_fltr_mgmt_list_entry,
8407 					 list_entry) {
8408 			rinfo = list_itr->rule_info;
8409 
8410 			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
8411 				map_info = list_itr->vsi_list_info;
8412 				if (!map_info)
8413 					continue;
8414 
8415 				if (!ice_is_bit_set(map_info->vsi_map,
8416 						    vsi_handle))
8417 					continue;
8418 			} else if (rinfo.sw_act.vsi_handle != vsi_handle) {
8419 				continue;
8420 			}
8421 
8422 			rinfo.sw_act.vsi_handle = vsi_handle;
8423 			status = ice_rem_adv_rule(hw, list_itr->lkups,
8424 						  list_itr->lkups_cnt, &rinfo);
8425 
8426 			if (status)
8427 				return status;
8428 		}
8429 	}
8430 	return ICE_SUCCESS;
8431 }
8432 
8433 /**
8434  * ice_replay_fltr - Replay all the filters stored by a specific list head
8435  * @hw: pointer to the hardware structure
8436  * @list_head: list for which filters needs to be replayed
8437  * @recp_id: Recipe ID for which rules need to be replayed
8438  */
8439 static enum ice_status
ice_replay_fltr(struct ice_hw * hw,u8 recp_id,struct LIST_HEAD_TYPE * list_head)8440 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
8441 {
8442 	struct ice_fltr_mgmt_list_entry *itr;
8443 	enum ice_status status = ICE_SUCCESS;
8444 	struct ice_sw_recipe *recp_list;
8445 	u8 lport = hw->port_info->lport;
8446 	struct LIST_HEAD_TYPE l_head;
8447 
8448 	if (LIST_EMPTY(list_head))
8449 		return status;
8450 
8451 	recp_list = &hw->switch_info->recp_list[recp_id];
8452 	/* Move entries from the given list_head to a temporary l_head so that
8453 	 * they can be replayed. Otherwise when trying to re-add the same
8454 	 * filter, the function will return already exists
8455 	 */
8456 	LIST_REPLACE_INIT(list_head, &l_head);
8457 
8458 	/* Mark the given list_head empty by reinitializing it so filters
8459 	 * could be added again by *handler
8460 	 */
8461 	LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
8462 			    list_entry) {
8463 		struct ice_fltr_list_entry f_entry;
8464 		u16 vsi_handle;
8465 
8466 		f_entry.fltr_info = itr->fltr_info;
8467 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
8468 			status = ice_add_rule_internal(hw, recp_list, lport,
8469 						       &f_entry);
8470 			if (status != ICE_SUCCESS)
8471 				goto end;
8472 			continue;
8473 		}
8474 
8475 		/* Add a filter per VSI separately */
8476 		ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
8477 				     ICE_MAX_VSI) {
8478 			if (!ice_is_vsi_valid(hw, vsi_handle))
8479 				break;
8480 
8481 			ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8482 			f_entry.fltr_info.vsi_handle = vsi_handle;
8483 			f_entry.fltr_info.fwd_id.hw_vsi_id =
8484 				ice_get_hw_vsi_num(hw, vsi_handle);
8485 			f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8486 			if (recp_id == ICE_SW_LKUP_VLAN)
8487 				status = ice_add_vlan_internal(hw, recp_list,
8488 							       &f_entry);
8489 			else
8490 				status = ice_add_rule_internal(hw, recp_list,
8491 							       lport,
8492 							       &f_entry);
8493 			if (status != ICE_SUCCESS)
8494 				goto end;
8495 		}
8496 	}
8497 end:
8498 	/* Clear the filter management list */
8499 	ice_rem_sw_rule_info(hw, &l_head);
8500 	return status;
8501 }
8502 
8503 /**
8504  * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
8505  * @hw: pointer to the hardware structure
8506  *
8507  * NOTE: This function does not clean up partially added filters on error.
8508  * It is up to caller of the function to issue a reset or fail early.
8509  */
ice_replay_all_fltr(struct ice_hw * hw)8510 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
8511 {
8512 	struct ice_switch_info *sw = hw->switch_info;
8513 	enum ice_status status = ICE_SUCCESS;
8514 	u8 i;
8515 
8516 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8517 		struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
8518 
8519 		status = ice_replay_fltr(hw, i, head);
8520 		if (status != ICE_SUCCESS)
8521 			return status;
8522 	}
8523 	return status;
8524 }
8525 
8526 /**
8527  * ice_replay_vsi_fltr - Replay filters for requested VSI
8528  * @hw: pointer to the hardware structure
8529  * @pi: pointer to port information structure
8530  * @sw: pointer to switch info struct for which function replays filters
8531  * @vsi_handle: driver VSI handle
8532  * @recp_id: Recipe ID for which rules need to be replayed
8533  * @list_head: list for which filters need to be replayed
8534  *
8535  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
8536  * It is required to pass valid VSI handle.
8537  */
8538 static enum ice_status
ice_replay_vsi_fltr(struct ice_hw * hw,struct ice_port_info * pi,struct ice_switch_info * sw,u16 vsi_handle,u8 recp_id,struct LIST_HEAD_TYPE * list_head)8539 ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8540 		    struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
8541 		    struct LIST_HEAD_TYPE *list_head)
8542 {
8543 	struct ice_fltr_mgmt_list_entry *itr;
8544 	enum ice_status status = ICE_SUCCESS;
8545 	struct ice_sw_recipe *recp_list;
8546 	u16 hw_vsi_id;
8547 
8548 	if (LIST_EMPTY(list_head))
8549 		return status;
8550 	recp_list = &sw->recp_list[recp_id];
8551 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
8552 
8553 	LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
8554 			    list_entry) {
8555 		struct ice_fltr_list_entry f_entry;
8556 
8557 		f_entry.fltr_info = itr->fltr_info;
8558 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
8559 		    itr->fltr_info.vsi_handle == vsi_handle) {
8560 			/* update the src in case it is VSI num */
8561 			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8562 				f_entry.fltr_info.src = hw_vsi_id;
8563 			status = ice_add_rule_internal(hw, recp_list,
8564 						       pi->lport,
8565 						       &f_entry);
8566 			if (status != ICE_SUCCESS)
8567 				goto end;
8568 			continue;
8569 		}
8570 		if (!itr->vsi_list_info ||
8571 		    !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
8572 			continue;
8573 		/* Clearing it so that the logic can add it back */
8574 		ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
8575 		f_entry.fltr_info.vsi_handle = vsi_handle;
8576 		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
8577 		/* update the src in case it is VSI num */
8578 		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
8579 			f_entry.fltr_info.src = hw_vsi_id;
8580 		if (recp_id == ICE_SW_LKUP_VLAN)
8581 			status = ice_add_vlan_internal(hw, recp_list, &f_entry);
8582 		else
8583 			status = ice_add_rule_internal(hw, recp_list,
8584 						       pi->lport,
8585 						       &f_entry);
8586 		if (status != ICE_SUCCESS)
8587 			goto end;
8588 	}
8589 end:
8590 	return status;
8591 }
8592 
8593 /**
8594  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
8595  * @hw: pointer to the hardware structure
8596  * @vsi_handle: driver VSI handle
8597  * @list_head: list for which filters need to be replayed
8598  *
8599  * Replay the advanced rule for the given VSI.
8600  */
8601 static enum ice_status
ice_replay_vsi_adv_rule(struct ice_hw * hw,u16 vsi_handle,struct LIST_HEAD_TYPE * list_head)8602 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
8603 			struct LIST_HEAD_TYPE *list_head)
8604 {
8605 	struct ice_rule_query_data added_entry = { 0 };
8606 	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
8607 	enum ice_status status = ICE_SUCCESS;
8608 
8609 	if (LIST_EMPTY(list_head))
8610 		return status;
8611 	LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
8612 			    list_entry) {
8613 		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
8614 		u16 lk_cnt = adv_fltr->lkups_cnt;
8615 
8616 		if (vsi_handle != rinfo->sw_act.vsi_handle)
8617 			continue;
8618 		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
8619 					  &added_entry);
8620 		if (status)
8621 			break;
8622 	}
8623 	return status;
8624 }
8625 
8626 /**
8627  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
8628  * @hw: pointer to the hardware structure
8629  * @pi: pointer to port information structure
8630  * @vsi_handle: driver VSI handle
8631  *
8632  * Replays filters for requested VSI via vsi_handle.
8633  */
8634 enum ice_status
ice_replay_vsi_all_fltr(struct ice_hw * hw,struct ice_port_info * pi,u16 vsi_handle)8635 ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
8636 			u16 vsi_handle)
8637 {
8638 	struct ice_switch_info *sw = hw->switch_info;
8639 	enum ice_status status;
8640 	u8 i;
8641 
8642 	/* Update the recipes that were created */
8643 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8644 		struct LIST_HEAD_TYPE *head;
8645 
8646 		head = &sw->recp_list[i].filt_replay_rules;
8647 		if (!sw->recp_list[i].adv_rule)
8648 			status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
8649 						     head);
8650 		else
8651 			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
8652 		if (status != ICE_SUCCESS)
8653 			return status;
8654 	}
8655 
8656 	return ICE_SUCCESS;
8657 }
8658 
8659 /**
8660  * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
8661  * @hw: pointer to the HW struct
8662  * @sw: pointer to switch info struct for which function removes filters
8663  *
8664  * Deletes the filter replay rules for given switch
8665  */
ice_rm_sw_replay_rule_info(struct ice_hw * hw,struct ice_switch_info * sw)8666 void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
8667 {
8668 	u8 i;
8669 
8670 	if (!sw)
8671 		return;
8672 
8673 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
8674 		if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
8675 			struct LIST_HEAD_TYPE *l_head;
8676 
8677 			l_head = &sw->recp_list[i].filt_replay_rules;
8678 			if (!sw->recp_list[i].adv_rule)
8679 				ice_rem_sw_rule_info(hw, l_head);
8680 			else
8681 				ice_rem_adv_rule_info(hw, l_head);
8682 		}
8683 	}
8684 }
8685 
8686 /**
8687  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
8688  * @hw: pointer to the HW struct
8689  *
8690  * Deletes the filter replay rules.
8691  */
ice_rm_all_sw_replay_rule_info(struct ice_hw * hw)8692 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
8693 {
8694 	ice_rm_sw_replay_rule_info(hw, hw->switch_info);
8695 }
8696