1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
3 */
4
5 #include "ice_common.h"
6 #include "ice_flow.h"
7
/* Size of known protocol header fields, in bytes. These feed the
 * ICE_FLOW_FLD_INFO* macros below, which convert them to bit widths.
 */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR	6
#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR	8
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4
#define ICE_FLOW_FLD_SZ_GTP_TEID	4
#define ICE_FLOW_FLD_SZ_GTP_QFI		2
#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID	2
#define ICE_FLOW_FLD_SZ_PFCP_SEID	8
#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID	4
#define ICE_FLOW_FLD_SZ_ESP_SPI		4
#define ICE_FLOW_FLD_SZ_AH_SPI		4
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI	4
33
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;	/* header the field belongs to */
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
	u16 mask;	/* 16-bit mask for field; 0 means "match whole field" */
};
41
/* Build an ice_flow_field_info entry from byte offsets/sizes with no
 * per-field mask (the whole field is significant).
 */
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = 0, \
}
48
/* Same as ICE_FLOW_FLD_INFO but with an explicit 16-bit mask for fields
 * that are narrower than, or not aligned to, the extracted word.
 */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
	.mask = _mask, \
}
55
/* Table containing properties of supported protocol header fields,
 * indexed by enum ice_flow_field (see the ICE_FLOW_FIELD_IDX_* comments
 * on each entry). Offsets are byte offsets into the named header.
 */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* Ether */
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE
	 * NOTE(review): offset 0, not 12 — presumably the ethertype is
	 * extracted from its own protocol entry rather than relative to the
	 * MAC header; confirm against the extraction logic.
	 */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x00fc),
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
			      0x0ff0),
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL
	 * NOTE(review): hdr is SEG_HDR_NONE for the TTL/PROT entries below —
	 * presumably because TTL and protocol share one extracted 16-bit word
	 * and the IPv4/IPv6 header is selected at runtime; the masks pick the
	 * relevant byte. Verify against the field-extraction code.
	 */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
			      ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
			  ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
	/* ARP */
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
	/* ICMP */
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
	/* GTP */
	/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
	ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
			      ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
	/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
			  ICE_FLOW_FLD_SZ_GTP_TEID),
	/* PPPOE */
	/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
			  ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
	/* PFCP */
	/* ICE_FLOW_FIELD_IDX_PFCP_SEID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
			  ICE_FLOW_FLD_SZ_PFCP_SEID),
	/* L2TPV3 */
	/* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
			  ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
	/* ESP */
	/* ICE_FLOW_FIELD_IDX_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
			  ICE_FLOW_FLD_SZ_ESP_SPI),
	/* AH */
	/* ICE_FLOW_FIELD_IDX_AH_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
			  ICE_FLOW_FLD_SZ_AH_SPI),
	/* NAT_T_ESP */
	/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
			  ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
};
193
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Bit i of word w marks hardware packet type (w * 32 + i) as relevant —
 * presumed PTYPE-number-to-bit mapping; confirm against the code that walks
 * these bitmaps.
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00000307,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
208
/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
220
/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
 * include IPV4 other PTYPEs
 */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x00000000, 0x000FC000, 0x000002A0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
234
/* Packet types for packets with an Outer/First/Single IPv4 header, includes
 * IPV4 other PTYPEs
 */
static const u32 ice_ptypes_ipv4_ofos_all[] = {
	0x1DC00000, 0x24000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000155, 0x00000000, 0x00000000,
	0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
248
/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x001FF800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
260
/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
 * include IPV6 other PTYPEs
 */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
	0x00000000, 0x03F00000, 0x00000540, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
274
/* Packet types for packets with an Outer/First/Single IPv6 header, includes
 * IPV6 other PTYPEs
 */
static const u32 ice_ptypes_ipv6_ofos_all[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x000002AA, 0x00000000, 0x00000000,
	0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
288
/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
300
/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
	0x10C00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
312
/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
static const u32 ice_ptypes_ipv4_il_no_l4[] = {
	0x60000000, 0x18043008, 0x80000002, 0x6010c021,
	0x00000008, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00139800, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
324
/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
	0x00000000, 0x00000000, 0x43000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x02300000, 0x00000540, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
336
/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
static const u32 ice_ptypes_ipv6_il_no_l4[] = {
	0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
	0x00000430, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x4e600000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
348
/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
	0x00000800, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
360
/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00410000, 0x90842000, 0x00000007,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
374
/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00820000, 0x21084000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
386
/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x01040000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
398
/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
	0x10000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
410
/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
	0x00000000, 0x02040408, 0x40000102, 0x08101020,
	0x00000408, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x42108000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
422
/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
434
/* Packet types for packets with an Innermost/Last MAC header */
static const u32 ice_ptypes_mac_il[] = {
	0x00000000, 0x20000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
446
/* Packet types for GTPC */
static const u32 ice_ptypes_gtpc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x000001E0, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
458
/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000060, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
470
/* Packet types for GTPU.
 * Each entry pairs a GTPU PTYPE with the GTP-session attribute flag.
 */
static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_SESSION },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
};
494
/* GTPU PTYPEs tagged with the PDU-session-container extension-header
 * attribute.
 */
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_PDU_EH },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
};
517
/* GTPU PTYPEs tagged with the downlink-direction attribute. */
static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_DOWNLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
};
540
/* GTPU PTYPEs tagged with the uplink-direction attribute. */
static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
	{ ICE_MAC_IPV4_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV4_ICMP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_FRAG,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_PAY,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_TCP,	  ICE_PTYPE_ATTR_GTP_UPLINK },
	{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
};
563
/* Packet types for GTPU with a TEID (all GTPU-encapsulated inner flows) */
static const u32 ice_ptypes_gtpu[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
574
/* Packet types for pppoe */
static const u32 ice_ptypes_pppoe[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
586
/* Packet types for packets with PFCP NODE header */
static const u32 ice_ptypes_pfcp_node[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x80000000, 0x00000002,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
598
/* Packet types for packets with PFCP SESSION header */
static const u32 ice_ptypes_pfcp_session[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000005,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
610
/* Packet types for l2tpv3 */
static const u32 ice_ptypes_l2tpv3[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000300,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
622
/* Packet types for esp */
static const u32 ice_ptypes_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000003, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
634
/* Packet types for ah */
static const u32 ice_ptypes_ah[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x0000000C, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
646
/* Packet types for packets with NAT_T ESP header */
static const u32 ice_ptypes_nat_t_esp[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000030, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
658
/* Packet types for non-IP packets with an Outer/First/Single MAC header */
static const u32 ice_ptypes_mac_non_ip_ofos[] = {
	0x00000846, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
669
/* Packet types for GTPU packets carrying no inner IP payload */
static const u32 ice_ptypes_gtpu_no_ip[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000600, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
680
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;	/* HW block the profile targets */
	u16 entry_length;	/* # of bytes formatted entry will require */
	u8 es_cnt;		/* number of es[] words in use — presumed; verify */
	struct ice_flow_prof *prof;	/* profile being built */

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];	/* extraction sequence */
	/* attributes can be used to add attributes to a particular PTYPE */
	const struct ice_ptype_attributes *attr;
	u16 attr_cnt;		/* number of entries in attr[] */

	u16 mask[ICE_MAX_FV_WORDS];	/* per-word masks paired with es[] */
	ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);	/* relevant PTYPEs */
};
699
/* Headers that, when present, are handled on the inner/last segment for RSS —
 * presumed from the name; confirm against the RSS segment-setup code.
 */
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
	(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
	ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
	ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
	ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
	ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP)

/* All L2 header flags */
#define ICE_FLOW_SEG_HDRS_L2_MASK	\
	(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
/* All L3 header flags; a segment may select at most one of these */
#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
	 ICE_FLOW_SEG_HDR_ARP)
/* All L4 header flags; a segment may select at most one of these */
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
	 ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER	\
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
718
719 /**
720 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
721 * @segs: array of one or more packet segments that describe the flow
722 * @segs_cnt: number of packet segments provided
723 */
724 static enum ice_status
ice_flow_val_hdrs(struct ice_flow_seg_info * segs,u8 segs_cnt)725 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
726 {
727 u8 i;
728
729 for (i = 0; i < segs_cnt; i++) {
730 /* Multiple L3 headers */
731 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
732 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
733 return ICE_ERR_PARAM;
734
735 /* Multiple L4 headers */
736 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
737 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
738 return ICE_ERR_PARAM;
739 }
740
741 return ICE_SUCCESS;
742 }
743
/* Sizes (in bytes) of fixed known protocol headers without header options */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN	(ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_ARP	28
#define ICE_FLOW_PROT_HDR_SZ_ICMP	8
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
754
755 /**
756 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
757 * @params: information about the flow to be processed
758 * @seg: index of packet segment whose header size is to be determined
759 */
ice_flow_calc_seg_sz(struct ice_flow_prof_params * params,u8 seg)760 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
761 {
762 u16 sz;
763
764 /* L2 headers */
765 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
766 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
767
768 /* L3 headers */
769 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
770 sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
771 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
772 sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
773 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
774 sz += ICE_FLOW_PROT_HDR_SZ_ARP;
775 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
776 /* A L3 header is required if L4 is specified */
777 return 0;
778
779 /* L4 headers */
780 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
781 sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
782 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
783 sz += ICE_FLOW_PROT_HDR_SZ_TCP;
784 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
785 sz += ICE_FLOW_PROT_HDR_SZ_UDP;
786 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
787 sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
788
789 return sz;
790 }
791
/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 * It starts from an all-ones PTYPE bitmap and intersects (AND) it with the
 * PTYPE group of every requested header; for headers that must be absent it
 * clears (ANDNOT) the corresponding group.  Branch order matters: the
 * IPv4/IPv6 else-if ladder picks the most specific PTYPE group first.
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof;
	u8 i;

	/* Start from "all PTYPEs" and narrow down per header below */
	ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
		   ICE_NONDMA_MEM);

	prof = params->prof;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		const ice_bitmap_t *src;
		u32 hdrs;

		hdrs = prof->segs[i].hdrs;

		if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
			/* outer (seg 0) vs. inner MAC PTYPE groups */
			src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
				(const ice_bitmap_t *)ice_ptypes_mac_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}

		if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
			/* VLAN PTYPEs only tracked for inner segments */
			src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}

		if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
			/* ARP exists only as an outer header */
			ice_and_bitmap(params->ptypes, params->ptypes,
				       (const ice_bitmap_t *)ice_ptypes_arp_of,
				       ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
			src = (const ice_bitmap_t *)ice_ptypes_pppoe;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}
		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
		    (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
			src = i ?
				(const ice_bitmap_t *)ice_ptypes_ipv4_il :
				(const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
			src = i ?
				(const ice_bitmap_t *)ice_ptypes_ipv6_il :
				(const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
			/* IPv4 without TCP/UDP/SCTP -> "no L4" PTYPE group */
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
				(const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
				(const ice_bitmap_t *)ice_ptypes_ipv4_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
			/* IPv6 without TCP/UDP/SCTP -> "no L4" PTYPE group */
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
				(const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
				(const ice_bitmap_t *)ice_ptypes_ipv6_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
			src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
			src = (const ice_bitmap_t *)ice_ptypes_pppoe;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else {
			/* PPPoE not requested: exclude its PTYPEs */
			src = (const ice_bitmap_t *)ice_ptypes_pppoe;
			ice_andnot_bitmap(params->ptypes, params->ptypes, src,
					  ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
			src = (const ice_bitmap_t *)ice_ptypes_udp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
			ice_and_bitmap(params->ptypes, params->ptypes,
				       (const ice_bitmap_t *)ice_ptypes_tcp_il,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
			src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
			src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
				(const ice_bitmap_t *)ice_ptypes_icmp_il;
			ice_and_bitmap(params->ptypes, params->ptypes, src,
				       ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			/* GRE is only meaningful as an outer header */
			if (!i) {
				src = (const ice_bitmap_t *)ice_ptypes_gre_of;
				ice_and_bitmap(params->ptypes, params->ptypes,
					       src, ICE_FLOW_PTYPE_MAX);
			}
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpc;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with downlink */
			params->attr = ice_attr_gtpu_down;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with uplink */
			params->attr = ice_attr_gtpu_up;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet with Extension Header */
			params->attr = ice_attr_gtpu_eh;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
			src = (const ice_bitmap_t *)ice_ptypes_gtpu;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);

			/* Attributes for GTP packet without Extension Header */
			params->attr = ice_attr_gtpu_session;
			params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
		} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
			src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
			src = (const ice_bitmap_t *)ice_ptypes_esp;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
			src = (const ice_bitmap_t *)ice_ptypes_ah;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
			src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
			/* Node vs. session PTYPEs depending on sub-flag */
			if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
				src =
				(const ice_bitmap_t *)ice_ptypes_pfcp_node;
			else
				src =
				(const ice_bitmap_t *)ice_ptypes_pfcp_session;

			ice_and_bitmap(params->ptypes, params->ptypes,
				       src, ICE_FLOW_PTYPE_MAX);
		} else {
			/* PFCP not requested: exclude both PFCP PTYPE groups */
			src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
			ice_andnot_bitmap(params->ptypes, params->ptypes,
					  src, ICE_FLOW_PTYPE_MAX);

			src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
			ice_andnot_bitmap(params->ptypes, params->ptypes,
					  src, ICE_FLOW_PTYPE_MAX);
		}
	}

	return ICE_SUCCESS;
}
1002
1003 /**
1004 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
1005 * @hw: pointer to the HW struct
1006 * @params: information about the flow to be processed
1007 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1008 *
1009 * This function will allocate an extraction sequence entries for a DWORD size
1010 * chunk of the packet flags.
1011 */
1012 static enum ice_status
ice_flow_xtract_pkt_flags(struct ice_hw * hw,struct ice_flow_prof_params * params,enum ice_flex_mdid_pkt_flags flags)1013 ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1014 struct ice_flow_prof_params *params,
1015 enum ice_flex_mdid_pkt_flags flags)
1016 {
1017 u8 fv_words = hw->blk[params->blk].es.fvw;
1018 u8 idx;
1019
1020 /* Make sure the number of extraction sequence entries required does not
1021 * exceed the block's capacity.
1022 */
1023 if (params->es_cnt >= fv_words)
1024 return ICE_ERR_MAX_LIMIT;
1025
1026 /* some blocks require a reversed field vector layout */
1027 if (hw->blk[params->blk].es.reverse)
1028 idx = fv_words - params->es_cnt - 1;
1029 else
1030 idx = params->es_cnt;
1031
1032 params->es[idx].prot_id = ICE_PROT_META_ID;
1033 params->es[idx].off = flags;
1034 params->es_cnt++;
1035
1036 return ICE_SUCCESS;
1037 }
1038
/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 * @match: bitfield of all fields
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fill the entries with protocol ID and offset information.
 *
 * Returns ICE_ERR_NOT_IMPL for an unsupported field, ICE_ERR_MAX_LIMIT when
 * the block's field vector is exhausted, ICE_SUCCESS otherwise.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
		    u8 seg, enum ice_flow_field fld, u64 match)
{
	/* sibling field that shares an extraction entry, if any */
	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
	u8 fv_words = hw->blk[params->blk].es.fvw;
	struct ice_flow_fld_info *flds;
	u16 cnt, ese_bits, i;
	u16 sib_mask = 0;
	u16 mask;
	u16 off;

	flds = params->prof->segs[seg].fields;

	/* Map the requested field to a HW protocol ID; outer (seg == 0)
	 * and inner segments use different protocol IDs where applicable.
	 */
	switch (fld) {
	case ICE_FLOW_FIELD_IDX_ETH_DA:
	case ICE_FLOW_FIELD_IDX_ETH_SA:
	case ICE_FLOW_FIELD_IDX_S_VLAN:
	case ICE_FLOW_FIELD_IDX_C_VLAN:
		prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
		break;
	case ICE_FLOW_FIELD_IDX_ETH_TYPE:
		prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_TTL:
	case ICE_FLOW_FIELD_IDX_IPV4_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
		else
			sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_TTL:
	case ICE_FLOW_FIELD_IDX_IPV6_PROT:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;

		/* TTL and PROT share the same extraction seq. entry.
		 * Each is considered a sibling to the other in terms of sharing
		 * the same extraction sequence entry.
		 */
		if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
			sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
		else
			sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;

		/* If the sibling field is also included, that field's
		 * mask needs to be included.
		 */
		if (match & BIT(sib))
			sib_mask = ice_flds_info[sib].mask;
		break;
	case ICE_FLOW_FIELD_IDX_IPV4_SA:
	case ICE_FLOW_FIELD_IDX_IPV4_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_DA:
	case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
	case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
	case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
		prot_id = ICE_PROT_TCP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
		prot_id = ICE_PROT_SCTP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_GTPC_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
	case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
		/* GTP is accessed through UDP OF protocol */
		prot_id = ICE_PROT_UDP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
		prot_id = ICE_PROT_PPPOE;
		break;
	case ICE_FLOW_FIELD_IDX_PFCP_SEID:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
		prot_id = ICE_PROT_L2TPV3;
		break;
	case ICE_FLOW_FIELD_IDX_ESP_SPI:
		prot_id = ICE_PROT_ESP_F;
		break;
	case ICE_FLOW_FIELD_IDX_AH_SPI:
		prot_id = ICE_PROT_ESP_2;
		break;
	case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_ARP_SIP:
	case ICE_FLOW_FIELD_IDX_ARP_DIP:
	case ICE_FLOW_FIELD_IDX_ARP_SHA:
	case ICE_FLOW_FIELD_IDX_ARP_DHA:
	case ICE_FLOW_FIELD_IDX_ARP_OP:
		prot_id = ICE_PROT_ARP_OF;
		break;
	case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
	case ICE_FLOW_FIELD_IDX_ICMP_CODE:
		/* ICMP type and code share the same extraction seq. entry */
		prot_id = (params->prof->segs[seg].hdrs &
			   ICE_FLOW_SEG_HDR_IPV4) ?
			ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
		sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
			ICE_FLOW_FIELD_IDX_ICMP_CODE :
			ICE_FLOW_FIELD_IDX_ICMP_TYPE;
		break;
	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
		prot_id = ICE_PROT_GRE_OF;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	/* Each extraction sequence entry is a word in size, and extracts a
	 * word-aligned offset from a protocol header.
	 */
	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

	flds[fld].xtrct.prot_id = prot_id;
	/* word-aligned byte offset of the field within its header */
	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
		ICE_FLOW_FV_EXTRACT_SZ;
	/* bit displacement of the field within the extracted word */
	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
	flds[fld].xtrct.idx = params->es_cnt;
	flds[fld].xtrct.mask = ice_flds_info[fld].mask;

	/* Adjust the next field-entry index after accommodating the number of
	 * entries this field consumes
	 */
	cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
				  ice_flds_info[fld].size, ese_bits);

	/* Fill in the extraction sequence entries needed for this field */
	off = flds[fld].xtrct.off;
	mask = flds[fld].xtrct.mask;
	for (i = 0; i < cnt; i++) {
		/* Only consume an extraction sequence entry if there is no
		 * sibling field associated with this field or the sibling entry
		 * already extracts the word shared with this field.
		 */
		if (sib == ICE_FLOW_FIELD_IDX_MAX ||
		    flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
		    flds[sib].xtrct.off != off) {
			u8 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= fv_words)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = prot_id;
			params->es[idx].off = off;
			/* include the sibling's mask so one entry serves both */
			params->mask[idx] = mask | sib_mask;
			params->es_cnt++;
		}

		off += ICE_FLOW_FV_EXTRACT_SZ;
	}

	return ICE_SUCCESS;
}
1252
/**
 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
 *
 * Raw fields are matched at byte offsets from the start of the packet
 * (ICE_PROT_MAC_OF_OR_S), so the segment must have a computable fixed
 * header size.  Returns ICE_ERR_MAX_LIMIT when the raw count or field
 * vector capacity is exceeded, ICE_ERR_PARAM for an invalid segment.
 */
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
		     u8 seg)
{
	u16 fv_words;
	u16 hdrs_sz;
	u8 i;

	/* Nothing to do when the segment has no raw fields */
	if (!params->prof->segs[seg].raws_cnt)
		return ICE_SUCCESS;

	if (params->prof->segs[seg].raws_cnt >
	    ARRAY_SIZE(params->prof->segs[seg].raws))
		return ICE_ERR_MAX_LIMIT;

	/* Offsets within the segment headers are not supported */
	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
	if (!hdrs_sz)
		return ICE_ERR_PARAM;

	fv_words = hw->blk[params->blk].es.fvw;

	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
		struct ice_flow_seg_fld_raw *raw;
		u16 off, cnt, j;

		raw = &params->prof->segs[seg].raws[i];

		/* Storing extraction information */
		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
		/* word-aligned byte offset of the raw match */
		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
			ICE_FLOW_FV_EXTRACT_SZ;
		/* bit displacement within the extracted word */
		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
			BITS_PER_BYTE;
		raw->info.xtrct.idx = params->es_cnt;

		/* Determine the number of field vector entries this raw field
		 * consumes.
		 */
		cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
					  (raw->info.src.last * BITS_PER_BYTE),
					  (ICE_FLOW_FV_EXTRACT_SZ *
					   BITS_PER_BYTE));
		off = raw->info.xtrct.off;
		for (j = 0; j < cnt; j++) {
			u16 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= hw->blk[params->blk].es.count ||
			    params->es_cnt >= ICE_MAX_FV_WORDS)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = raw->info.xtrct.prot_id;
			params->es[idx].off = off;
			params->es_cnt++;
			off += ICE_FLOW_FV_EXTRACT_SZ;
		}
	}

	return ICE_SUCCESS;
}
1328
1329 /**
1330 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1331 * @hw: pointer to the HW struct
1332 * @params: information about the flow to be processed
1333 *
1334 * This function iterates through all matched fields in the given segments, and
1335 * creates an extraction sequence for the fields.
1336 */
1337 static enum ice_status
ice_flow_create_xtrct_seq(struct ice_hw * hw,struct ice_flow_prof_params * params)1338 ice_flow_create_xtrct_seq(struct ice_hw *hw,
1339 struct ice_flow_prof_params *params)
1340 {
1341 enum ice_status status = ICE_SUCCESS;
1342 u8 i;
1343
1344 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1345 * packet flags
1346 */
1347 if (params->blk == ICE_BLK_ACL) {
1348 status = ice_flow_xtract_pkt_flags(hw, params,
1349 ICE_RX_MDID_PKT_FLAGS_15_0);
1350 if (status)
1351 return status;
1352 }
1353
1354 for (i = 0; i < params->prof->segs_cnt; i++) {
1355 u64 match = params->prof->segs[i].match;
1356 enum ice_flow_field j;
1357
1358 ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1359 ICE_FLOW_FIELD_IDX_MAX) {
1360 status = ice_flow_xtract_fld(hw, params, i, j, match);
1361 if (status)
1362 return status;
1363 ice_clear_bit(j, (ice_bitmap_t *)&match);
1364 }
1365
1366 /* Process raw matching bytes */
1367 status = ice_flow_xtract_raws(hw, params, i);
1368 if (status)
1369 return status;
1370 }
1371
1372 return status;
1373 }
1374
1375 /**
1376 * ice_flow_sel_acl_scen - returns the specific scenario
1377 * @hw: pointer to the hardware structure
1378 * @params: information about the flow to be processed
1379 *
1380 * This function will return the specific scenario based on the
1381 * params passed to it
1382 */
1383 static enum ice_status
ice_flow_sel_acl_scen(struct ice_hw * hw,struct ice_flow_prof_params * params)1384 ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1385 {
1386 /* Find the best-fit scenario for the provided match width */
1387 struct ice_acl_scen *cand_scen = NULL, *scen;
1388
1389 if (!hw->acl_tbl)
1390 return ICE_ERR_DOES_NOT_EXIST;
1391
1392 /* Loop through each scenario and match against the scenario width
1393 * to select the specific scenario
1394 */
1395 LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1396 if (scen->eff_width >= params->entry_length &&
1397 (!cand_scen || cand_scen->eff_width > scen->eff_width))
1398 cand_scen = scen;
1399 if (!cand_scen)
1400 return ICE_ERR_DOES_NOT_EXIST;
1401
1402 params->prof->cfg.scen = cand_scen;
1403
1404 return ICE_SUCCESS;
1405 }
1406
/**
 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
 * @params: information about the flow to be processed
 *
 * Walks every matched field and raw field of all segments, assigning each a
 * position in the ACL entry: range-checked fields get a range-checker index,
 * all others get a byte-selection offset.  Records the resulting entry length
 * in @params for later scenario selection.
 */
static enum ice_status
ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
{
	u16 index, i, range_idx = 0;

	/* Byte-selection offsets start after the reserved prefix */
	index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		struct ice_flow_seg_info *seg = &params->prof->segs[i];
		u8 j;

		ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
				     ICE_FLOW_FIELD_IDX_MAX) {
			struct ice_flow_fld_info *fld = &seg->fields[j];

			fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;

			if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
				fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;

				/* Range checking only supported for single
				 * words
				 */
				if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
							fld->xtrct.disp,
							BITS_PER_BYTE * 2) > 1)
					return ICE_ERR_PARAM;

				/* Ranges must define low and high values */
				if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
				    fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
					return ICE_ERR_PARAM;

				/* Range fields consume a range checker slot */
				fld->entry.val = range_idx++;
			} else {
				/* Store adjusted byte-length of field for later
				 * use, taking into account potential
				 * non-byte-aligned displacement
				 */
				fld->entry.last = DIVIDE_AND_ROUND_UP
					(ice_flds_info[j].size +
					 (fld->xtrct.disp % BITS_PER_BYTE),
					 BITS_PER_BYTE);
				fld->entry.val = index;
				index += fld->entry.last;
			}
		}

		for (j = 0; j < seg->raws_cnt; j++) {
			struct ice_flow_seg_fld_raw *raw = &seg->raws[j];

			raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
			raw->info.entry.val = index;
			raw->info.entry.last = raw->info.src.last;
			index += raw->info.entry.last;
		}
	}

	/* Currently only support using the byte selection base, which only
	 * allows for an effective entry size of 30 bytes. Reject anything
	 * larger.
	 */
	if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
		return ICE_ERR_PARAM;

	/* Only 8 range checkers per profile, reject anything trying to use
	 * more
	 */
	if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
		return ICE_ERR_PARAM;

	/* Store # bytes required for entry for later use */
	params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;

	return ICE_SUCCESS;
}
1487
1488 /**
1489 * ice_flow_proc_segs - process all packet segments associated with a profile
1490 * @hw: pointer to the HW struct
1491 * @params: information about the flow to be processed
1492 */
1493 static enum ice_status
ice_flow_proc_segs(struct ice_hw * hw,struct ice_flow_prof_params * params)1494 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1495 {
1496 enum ice_status status;
1497
1498 status = ice_flow_proc_seg_hdrs(params);
1499 if (status)
1500 return status;
1501
1502 status = ice_flow_create_xtrct_seq(hw, params);
1503 if (status)
1504 return status;
1505
1506 switch (params->blk) {
1507 case ICE_BLK_FD:
1508 case ICE_BLK_RSS:
1509 status = ICE_SUCCESS;
1510 break;
1511 case ICE_BLK_ACL:
1512 status = ice_flow_acl_def_entry_frmt(params);
1513 if (status)
1514 return status;
1515 status = ice_flow_sel_acl_scen(hw, params);
1516 if (status)
1517 return status;
1518 break;
1519 default:
1520 return ICE_ERR_NOT_IMPL;
1521 }
1522
1523 return status;
1524 }
1525
/* Condition flags for ice_flow_find_prof_conds() */
#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001	/* also compare matched fields */
#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002	/* require VSI association */
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004	/* ignore flow direction */
1529
1530 /**
1531 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1532 * @hw: pointer to the HW struct
1533 * @blk: classification stage
1534 * @dir: flow direction
1535 * @segs: array of one or more packet segments that describe the flow
1536 * @segs_cnt: number of packet segments provided
1537 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1538 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1539 */
1540 static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw * hw,enum ice_block blk,enum ice_flow_dir dir,struct ice_flow_seg_info * segs,u8 segs_cnt,u16 vsi_handle,u32 conds)1541 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1542 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1543 u8 segs_cnt, u16 vsi_handle, u32 conds)
1544 {
1545 struct ice_flow_prof *p, *prof = NULL;
1546
1547 ice_acquire_lock(&hw->fl_profs_locks[blk]);
1548 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1549 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1550 segs_cnt && segs_cnt == p->segs_cnt) {
1551 u8 i;
1552
1553 /* Check for profile-VSI association if specified */
1554 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1555 ice_is_vsi_valid(hw, vsi_handle) &&
1556 !ice_is_bit_set(p->vsis, vsi_handle))
1557 continue;
1558
1559 /* Protocol headers must be checked. Matched fields are
1560 * checked if specified.
1561 */
1562 for (i = 0; i < segs_cnt; i++)
1563 if (segs[i].hdrs != p->segs[i].hdrs ||
1564 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1565 segs[i].match != p->segs[i].match))
1566 break;
1567
1568 /* A match is found if all segments are matched */
1569 if (i == segs_cnt) {
1570 prof = p;
1571 break;
1572 }
1573 }
1574 ice_release_lock(&hw->fl_profs_locks[blk]);
1575
1576 return prof;
1577 }
1578
1579 /**
1580 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1581 * @hw: pointer to the HW struct
1582 * @blk: classification stage
1583 * @dir: flow direction
1584 * @segs: array of one or more packet segments that describe the flow
1585 * @segs_cnt: number of packet segments provided
1586 */
1587 u64
ice_flow_find_prof(struct ice_hw * hw,enum ice_block blk,enum ice_flow_dir dir,struct ice_flow_seg_info * segs,u8 segs_cnt)1588 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1589 struct ice_flow_seg_info *segs, u8 segs_cnt)
1590 {
1591 struct ice_flow_prof *p;
1592
1593 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1594 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1595
1596 return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1597 }
1598
1599 /**
1600 * ice_flow_find_prof_id - Look up a profile with given profile ID
1601 * @hw: pointer to the HW struct
1602 * @blk: classification stage
1603 * @prof_id: unique ID to identify this flow profile
1604 */
1605 static struct ice_flow_prof *
ice_flow_find_prof_id(struct ice_hw * hw,enum ice_block blk,u64 prof_id)1606 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1607 {
1608 struct ice_flow_prof *p;
1609
1610 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1611 if (p->id == prof_id)
1612 return p;
1613
1614 return NULL;
1615 }
1616
1617 /**
1618 * ice_dealloc_flow_entry - Deallocate flow entry memory
1619 * @hw: pointer to the HW struct
1620 * @entry: flow entry to be removed
1621 */
1622 static void
ice_dealloc_flow_entry(struct ice_hw * hw,struct ice_flow_entry * entry)1623 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1624 {
1625 if (!entry)
1626 return;
1627
1628 if (entry->entry)
1629 ice_free(hw, entry->entry);
1630
1631 if (entry->range_buf) {
1632 ice_free(hw, entry->range_buf);
1633 entry->range_buf = NULL;
1634 }
1635
1636 if (entry->acts) {
1637 ice_free(hw, entry->acts);
1638 entry->acts = NULL;
1639 entry->acts_cnt = 0;
1640 }
1641
1642 ice_free(hw, entry);
1643 }
1644
1645 /**
1646 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
1647 * @hw: pointer to the HW struct
1648 * @blk: classification stage
1649 * @prof_id: the profile ID handle
1650 * @hw_prof_id: pointer to variable to receive the HW profile ID
1651 */
1652 enum ice_status
ice_flow_get_hw_prof(struct ice_hw * hw,enum ice_block blk,u64 prof_id,u8 * hw_prof_id)1653 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1654 u8 *hw_prof_id)
1655 {
1656 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1657 struct ice_prof_map *map;
1658
1659 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1660 map = ice_search_prof_id(hw, blk, prof_id);
1661 if (map) {
1662 *hw_prof_id = map->prof_id;
1663 status = ICE_SUCCESS;
1664 }
1665 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
1666 return status;
1667 }
1668
1669 #define ICE_ACL_INVALID_SCEN 0x3f
1670
1671 /**
1672 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated to any PF
1673 * @hw: pointer to the hardware structure
1674 * @prof: pointer to flow profile
1675 * @buf: destination buffer function writes partial extraction sequence to
1676 *
1677 * returns ICE_SUCCESS if no PF is associated to the given profile
1678 * returns ICE_ERR_IN_USE if at least one PF is associated to the given profile
1679 * returns other error code for real error
1680 */
1681 static enum ice_status
ice_flow_acl_is_prof_in_use(struct ice_hw * hw,struct ice_flow_prof * prof,struct ice_aqc_acl_prof_generic_frmt * buf)1682 ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1683 struct ice_aqc_acl_prof_generic_frmt *buf)
1684 {
1685 enum ice_status status;
1686 u8 prof_id = 0;
1687
1688 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1689 if (status)
1690 return status;
1691
1692 status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1693 if (status)
1694 return status;
1695
1696 /* If all PF's associated scenarios are all 0 or all
1697 * ICE_ACL_INVALID_SCEN (63) for the given profile then the latter has
1698 * not been configured yet.
1699 */
1700 if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1701 buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1702 buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1703 buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1704 return ICE_SUCCESS;
1705
1706 if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1707 buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1708 buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1709 buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1710 buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1711 buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1712 buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1713 buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1714 return ICE_SUCCESS;
1715
1716 return ICE_ERR_IN_USE;
1717 }
1718
1719 /**
1720 * ice_flow_acl_free_act_cntr - Free the ACL rule's actions
1721 * @hw: pointer to the hardware structure
1722 * @acts: array of actions to be performed on a match
1723 * @acts_cnt: number of actions
1724 */
1725 static enum ice_status
ice_flow_acl_free_act_cntr(struct ice_hw * hw,struct ice_flow_action * acts,u8 acts_cnt)1726 ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1727 u8 acts_cnt)
1728 {
1729 int i;
1730
1731 for (i = 0; i < acts_cnt; i++) {
1732 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1733 acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1734 acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1735 struct ice_acl_cntrs cntrs;
1736 enum ice_status status;
1737
1738 cntrs.bank = 0; /* Only bank0 for the moment */
1739 cntrs.first_cntr =
1740 LE16_TO_CPU(acts[i].data.acl_act.value);
1741 cntrs.last_cntr =
1742 LE16_TO_CPU(acts[i].data.acl_act.value);
1743
1744 if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1745 cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1746 else
1747 cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1748
1749 status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1750 if (status)
1751 return status;
1752 }
1753 }
1754 return ICE_SUCCESS;
1755 }
1756
1757 /**
1758 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1759 * @hw: pointer to the hardware structure
1760 * @prof: pointer to flow profile
1761 *
1762 * Disassociate the scenario from the profile for the PF of the VSI.
1763 */
1764 static enum ice_status
ice_flow_acl_disassoc_scen(struct ice_hw * hw,struct ice_flow_prof * prof)1765 ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1766 {
1767 struct ice_aqc_acl_prof_generic_frmt buf;
1768 enum ice_status status = ICE_SUCCESS;
1769 u8 prof_id = 0;
1770
1771 ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1772
1773 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1774 if (status)
1775 return status;
1776
1777 status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1778 if (status)
1779 return status;
1780
1781 /* Clear scenario for this PF */
1782 buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1783 status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1784
1785 return status;
1786 }
1787
1788 /**
1789 * ice_flow_rem_entry_sync - Remove a flow entry
1790 * @hw: pointer to the HW struct
1791 * @blk: classification stage
1792 * @entry: flow entry to be removed
1793 */
1794 static enum ice_status
ice_flow_rem_entry_sync(struct ice_hw * hw,enum ice_block blk,struct ice_flow_entry * entry)1795 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1796 struct ice_flow_entry *entry)
1797 {
1798 if (!entry)
1799 return ICE_ERR_BAD_PTR;
1800
1801 if (blk == ICE_BLK_ACL) {
1802 enum ice_status status;
1803
1804 if (!entry->prof)
1805 return ICE_ERR_BAD_PTR;
1806
1807 status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1808 entry->scen_entry_idx);
1809 if (status)
1810 return status;
1811
1812 /* Checks if we need to release an ACL counter. */
1813 if (entry->acts_cnt && entry->acts)
1814 ice_flow_acl_free_act_cntr(hw, entry->acts,
1815 entry->acts_cnt);
1816 }
1817
1818 LIST_DEL(&entry->l_entry);
1819
1820 ice_dealloc_flow_entry(hw, entry);
1821
1822 return ICE_SUCCESS;
1823 }
1824
/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @acts: array of default actions
 * @acts_cnt: number of default actions
 * @prof: stores the returned flow profile added
 *
 * Builds a SW flow profile (persistent copies of segments and actions),
 * derives its extraction sequence, and programs a matching HW profile.
 * On success ownership of the new profile transfers to the caller via
 * @prof; on failure all partial allocations are released here.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_action *acts, u8 acts_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params *params;
	enum ice_status status;
	u8 i;

	/* Actions are optional, but if a count is given the array must be
	 * present; the output pointer is mandatory.
	 */
	if (!prof || (acts_cnt && !acts))
		return ICE_ERR_BAD_PTR;

	/* params is a scratch work area; heap-allocated rather than placed
	 * on the stack
	 */
	params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
	if (!params)
		return ICE_ERR_NO_MEMORY;

	params->prof = (struct ice_flow_prof *)
		ice_malloc(hw, sizeof(*params->prof));
	if (!params->prof) {
		status = ICE_ERR_NO_MEMORY;
		goto free_params;
	}

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params->es[i].prot_id = ICE_PROT_INVALID;
		params->es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params->blk = blk;
	params->prof->id = prof_id;
	params->prof->dir = dir;
	params->prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
			   ICE_NONDMA_TO_NONDMA);

	/* Make a copy of the actions that need to be persistent in the flow
	 * profile instance.
	 */
	if (acts_cnt) {
		params->prof->acts = (struct ice_flow_action *)
			ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
				   ICE_NONDMA_TO_NONDMA);

		if (!params->prof->acts) {
			status = ICE_ERR_NO_MEMORY;
			goto out;
		}
	}

	/* Translate the segment match criteria into ptypes and an
	 * extraction sequence inside params
	 */
	status = ice_flow_proc_segs(hw, params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
			      params->attr, params->attr_cnt, params->es,
			      params->mask);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params->prof->entries);
	ice_init_lock(&params->prof->entries_lock);
	*prof = params->prof;

out:
	/* On failure release the partially built profile; on success only
	 * the scratch buffer is freed and params->prof lives on via *prof
	 */
	if (status) {
		if (params->prof->acts)
			ice_free(hw, params->prof->acts);
		ice_free(hw, params->prof);
	}
free_params:
	ice_free(hw, params);

	return status;
}
1926
/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Removes all remaining entries of the profile, tears down the ACL
 * scenario association and range checkers when applicable, and finally
 * removes the HW profile and frees the SW profile object.
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!LIST_EMPTY(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		ice_acquire_lock(&prof->entries_lock);

		/* NOTE(review): an error here only stops this loop; status is
		 * overwritten by the calls below, so entry-removal failures
		 * are effectively dropped — confirm this is intended
		 */
		LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
					 l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		ice_release_lock(&prof->entries_lock);
	}

	if (blk == ICE_BLK_ACL) {
		struct ice_aqc_acl_profile_ranges query_rng_buf;
		struct ice_aqc_acl_prof_generic_frmt buf;
		u8 prof_id = 0;

		/* Disassociate the scenario from the profile for the PF */
		status = ice_flow_acl_disassoc_scen(hw, prof);
		if (status)
			return status;

		/* Clear the range-checker if the profile ID is no longer
		 * used by any PF
		 */
		status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
		if (status && status != ICE_ERR_IN_USE) {
			return status;
		} else if (!status) {
			/* Clear the range-checker value for profile ID by
			 * programming an all-zero configuration
			 */
			ice_memset(&query_rng_buf, 0,
				   sizeof(struct ice_aqc_acl_profile_ranges),
				   ICE_NONDMA_MEM);

			status = ice_flow_get_hw_prof(hw, blk, prof->id,
						      &prof_id);
			if (status)
				return status;

			status = ice_prog_acl_prof_ranges(hw, prof_id,
							  &query_rng_buf, NULL);
			if (status)
				return status;
		}
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		/* prof and everything it owns is only freed when the HW
		 * removal succeeded
		 */
		LIST_DEL(&prof->l_entry);
		ice_destroy_lock(&prof->entries_lock);
		if (prof->acts)
			ice_free(hw, prof->acts);
		ice_free(hw, prof);
	}

	return status;
}
2003
2004 /**
2005 * ice_flow_acl_set_xtrct_seq_fld - Populate xtrct seq for single field
2006 * @buf: Destination buffer function writes partial xtrct sequence to
2007 * @info: Info about field
2008 */
2009 static void
ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt * buf,struct ice_flow_fld_info * info)2010 ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2011 struct ice_flow_fld_info *info)
2012 {
2013 u16 dst, i;
2014 u8 src;
2015
2016 src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2017 info->xtrct.disp / BITS_PER_BYTE;
2018 dst = info->entry.val;
2019 for (i = 0; i < info->entry.last; i++)
2020 /* HW stores field vector words in LE, convert words back to BE
2021 * so constructed entries will end up in network order
2022 */
2023 buf->byte_selection[dst++] = src++ ^ 1;
2024 }
2025
/**
 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
 * @hw: pointer to the hardware structure
 * @prof: pointer to flow profile
 *
 * Programs the profile's byte/word selection (extraction sequence) into
 * hardware — done once per profile regardless of how many PFs use it —
 * and records this PF's scenario number in the profile format.
 */
static enum ice_status
ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
{
	struct ice_aqc_acl_prof_generic_frmt buf;
	struct ice_flow_fld_info *info;
	enum ice_status status;
	u8 prof_id = 0;
	u16 i;

	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
	if (status)
		return status;

	/* ICE_ERR_IN_USE means another PF already programmed the profile
	 * format; buf then holds the queried format and only this PF's
	 * scenario slot below needs updating
	 */
	status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
	if (status && status != ICE_ERR_IN_USE)
		return status;

	if (!status) {
		/* Program the profile dependent configuration. This is done
		 * only once regardless of the number of PFs using that profile
		 */
		ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);

		for (i = 0; i < prof->segs_cnt; i++) {
			struct ice_flow_seg_info *seg = &prof->segs[i];
			u16 j;

			/* Walk every matched field of this segment */
			ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
					     ICE_FLOW_FIELD_IDX_MAX) {
				info = &seg->fields[j];

				/* Range-checked fields select whole FV words;
				 * all other fields are extracted byte-wise
				 */
				if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
					buf.word_selection[info->entry.val] =
						info->xtrct.idx;
				else
					ice_flow_acl_set_xtrct_seq_fld(&buf,
								       info);
			}

			/* Raw (offset-based) matches also go byte-wise */
			for (j = 0; j < seg->raws_cnt; j++) {
				info = &seg->raws[j].info;
				ice_flow_acl_set_xtrct_seq_fld(&buf, info);
			}
		}

		/* Start with every PF's scenario marked unused */
		ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
			   ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
			   ICE_NONDMA_MEM);
	}

	/* Update the current PF */
	buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
	status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);

	return status;
}
2089
2090 /**
2091 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2092 * @hw: pointer to the hardware structure
2093 * @blk: classification stage
2094 * @vsi_handle: software VSI handle
2095 * @vsig: target VSI group
2096 *
2097 * Assumption: the caller has already verified that the VSI to
2098 * be added has the same characteristics as the VSIG and will
2099 * thereby have access to all resources added to that VSIG.
2100 */
2101 enum ice_status
ice_flow_assoc_vsig_vsi(struct ice_hw * hw,enum ice_block blk,u16 vsi_handle,u16 vsig)2102 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2103 u16 vsig)
2104 {
2105 enum ice_status status;
2106
2107 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2108 return ICE_ERR_PARAM;
2109
2110 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2111 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2112 vsig);
2113 ice_release_lock(&hw->fl_profs_locks[blk]);
2114
2115 return status;
2116 }
2117
2118 /**
2119 * ice_flow_assoc_prof - associate a VSI with a flow profile
2120 * @hw: pointer to the hardware structure
2121 * @blk: classification stage
2122 * @prof: pointer to flow profile
2123 * @vsi_handle: software VSI handle
2124 *
2125 * Assumption: the caller has acquired the lock to the profile list
2126 * and the software VSI handle has been validated
2127 */
2128 enum ice_status
ice_flow_assoc_prof(struct ice_hw * hw,enum ice_block blk,struct ice_flow_prof * prof,u16 vsi_handle)2129 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2130 struct ice_flow_prof *prof, u16 vsi_handle)
2131 {
2132 enum ice_status status = ICE_SUCCESS;
2133
2134 if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2135 if (blk == ICE_BLK_ACL) {
2136 status = ice_flow_acl_set_xtrct_seq(hw, prof);
2137 if (status)
2138 return status;
2139 }
2140 status = ice_add_prof_id_flow(hw, blk,
2141 ice_get_hw_vsi_num(hw,
2142 vsi_handle),
2143 prof->id);
2144 if (!status)
2145 ice_set_bit(vsi_handle, prof->vsis);
2146 else
2147 ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2148 status);
2149 }
2150
2151 return status;
2152 }
2153
2154 /**
2155 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2156 * @hw: pointer to the hardware structure
2157 * @blk: classification stage
2158 * @prof: pointer to flow profile
2159 * @vsi_handle: software VSI handle
2160 *
2161 * Assumption: the caller has acquired the lock to the profile list
2162 * and the software VSI handle has been validated
2163 */
2164 static enum ice_status
ice_flow_disassoc_prof(struct ice_hw * hw,enum ice_block blk,struct ice_flow_prof * prof,u16 vsi_handle)2165 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2166 struct ice_flow_prof *prof, u16 vsi_handle)
2167 {
2168 enum ice_status status = ICE_SUCCESS;
2169
2170 if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2171 status = ice_rem_prof_id_flow(hw, blk,
2172 ice_get_hw_vsi_num(hw,
2173 vsi_handle),
2174 prof->id);
2175 if (!status)
2176 ice_clear_bit(vsi_handle, prof->vsis);
2177 else
2178 ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2179 status);
2180 }
2181
2182 return status;
2183 }
2184
2185 /**
2186 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2187 * @hw: pointer to the HW struct
2188 * @blk: classification stage
2189 * @dir: flow direction
2190 * @prof_id: unique ID to identify this flow profile
2191 * @segs: array of one or more packet segments that describe the flow
2192 * @segs_cnt: number of packet segments provided
2193 * @acts: array of default actions
2194 * @acts_cnt: number of default actions
2195 * @prof: stores the returned flow profile added
2196 */
2197 enum ice_status
ice_flow_add_prof(struct ice_hw * hw,enum ice_block blk,enum ice_flow_dir dir,u64 prof_id,struct ice_flow_seg_info * segs,u8 segs_cnt,struct ice_flow_action * acts,u8 acts_cnt,struct ice_flow_prof ** prof)2198 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2199 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2200 struct ice_flow_action *acts, u8 acts_cnt,
2201 struct ice_flow_prof **prof)
2202 {
2203 enum ice_status status;
2204
2205 if (segs_cnt > ICE_FLOW_SEG_MAX)
2206 return ICE_ERR_MAX_LIMIT;
2207
2208 if (!segs_cnt)
2209 return ICE_ERR_PARAM;
2210
2211 if (!segs)
2212 return ICE_ERR_BAD_PTR;
2213
2214 status = ice_flow_val_hdrs(segs, segs_cnt);
2215 if (status)
2216 return status;
2217
2218 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2219
2220 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2221 acts, acts_cnt, prof);
2222 if (!status)
2223 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2224
2225 ice_release_lock(&hw->fl_profs_locks[blk]);
2226
2227 return status;
2228 }
2229
2230 /**
2231 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2232 * @hw: pointer to the HW struct
2233 * @blk: the block for which the flow profile is to be removed
2234 * @prof_id: unique ID of the flow profile to be removed
2235 */
2236 enum ice_status
ice_flow_rem_prof(struct ice_hw * hw,enum ice_block blk,u64 prof_id)2237 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2238 {
2239 struct ice_flow_prof *prof;
2240 enum ice_status status;
2241
2242 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2243
2244 prof = ice_flow_find_prof_id(hw, blk, prof_id);
2245 if (!prof) {
2246 status = ICE_ERR_DOES_NOT_EXIST;
2247 goto out;
2248 }
2249
2250 /* prof becomes invalid after the call */
2251 status = ice_flow_rem_prof_sync(hw, blk, prof);
2252
2253 out:
2254 ice_release_lock(&hw->fl_profs_locks[blk]);
2255
2256 return status;
2257 }
2258
2259 /**
2260 * ice_flow_find_entry - look for a flow entry using its unique ID
2261 * @hw: pointer to the HW struct
2262 * @blk: classification stage
2263 * @entry_id: unique ID to identify this flow entry
2264 *
2265 * This function looks for the flow entry with the specified unique ID in all
2266 * flow profiles of the specified classification stage. If the entry is found,
2267 * and it returns the handle to the flow entry. Otherwise, it returns
2268 * ICE_FLOW_ENTRY_ID_INVAL.
2269 */
ice_flow_find_entry(struct ice_hw * hw,enum ice_block blk,u64 entry_id)2270 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2271 {
2272 struct ice_flow_entry *found = NULL;
2273 struct ice_flow_prof *p;
2274
2275 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2276
2277 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2278 struct ice_flow_entry *e;
2279
2280 ice_acquire_lock(&p->entries_lock);
2281 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2282 if (e->id == entry_id) {
2283 found = e;
2284 break;
2285 }
2286 ice_release_lock(&p->entries_lock);
2287
2288 if (found)
2289 break;
2290 }
2291
2292 ice_release_lock(&hw->fl_profs_locks[blk]);
2293
2294 return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2295 }
2296
/**
 * ice_flow_acl_check_actions - Checks the ACL rule's actions
 * @hw: pointer to the hardware structure
 * @acts: array of actions to be performed on a match
 * @acts_cnt: number of actions
 * @cnt_alloc: indicates if an ACL counter has been allocated.
 *
 * Validates that the action list contains only supported, non-duplicate
 * action types, and allocates a hardware ACL counter for counting actions,
 * writing the allocated counter index back into the action's data.
 */
static enum ice_status
ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
			   u8 acts_cnt, bool *cnt_alloc)
{
	/* One bit per action-type value, used to reject duplicates */
	ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	int i;

	ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
	*cnt_alloc = false;

	if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
		return ICE_ERR_OUT_OF_RANGE;

	for (i = 0; i < acts_cnt; i++) {
		/* Only NOP, drop, packet-counter and forward-to-queue are
		 * accepted for ACL rules
		 */
		if (acts[i].type != ICE_FLOW_ACT_NOP &&
		    acts[i].type != ICE_FLOW_ACT_DROP &&
		    acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
		    acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
			return ICE_ERR_CFG;

		/* If the caller want to add two actions of the same type, then
		 * it is considered invalid configuration.
		 */
		if (ice_test_and_set_bit(acts[i].type, dup_check))
			return ICE_ERR_PARAM;
	}

	/* Checks if ACL counters are needed.
	 * NOTE(review): CNTR_BYTES and CNTR_PKT_BYTES are rejected by the
	 * type check above, so only ICE_FLOW_ACT_CNTR_PKT can reach the
	 * allocation below — confirm this is intended
	 */
	for (i = 0; i < acts_cnt; i++) {
		if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
		    acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
			struct ice_acl_cntrs cntrs;
			enum ice_status status;

			cntrs.amount = 1;
			cntrs.bank = 0; /* Only bank0 for the moment */

			/* Packet+byte counting requires a dual counter */
			if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
			else
				cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;

			status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
			if (status)
				return status;
			/* Counter index within the bank */
			acts[i].data.acl_act.value =
				CPU_TO_LE16(cntrs.first_cntr);
			*cnt_alloc = true;
		}
	}

	return ICE_SUCCESS;
}
2359
/**
 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
 * @fld: number of the given field
 * @info: info about field
 * @range_buf: range checker configuration buffer
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @range: Input/output param indicating which range checkers are being used
 *
 * Fills one range-checker slot with the low/high boundaries and mask taken
 * from the caller's data buffer and sets the corresponding bit in @range.
 */
static void
ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
			      struct ice_aqc_acl_profile_ranges *range_buf,
			      u8 *data, u8 *range)
{
	u16 new_mask;

	/* If not specified, default mask is all bits in field */
	new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
		    BIT(ice_flds_info[fld].size) - 1 :
		    (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;

	/* If the mask is 0, then we don't need to worry about this input
	 * range checker value.
	 */
	if (new_mask) {
		/* src.last and src.val are byte offsets into the caller's
		 * data buffer for the high and low boundary respectively;
		 * values are shifted by the field's bit displacement
		 */
		u16 new_high =
			(*(u16 *)(data + info->src.last)) << info->xtrct.disp;
		u16 new_low =
			(*(u16 *)(data + info->src.val)) << info->xtrct.disp;
		u8 range_idx = info->entry.val;

		/* Boundaries and mask are programmed in big endian */
		range_buf->checker_cfg[range_idx].low_boundary =
			CPU_TO_BE16(new_low);
		range_buf->checker_cfg[range_idx].high_boundary =
			CPU_TO_BE16(new_high);
		range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);

		/* Indicate which range checker is being used */
		*range |= BIT(range_idx);
	}
}
2400
/**
 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
 * @fld: number of the given field
 * @info: info about the field
 * @buf: buffer containing the entry
 * @dontcare: buffer containing don't care mask for entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 *
 * Copies the field's match value (and optional mask) from the caller's
 * data buffer into the entry key buffer, shifting each byte left by the
 * field's bit displacement and carrying overflow bits into the next byte.
 * Bits before and after the field within its first/last byte are marked
 * don't-care.
 */
static void
ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
			    u8 *dontcare, u8 *data)
{
	u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
	bool use_mask = false;
	u8 disp;

	src = info->src.val;
	mask = info->src.mask;
	dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
	/* Bit displacement of the field within its first byte */
	disp = info->xtrct.disp % BITS_PER_BYTE;

	if (mask != ICE_FLOW_FLD_OFF_INVAL)
		use_mask = true;

	for (k = 0; k < info->entry.last; k++, dst++) {
		/* Add overflow bits from previous byte */
		buf[dst] = (tmp_s & 0xff00) >> 8;

		/* If mask is not valid, tmp_m is always zero, so just setting
		 * dontcare to 0 (no masked bits). If mask is valid, pulls in
		 * overflow bits of mask from prev byte
		 */
		dontcare[dst] = (tmp_m & 0xff00) >> 8;

		/* If there is displacement, last byte will only contain
		 * displaced data, but there is no more data to read from user
		 * buffer, so skip so as not to potentially read beyond end of
		 * user buffer
		 */
		if (!disp || k < info->entry.last - 1) {
			/* Store shifted data to use in next byte */
			tmp_s = data[src++] << disp;

			/* Add current (shifted) byte */
			buf[dst] |= tmp_s & 0xff;

			/* Handle mask if valid. The caller's mask selects
			 * care bits; hardware wants the inverse (don't-care)
			 */
			if (use_mask) {
				tmp_m = (~data[mask++] & 0xff) << disp;
				dontcare[dst] |= tmp_m & 0xff;
			}
		}
	}

	/* Fill in don't care bits at beginning of field */
	if (disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
		for (k = 0; k < disp; k++)
			dontcare[dst] |= BIT(k);
	}

	/* Bit position just past the field's data within its last byte;
	 * ice_flds_info sizes are stored in bits
	 */
	end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;

	/* Fill in don't care bits at end of field */
	if (end_disp) {
		dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
		      info->entry.last - 1;
		for (k = end_disp; k < BITS_PER_BYTE; k++)
			dontcare[dst] |= BIT(k);
	}
}
2472
/**
 * ice_flow_acl_frmt_entry - Format ACL entry
 * @hw: pointer to the hardware structure
 * @prof: pointer to flow profile
 * @e: pointer to the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @acts: array of actions to be performed on a match
 * @acts_cnt: number of actions
 *
 * Formats the key (and key_inverse) to be matched from the data passed in,
 * along with data from the flow profile. This key/key_inverse pair makes up
 * the 'entry' for an ACL flow entry.
 *
 * On success the entry owns the key buffer, the action copies and (when
 * range checkers are used) the range buffer; on failure everything
 * allocated here is released before returning.
 */
static enum ice_status
ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
			struct ice_flow_entry *e, u8 *data,
			struct ice_flow_action *acts, u8 acts_cnt)
{
	u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
	struct ice_aqc_acl_profile_ranges *range_buf = NULL;
	enum ice_status status;
	bool cnt_alloc;
	u8 prof_id = 0;
	u16 i, buf_sz;

	status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
	if (status)
		return status;

	/* Format the result action */

	/* Validate actions; may allocate an ACL counter, which cnt_alloc
	 * tells the error path below to release
	 */
	status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
	if (status)
		return status;

	/* Default error for the allocation attempts that follow */
	status = ICE_ERR_NO_MEMORY;

	e->acts = (struct ice_flow_action *)
		ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
			   ICE_NONDMA_TO_NONDMA);
	if (!e->acts)
		goto out;

	e->acts_cnt = acts_cnt;

	/* Format the matching data */
	buf_sz = prof->cfg.scen->width;
	buf = (u8 *)ice_malloc(hw, buf_sz);
	if (!buf)
		goto out;

	dontcare = (u8 *)ice_malloc(hw, buf_sz);
	if (!dontcare)
		goto out;

	/* 'key' buffer will store both key and key_inverse, so must be twice
	 * size of buf
	 */
	key = (u8 *)ice_malloc(hw, buf_sz * 2);
	if (!key)
		goto out;

	range_buf = (struct ice_aqc_acl_profile_ranges *)
		ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
	if (!range_buf)
		goto out;

	/* Set don't care mask to all 1's to start, will zero out used bytes */
	ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);

	for (i = 0; i < prof->segs_cnt; i++) {
		struct ice_flow_seg_info *seg = &prof->segs[i];
		u8 j;

		/* Route each matched field into either the range-checker
		 * configuration or the key/don't-care buffers
		 */
		ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
				     ICE_FLOW_FIELD_IDX_MAX) {
			struct ice_flow_fld_info *info = &seg->fields[j];

			if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
				ice_flow_acl_frmt_entry_range(j, info,
							      range_buf, data,
							      &range);
			else
				ice_flow_acl_frmt_entry_fld(j, info, buf,
							    dontcare, data);
		}

		/* Raw fields are copied byte-for-byte (no bit shifting) */
		for (j = 0; j < seg->raws_cnt; j++) {
			struct ice_flow_fld_info *info = &seg->raws[j].info;
			u16 dst, src, mask, k;
			bool use_mask = false;

			src = info->src.val;
			dst = info->entry.val -
				ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
			mask = info->src.mask;

			if (mask != ICE_FLOW_FLD_OFF_INVAL)
				use_mask = true;

			for (k = 0; k < info->entry.last; k++, dst++) {
				buf[dst] = data[src++];
				if (use_mask)
					dontcare[dst] = ~data[mask++];
				else
					dontcare[dst] = 0;
			}
		}
	}

	/* Match on the profile ID byte; its don't-care byte is cleared so
	 * the ID always participates in the match
	 */
	buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
	dontcare[prof->cfg.scen->pid_idx] = 0;

	/* Format the buffer for direction flags */
	dir_flag_msk = BIT(ICE_FLG_PKT_DIR);

	if (prof->dir == ICE_FLOW_RX)
		buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;

	if (range) {
		buf[prof->cfg.scen->rng_chk_idx] = range;
		/* Mark any unused range checkers as don't care */
		dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
		/* Ownership of range_buf transfers to the entry */
		e->range_buf = range_buf;
	} else {
		ice_free(hw, range_buf);
	}

	/* Build the key/key-inverse pair from value and don't-care masks */
	status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
			     buf_sz);
	if (status)
		goto out;

	e->entry = key;
	e->entry_sz = buf_sz * 2;

out:
	/* Scratch buffers are always freed; key, range_buf and the action
	 * copies are freed only on failure, since on success they belong to
	 * the entry
	 */
	if (buf)
		ice_free(hw, buf);

	if (dontcare)
		ice_free(hw, dontcare);

	if (status && key)
		ice_free(hw, key);

	if (status && range_buf) {
		ice_free(hw, range_buf);
		e->range_buf = NULL;
	}

	if (status && e->acts) {
		ice_free(hw, e->acts);
		e->acts = NULL;
		e->acts_cnt = 0;
	}

	if (status && cnt_alloc)
		ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);

	return status;
}
2635
2636 /**
2637 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2638 * the compared data.
2639 * @prof: pointer to flow profile
2640 * @e: pointer to the comparing flow entry
2641 * @do_chg_action: decide if we want to change the ACL action
2642 * @do_add_entry: decide if we want to add the new ACL entry
2643 * @do_rem_entry: decide if we want to remove the current ACL entry
2644 *
2645 * Find an ACL scenario entry that matches the compared data. In the same time,
2646 * this function also figure out:
2647 * a/ If we want to change the ACL action
2648 * b/ If we want to add the new ACL entry
2649 * c/ If we want to remove the current ACL entry
2650 */
2651 static struct ice_flow_entry *
ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof * prof,struct ice_flow_entry * e,bool * do_chg_action,bool * do_add_entry,bool * do_rem_entry)2652 ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2653 struct ice_flow_entry *e, bool *do_chg_action,
2654 bool *do_add_entry, bool *do_rem_entry)
2655 {
2656 struct ice_flow_entry *p, *return_entry = NULL;
2657 u8 i, j;
2658
2659 /* Check if:
2660 * a/ There exists an entry with same matching data, but different
2661 * priority, then we remove this existing ACL entry. Then, we
2662 * will add the new entry to the ACL scenario.
2663 * b/ There exists an entry with same matching data, priority, and
2664 * result action, then we do nothing
2665 * c/ There exists an entry with same matching data, priority, but
2666 * different, action, then do only change the action's entry.
2667 * d/ Else, we add this new entry to the ACL scenario.
2668 */
2669 *do_chg_action = false;
2670 *do_add_entry = true;
2671 *do_rem_entry = false;
2672 LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2673 if (memcmp(p->entry, e->entry, p->entry_sz))
2674 continue;
2675
2676 /* From this point, we have the same matching_data. */
2677 *do_add_entry = false;
2678 return_entry = p;
2679
2680 if (p->priority != e->priority) {
2681 /* matching data && !priority */
2682 *do_add_entry = true;
2683 *do_rem_entry = true;
2684 break;
2685 }
2686
2687 /* From this point, we will have matching_data && priority */
2688 if (p->acts_cnt != e->acts_cnt)
2689 *do_chg_action = true;
2690 for (i = 0; i < p->acts_cnt; i++) {
2691 bool found_not_match = false;
2692
2693 for (j = 0; j < e->acts_cnt; j++)
2694 if (memcmp(&p->acts[i], &e->acts[j],
2695 sizeof(struct ice_flow_action))) {
2696 found_not_match = true;
2697 break;
2698 }
2699
2700 if (found_not_match) {
2701 *do_chg_action = true;
2702 break;
2703 }
2704 }
2705
2706 /* (do_chg_action = true) means :
2707 * matching_data && priority && !result_action
2708 * (do_chg_action = false) means :
2709 * matching_data && priority && result_action
2710 */
2711 break;
2712 }
2713
2714 return return_entry;
2715 }
2716
2717 /**
2718 * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
2719 * @p: flow priority
2720 */
2721 static enum ice_acl_entry_prio
ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)2722 ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2723 {
2724 enum ice_acl_entry_prio acl_prio;
2725
2726 switch (p) {
2727 case ICE_FLOW_PRIO_LOW:
2728 acl_prio = ICE_ACL_PRIO_LOW;
2729 break;
2730 case ICE_FLOW_PRIO_NORMAL:
2731 acl_prio = ICE_ACL_PRIO_NORMAL;
2732 break;
2733 case ICE_FLOW_PRIO_HIGH:
2734 acl_prio = ICE_ACL_PRIO_HIGH;
2735 break;
2736 default:
2737 acl_prio = ICE_ACL_PRIO_NORMAL;
2738 break;
2739 }
2740
2741 return acl_prio;
2742 }
2743
2744 /**
2745 * ice_flow_acl_union_rng_chk - Perform union operation between two
2746 * range-range checker buffers
2747 * @dst_buf: pointer to destination range checker buffer
2748 * @src_buf: pointer to source range checker buffer
2749 *
2750 * For this function, we do the union between dst_buf and src_buf
2751 * range checker buffer, and we will save the result back to dst_buf
2752 */
2753 static enum ice_status
ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges * dst_buf,struct ice_aqc_acl_profile_ranges * src_buf)2754 ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2755 struct ice_aqc_acl_profile_ranges *src_buf)
2756 {
2757 u8 i, j;
2758
2759 if (!dst_buf || !src_buf)
2760 return ICE_ERR_BAD_PTR;
2761
2762 for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2763 struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2764 bool will_populate = false;
2765
2766 in_data = &src_buf->checker_cfg[i];
2767
2768 if (!in_data->mask)
2769 break;
2770
2771 for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2772 cfg_data = &dst_buf->checker_cfg[j];
2773
2774 if (!cfg_data->mask ||
2775 !memcmp(cfg_data, in_data,
2776 sizeof(struct ice_acl_rng_data))) {
2777 will_populate = true;
2778 break;
2779 }
2780 }
2781
2782 if (will_populate) {
2783 ice_memcpy(cfg_data, in_data,
2784 sizeof(struct ice_acl_rng_data),
2785 ICE_NONDMA_TO_NONDMA);
2786 } else {
2787 /* No available slot left to program range checker */
2788 return ICE_ERR_MAX_LIMIT;
2789 }
2790 }
2791
2792 return ICE_SUCCESS;
2793 }
2794
2795 /**
2796 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2797 * @hw: pointer to the hardware structure
2798 * @prof: pointer to flow profile
2799 * @entry: double pointer to the flow entry
2800 *
2801 * For this function, we will look at the current added entries in the
2802 * corresponding ACL scenario. Then, we will perform matching logic to
2803 * see if we want to add/modify/do nothing with this new entry.
2804 */
2805 static enum ice_status
ice_flow_acl_add_scen_entry_sync(struct ice_hw * hw,struct ice_flow_prof * prof,struct ice_flow_entry ** entry)2806 ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2807 struct ice_flow_entry **entry)
2808 {
2809 bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2810 struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2811 struct ice_acl_act_entry *acts = NULL;
2812 struct ice_flow_entry *exist;
2813 enum ice_status status = ICE_SUCCESS;
2814 struct ice_flow_entry *e;
2815 u8 i;
2816
2817 if (!entry || !(*entry) || !prof)
2818 return ICE_ERR_BAD_PTR;
2819
2820 e = *entry;
2821
2822 do_chg_rng_chk = false;
2823 if (e->range_buf) {
2824 u8 prof_id = 0;
2825
2826 status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2827 &prof_id);
2828 if (status)
2829 return status;
2830
2831 /* Query the current range-checker value in FW */
2832 status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2833 NULL);
2834 if (status)
2835 return status;
2836 ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2837 sizeof(struct ice_aqc_acl_profile_ranges),
2838 ICE_NONDMA_TO_NONDMA);
2839
2840 /* Generate the new range-checker value */
2841 status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2842 if (status)
2843 return status;
2844
2845 /* Reconfigure the range check if the buffer is changed. */
2846 do_chg_rng_chk = false;
2847 if (memcmp(&query_rng_buf, &cfg_rng_buf,
2848 sizeof(struct ice_aqc_acl_profile_ranges))) {
2849 status = ice_prog_acl_prof_ranges(hw, prof_id,
2850 &cfg_rng_buf, NULL);
2851 if (status)
2852 return status;
2853
2854 do_chg_rng_chk = true;
2855 }
2856 }
2857
2858 /* Figure out if we want to (change the ACL action) and/or
2859 * (Add the new ACL entry) and/or (Remove the current ACL entry)
2860 */
2861 exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
2862 &do_add_entry, &do_rem_entry);
2863 if (do_rem_entry) {
2864 status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
2865 if (status)
2866 return status;
2867 }
2868
2869 /* Prepare the result action buffer */
2870 acts = (struct ice_acl_act_entry *)
2871 ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
2872 if (!acts)
2873 return ICE_ERR_NO_MEMORY;
2874
2875 for (i = 0; i < e->acts_cnt; i++)
2876 ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
2877 sizeof(struct ice_acl_act_entry),
2878 ICE_NONDMA_TO_NONDMA);
2879
2880 if (do_add_entry) {
2881 enum ice_acl_entry_prio prio;
2882 u8 *keys, *inverts;
2883 u16 entry_idx;
2884
2885 keys = (u8 *)e->entry;
2886 inverts = keys + (e->entry_sz / 2);
2887 prio = ice_flow_acl_convert_to_acl_prio(e->priority);
2888
2889 status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
2890 inverts, acts, e->acts_cnt,
2891 &entry_idx);
2892 if (status)
2893 goto out;
2894
2895 e->scen_entry_idx = entry_idx;
2896 LIST_ADD(&e->l_entry, &prof->entries);
2897 } else {
2898 if (do_chg_action) {
2899 /* For the action memory info, update the SW's copy of
2900 * exist entry with e's action memory info
2901 */
2902 ice_free(hw, exist->acts);
2903 exist->acts_cnt = e->acts_cnt;
2904 exist->acts = (struct ice_flow_action *)
2905 ice_calloc(hw, exist->acts_cnt,
2906 sizeof(struct ice_flow_action));
2907 if (!exist->acts) {
2908 status = ICE_ERR_NO_MEMORY;
2909 goto out;
2910 }
2911
2912 ice_memcpy(exist->acts, e->acts,
2913 sizeof(struct ice_flow_action) * e->acts_cnt,
2914 ICE_NONDMA_TO_NONDMA);
2915
2916 status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
2917 e->acts_cnt,
2918 exist->scen_entry_idx);
2919 if (status)
2920 goto out;
2921 }
2922
2923 if (do_chg_rng_chk) {
2924 /* In this case, we want to update the range checker
2925 * information of the exist entry
2926 */
2927 status = ice_flow_acl_union_rng_chk(exist->range_buf,
2928 e->range_buf);
2929 if (status)
2930 goto out;
2931 }
2932
2933 /* As we don't add the new entry to our SW DB, deallocate its
2934 * memories, and return the exist entry to the caller
2935 */
2936 ice_dealloc_flow_entry(hw, e);
2937 *(entry) = exist;
2938 }
2939 out:
2940 ice_free(hw, acts);
2941
2942 return status;
2943 }
2944
2945 /**
2946 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
2947 * @hw: pointer to the hardware structure
2948 * @prof: pointer to flow profile
2949 * @e: double pointer to the flow entry
2950 */
2951 static enum ice_status
ice_flow_acl_add_scen_entry(struct ice_hw * hw,struct ice_flow_prof * prof,struct ice_flow_entry ** e)2952 ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2953 struct ice_flow_entry **e)
2954 {
2955 enum ice_status status;
2956
2957 ice_acquire_lock(&prof->entries_lock);
2958 status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
2959 ice_release_lock(&prof->entries_lock);
2960
2961 return status;
2962 }
2963
2964 /**
2965 * ice_flow_add_entry - Add a flow entry
2966 * @hw: pointer to the HW struct
2967 * @blk: classification stage
2968 * @prof_id: ID of the profile to add a new flow entry to
2969 * @entry_id: unique ID to identify this flow entry
2970 * @vsi_handle: software VSI handle for the flow entry
2971 * @prio: priority of the flow entry
2972 * @data: pointer to a data buffer containing flow entry's match values/masks
2973 * @acts: arrays of actions to be performed on a match
2974 * @acts_cnt: number of actions
2975 * @entry_h: pointer to buffer that receives the new flow entry's handle
2976 */
2977 enum ice_status
ice_flow_add_entry(struct ice_hw * hw,enum ice_block blk,u64 prof_id,u64 entry_id,u16 vsi_handle,enum ice_flow_priority prio,void * data,struct ice_flow_action * acts,u8 acts_cnt,u64 * entry_h)2978 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
2979 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
2980 void *data, struct ice_flow_action *acts, u8 acts_cnt,
2981 u64 *entry_h)
2982 {
2983 struct ice_flow_entry *e = NULL;
2984 struct ice_flow_prof *prof;
2985 enum ice_status status = ICE_SUCCESS;
2986
2987 /* ACL entries must indicate an action */
2988 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
2989 return ICE_ERR_PARAM;
2990
2991 /* No flow entry data is expected for RSS */
2992 if (!entry_h || (!data && blk != ICE_BLK_RSS))
2993 return ICE_ERR_BAD_PTR;
2994
2995 if (!ice_is_vsi_valid(hw, vsi_handle))
2996 return ICE_ERR_PARAM;
2997
2998 ice_acquire_lock(&hw->fl_profs_locks[blk]);
2999
3000 prof = ice_flow_find_prof_id(hw, blk, prof_id);
3001 if (!prof) {
3002 status = ICE_ERR_DOES_NOT_EXIST;
3003 } else {
3004 /* Allocate memory for the entry being added and associate
3005 * the VSI to the found flow profile
3006 */
3007 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3008 if (!e)
3009 status = ICE_ERR_NO_MEMORY;
3010 else
3011 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3012 }
3013
3014 ice_release_lock(&hw->fl_profs_locks[blk]);
3015 if (status)
3016 goto out;
3017
3018 e->id = entry_id;
3019 e->vsi_handle = vsi_handle;
3020 e->prof = prof;
3021 e->priority = prio;
3022
3023 switch (blk) {
3024 case ICE_BLK_FD:
3025 case ICE_BLK_RSS:
3026 break;
3027 case ICE_BLK_ACL:
3028 /* ACL will handle the entry management */
3029 status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3030 acts_cnt);
3031 if (status)
3032 goto out;
3033
3034 status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3035 if (status)
3036 goto out;
3037
3038 break;
3039 default:
3040 status = ICE_ERR_NOT_IMPL;
3041 goto out;
3042 }
3043
3044 if (blk != ICE_BLK_ACL) {
3045 /* ACL will handle the entry management */
3046 ice_acquire_lock(&prof->entries_lock);
3047 LIST_ADD(&e->l_entry, &prof->entries);
3048 ice_release_lock(&prof->entries_lock);
3049 }
3050
3051 *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3052
3053 out:
3054 if (status && e) {
3055 if (e->entry)
3056 ice_free(hw, e->entry);
3057 ice_free(hw, e);
3058 }
3059
3060 return status;
3061 }
3062
3063 /**
3064 * ice_flow_rem_entry - Remove a flow entry
3065 * @hw: pointer to the HW struct
3066 * @blk: classification stage
3067 * @entry_h: handle to the flow entry to be removed
3068 */
ice_flow_rem_entry(struct ice_hw * hw,enum ice_block blk,u64 entry_h)3069 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3070 u64 entry_h)
3071 {
3072 struct ice_flow_entry *entry;
3073 struct ice_flow_prof *prof;
3074 enum ice_status status = ICE_SUCCESS;
3075
3076 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3077 return ICE_ERR_PARAM;
3078
3079 entry = ICE_FLOW_ENTRY_PTR((unsigned long)entry_h);
3080
3081 /* Retain the pointer to the flow profile as the entry will be freed */
3082 prof = entry->prof;
3083
3084 if (prof) {
3085 ice_acquire_lock(&prof->entries_lock);
3086 status = ice_flow_rem_entry_sync(hw, blk, entry);
3087 ice_release_lock(&prof->entries_lock);
3088 }
3089
3090 return status;
3091 }
3092
3093 /**
3094 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3095 * @seg: packet segment the field being set belongs to
3096 * @fld: field to be set
3097 * @field_type: type of the field
3098 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3099 * entry's input buffer
3100 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3101 * input buffer
3102 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3103 * entry's input buffer
3104 *
3105 * This helper function stores information of a field being matched, including
3106 * the type of the field and the locations of the value to match, the mask, and
3107 * the upper-bound value in the start of the input buffer for a flow entry.
3108 * This function should only be used for fixed-size data structures.
3109 *
3110 * This function also opportunistically determines the protocol headers to be
3111 * present based on the fields being set. Some fields cannot be used alone to
3112 * determine the protocol headers present. Sometimes, fields for particular
3113 * protocol headers are not matched. In those cases, the protocol headers
3114 * must be explicitly set.
3115 */
3116 static void
ice_flow_set_fld_ext(struct ice_flow_seg_info * seg,enum ice_flow_field fld,enum ice_flow_fld_match_type field_type,u16 val_loc,u16 mask_loc,u16 last_loc)3117 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3118 enum ice_flow_fld_match_type field_type, u16 val_loc,
3119 u16 mask_loc, u16 last_loc)
3120 {
3121 u64 bit = BIT_ULL(fld);
3122
3123 seg->match |= bit;
3124 if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3125 seg->range |= bit;
3126
3127 seg->fields[fld].type = field_type;
3128 seg->fields[fld].src.val = val_loc;
3129 seg->fields[fld].src.mask = mask_loc;
3130 seg->fields[fld].src.last = last_loc;
3131
3132 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3133 }
3134
3135 /**
3136 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3137 * @seg: packet segment the field being set belongs to
3138 * @fld: field to be set
3139 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3140 * entry's input buffer
3141 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3142 * input buffer
3143 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3144 * entry's input buffer
3145 * @range: indicate if field being matched is to be in a range
3146 *
3147 * This function specifies the locations, in the form of byte offsets from the
3148 * start of the input buffer for a flow entry, from where the value to match,
3149 * the mask value, and upper value can be extracted. These locations are then
3150 * stored in the flow profile. When adding a flow entry associated with the
3151 * flow profile, these locations will be used to quickly extract the values and
3152 * create the content of a match entry. This function should only be used for
3153 * fixed-size data structures.
3154 */
3155 void
ice_flow_set_fld(struct ice_flow_seg_info * seg,enum ice_flow_field fld,u16 val_loc,u16 mask_loc,u16 last_loc,bool range)3156 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3157 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3158 {
3159 enum ice_flow_fld_match_type t = range ?
3160 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3161
3162 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3163 }
3164
3165 /**
3166 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3167 * @seg: packet segment the field being set belongs to
3168 * @fld: field to be set
3169 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3170 * entry's input buffer
3171 * @pref_loc: location of prefix value from entry's input buffer
3172 * @pref_sz: size of the location holding the prefix value
3173 *
3174 * This function specifies the locations, in the form of byte offsets from the
3175 * start of the input buffer for a flow entry, from where the value to match
3176 * and the IPv4 prefix value can be extracted. These locations are then stored
3177 * in the flow profile. When adding flow entries to the associated flow profile,
3178 * these locations can be used to quickly extract the values to create the
3179 * content of a match entry. This function should only be used for fixed-size
3180 * data structures.
3181 */
3182 void
ice_flow_set_fld_prefix(struct ice_flow_seg_info * seg,enum ice_flow_field fld,u16 val_loc,u16 pref_loc,u8 pref_sz)3183 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3184 u16 val_loc, u16 pref_loc, u8 pref_sz)
3185 {
3186 /* For this type of field, the "mask" location is for the prefix value's
3187 * location and the "last" location is for the size of the location of
3188 * the prefix value.
3189 */
3190 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3191 pref_loc, (u16)pref_sz);
3192 }
3193
3194 /**
3195 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3196 * @seg: packet segment the field being set belongs to
3197 * @off: offset of the raw field from the beginning of the segment in bytes
3198 * @len: length of the raw pattern to be matched
3199 * @val_loc: location of the value to match from entry's input buffer
3200 * @mask_loc: location of mask value from entry's input buffer
3201 *
3202 * This function specifies the offset of the raw field to be match from the
3203 * beginning of the specified packet segment, and the locations, in the form of
3204 * byte offsets from the start of the input buffer for a flow entry, from where
3205 * the value to match and the mask value to be extracted. These locations are
3206 * then stored in the flow profile. When adding flow entries to the associated
3207 * flow profile, these locations can be used to quickly extract the values to
3208 * create the content of a match entry. This function should only be used for
3209 * fixed-size data structures.
3210 */
3211 void
ice_flow_add_fld_raw(struct ice_flow_seg_info * seg,u16 off,u8 len,u16 val_loc,u16 mask_loc)3212 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3213 u16 val_loc, u16 mask_loc)
3214 {
3215 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3216 seg->raws[seg->raws_cnt].off = off;
3217 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3218 seg->raws[seg->raws_cnt].info.src.val = val_loc;
3219 seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3220 /* The "last" field is used to store the length of the field */
3221 seg->raws[seg->raws_cnt].info.src.last = len;
3222 }
3223
3224 /* Overflows of "raws" will be handled as an error condition later in
3225 * the flow when this information is processed.
3226 */
3227 seg->raws_cnt++;
3228 }
3229
3230 #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3231 (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
3232
3233 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3234 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3235
3236 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3237 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3238
3239 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3240 (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3241 ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3242 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3243
3244 /**
3245 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3246 * @segs: pointer to the flow field segment(s)
3247 * @seg_cnt: segment count
3248 * @cfg: configure parameters
3249 *
3250 * Helper function to extract fields from hash bitmap and use flow
3251 * header value to set flow field segment for further use in flow
3252 * profile entry or removal.
3253 */
3254 static enum ice_status
ice_flow_set_rss_seg_info(struct ice_flow_seg_info * segs,u8 seg_cnt,const struct ice_rss_hash_cfg * cfg)3255 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3256 const struct ice_rss_hash_cfg *cfg)
3257 {
3258 struct ice_flow_seg_info *seg;
3259 u64 val;
3260 u8 i;
3261
3262 /* set inner most segment */
3263 seg = &segs[seg_cnt - 1];
3264
3265 ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3266 ICE_FLOW_FIELD_IDX_MAX)
3267 ice_flow_set_fld(seg, (enum ice_flow_field)i,
3268 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3269 ICE_FLOW_FLD_OFF_INVAL, false);
3270
3271 ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3272
3273 /* set outer most header */
3274 if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3275 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3276 ICE_FLOW_SEG_HDR_IPV_OTHER;
3277 else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3278 segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3279 ICE_FLOW_SEG_HDR_IPV_OTHER;
3280
3281 if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3282 ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
3283 return ICE_ERR_PARAM;
3284
3285 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3286 if (val && !ice_is_pow2(val))
3287 return ICE_ERR_CFG;
3288
3289 val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3290 if (val && !ice_is_pow2(val))
3291 return ICE_ERR_CFG;
3292
3293 return ICE_SUCCESS;
3294 }
3295
3296 /**
3297 * ice_rem_vsi_rss_list - remove VSI from RSS list
3298 * @hw: pointer to the hardware structure
3299 * @vsi_handle: software VSI handle
3300 *
3301 * Remove the VSI from all RSS configurations in the list.
3302 */
ice_rem_vsi_rss_list(struct ice_hw * hw,u16 vsi_handle)3303 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3304 {
3305 struct ice_rss_cfg *r, *tmp;
3306
3307 if (LIST_EMPTY(&hw->rss_list_head))
3308 return;
3309
3310 ice_acquire_lock(&hw->rss_locks);
3311 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3312 ice_rss_cfg, l_entry)
3313 if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3314 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3315 LIST_DEL(&r->l_entry);
3316 ice_free(hw, r);
3317 }
3318 ice_release_lock(&hw->rss_locks);
3319 }
3320
3321 /**
3322 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3323 * @hw: pointer to the hardware structure
3324 * @vsi_handle: software VSI handle
3325 *
3326 * This function will iterate through all flow profiles and disassociate
3327 * the VSI from that profile. If the flow profile has no VSIs it will
3328 * be removed.
3329 */
ice_rem_vsi_rss_cfg(struct ice_hw * hw,u16 vsi_handle)3330 enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3331 {
3332 const enum ice_block blk = ICE_BLK_RSS;
3333 struct ice_flow_prof *p, *t;
3334 enum ice_status status = ICE_SUCCESS;
3335
3336 if (!ice_is_vsi_valid(hw, vsi_handle))
3337 return ICE_ERR_PARAM;
3338
3339 if (LIST_EMPTY(&hw->fl_profs[blk]))
3340 return ICE_SUCCESS;
3341
3342 ice_acquire_lock(&hw->rss_locks);
3343 LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3344 l_entry)
3345 if (ice_is_bit_set(p->vsis, vsi_handle)) {
3346 status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3347 if (status)
3348 break;
3349
3350 if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3351 status = ice_flow_rem_prof(hw, blk, p->id);
3352 if (status)
3353 break;
3354 }
3355 }
3356 ice_release_lock(&hw->rss_locks);
3357
3358 return status;
3359 }
3360
3361 /**
3362 * ice_get_rss_hdr_type - get a RSS profile's header type
3363 * @prof: RSS flow profile
3364 */
3365 static enum ice_rss_cfg_hdr_type
ice_get_rss_hdr_type(struct ice_flow_prof * prof)3366 ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3367 {
3368 enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3369
3370 if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3371 hdr_type = ICE_RSS_OUTER_HEADERS;
3372 } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3373 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3374 hdr_type = ICE_RSS_INNER_HEADERS;
3375 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3376 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3377 if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3378 hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3379 }
3380
3381 return hdr_type;
3382 }
3383
3384 /**
3385 * ice_rem_rss_list - remove RSS configuration from list
3386 * @hw: pointer to the hardware structure
3387 * @vsi_handle: software VSI handle
3388 * @prof: pointer to flow profile
3389 *
3390 * Assumption: lock has already been acquired for RSS list
3391 */
3392 static void
ice_rem_rss_list(struct ice_hw * hw,u16 vsi_handle,struct ice_flow_prof * prof)3393 ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3394 {
3395 enum ice_rss_cfg_hdr_type hdr_type;
3396 struct ice_rss_cfg *r, *tmp;
3397
3398 /* Search for RSS hash fields associated to the VSI that match the
3399 * hash configurations associated to the flow profile. If found
3400 * remove from the RSS entry list of the VSI context and delete entry.
3401 */
3402 hdr_type = ice_get_rss_hdr_type(prof);
3403 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3404 ice_rss_cfg, l_entry)
3405 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3406 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3407 r->hash.hdr_type == hdr_type) {
3408 ice_clear_bit(vsi_handle, r->vsis);
3409 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3410 LIST_DEL(&r->l_entry);
3411 ice_free(hw, r);
3412 }
3413 return;
3414 }
3415 }
3416
3417 /**
3418 * ice_add_rss_list - add RSS configuration to list
3419 * @hw: pointer to the hardware structure
3420 * @vsi_handle: software VSI handle
3421 * @prof: pointer to flow profile
3422 *
3423 * Assumption: lock has already been acquired for RSS list
3424 */
3425 static enum ice_status
ice_add_rss_list(struct ice_hw * hw,u16 vsi_handle,struct ice_flow_prof * prof)3426 ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3427 {
3428 enum ice_rss_cfg_hdr_type hdr_type;
3429 struct ice_rss_cfg *r, *rss_cfg;
3430
3431 hdr_type = ice_get_rss_hdr_type(prof);
3432 LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3433 ice_rss_cfg, l_entry)
3434 if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3435 r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3436 r->hash.hdr_type == hdr_type) {
3437 ice_set_bit(vsi_handle, r->vsis);
3438 return ICE_SUCCESS;
3439 }
3440
3441 rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3442 if (!rss_cfg)
3443 return ICE_ERR_NO_MEMORY;
3444
3445 rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3446 rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3447 rss_cfg->hash.hdr_type = hdr_type;
3448 rss_cfg->hash.symm = prof->cfg.symm;
3449 ice_set_bit(vsi_handle, rss_cfg->vsis);
3450
3451 LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3452
3453 return ICE_SUCCESS;
3454 }
3455
3456 #define ICE_FLOW_PROF_HASH_S 0
3457 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3458 #define ICE_FLOW_PROF_HDR_S 32
3459 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3460 #define ICE_FLOW_PROF_ENCAP_S 62
3461 #define ICE_FLOW_PROF_ENCAP_M (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3462
3463 /* Flow profile ID format:
3464 * [0:31] - Packet match fields
3465 * [32:61] - Protocol header
3466 * [62:63] - Encapsulation flag:
3467 * 0 if non-tunneled
3468 * 1 if tunneled
3469 * 2 for tunneled with outer ipv4
3470 * 3 for tunneled with outer ipv6
3471 */
3472 #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3473 (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3474 (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3475 (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M))
3476
3477 static void
ice_rss_config_xor_word(struct ice_hw * hw,u8 prof_id,u8 src,u8 dst)3478 ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3479 {
3480 u32 s = ((src % 4) << 3); /* byte shift */
3481 u32 v = dst | 0x80; /* value to program */
3482 u8 i = src / 4; /* register index */
3483 u32 reg;
3484
3485 reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3486 reg = (reg & ~(0xff << s)) | (v << s);
3487 wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3488 }
3489
3490 static void
ice_rss_config_xor(struct ice_hw * hw,u8 prof_id,u8 src,u8 dst,u8 len)3491 ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3492 {
3493 int fv_last_word =
3494 ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3495 int i;
3496
3497 for (i = 0; i < len; i++) {
3498 ice_rss_config_xor_word(hw, prof_id,
3499 /* Yes, field vector in GLQF_HSYMM and
3500 * GLQF_HINSET is inversed!
3501 */
3502 fv_last_word - (src + i),
3503 fv_last_word - (dst + i));
3504 ice_rss_config_xor_word(hw, prof_id,
3505 fv_last_word - (dst + i),
3506 fv_last_word - (src + i));
3507 }
3508 }
3509
/**
 * ice_rss_update_symm - update symmetric-hash registers for a flow profile
 * @hw: pointer to the hardware structure
 * @prof: flow profile whose symmetric configuration is (re)programmed
 *
 * Resolves the hardware profile ID for @prof, resets its GLQF_HSYMM
 * registers to the default, and, if the profile requests symmetric hashing,
 * pairs the source/destination fields (IPv4/IPv6 addresses and
 * TCP/UDP/SCTP ports) of the profile's last segment for XOR before hashing.
 */
static void
ice_rss_update_symm(struct ice_hw *hw,
		    struct ice_flow_prof *prof)
{
	struct ice_prof_map *map;
	u8 prof_id, m;

	/* Copy the hardware profile ID out while holding the map lock;
	 * bail out if the profile is not mapped in the RSS block.
	 */
	ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
	map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
	if (map)
		prof_id = map->prof_id;
	ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
	if (!map)
		return;
	/* clear to default */
	for (m = 0; m < 6; m++)
		wr32(hw, GLQF_HSYMM(prof_id, m), 0);
	if (prof->cfg.symm) {
		/* Symmetric config is taken from the innermost segment */
		struct ice_flow_seg_info *seg =
			&prof->segs[prof->segs_cnt - 1];

		struct ice_flow_seg_xtrct *ipv4_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
		struct ice_flow_seg_xtrct *ipv4_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
		struct ice_flow_seg_xtrct *ipv6_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
		struct ice_flow_seg_xtrct *ipv6_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;

		struct ice_flow_seg_xtrct *tcp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *tcp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;

		struct ice_flow_seg_xtrct *udp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *udp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;

		struct ice_flow_seg_xtrct *sctp_src =
			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
		struct ice_flow_seg_xtrct *sctp_dst =
			&seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;

		/* A pair is only XOR-ed when both fields were extracted
		 * (non-zero prot_id presumably means "extracted" — verify
		 * against ice_flow_xtract_fld).
		 */
		/* xor IPv4 */
		if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   ipv4_src->idx, ipv4_dst->idx, 2);

		/* xor IPv6 */
		if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   ipv6_src->idx, ipv6_dst->idx, 8);

		/* xor TCP */
		if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   tcp_src->idx, tcp_dst->idx, 1);

		/* xor UDP */
		if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   udp_src->idx, udp_dst->idx, 1);

		/* xor SCTP */
		if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
			ice_rss_config_xor(hw, prof_id,
					   sctp_src->idx, sctp_dst->idx, 1);
	}
}
3581
3582 /**
3583 * ice_add_rss_cfg_sync - add an RSS configuration
3584 * @hw: pointer to the hardware structure
3585 * @vsi_handle: software VSI handle
3586 * @cfg: configure parameters
3587 *
3588 * Assumption: lock has already been acquired for RSS list
3589 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
		     const struct ice_rss_hash_cfg *cfg)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;
	u8 segs_cnt;

	/* Outer-header configs use one segment; inner-header (tunneled)
	 * configs need the maximum (outer + inner) segment count.
	 */
	segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
			ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
	if (status)
		goto exit;

	/* Don't do RSS for GTPU Outer */
	if (segs_cnt == ICE_FLOW_SEG_SINGLE &&
	    segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
		status = ICE_SUCCESS;
		goto exit;
	}

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		if (prof->cfg.symm == cfg->symm)
			goto exit;
		/* Same profile, only the symmetric requirement changed:
		 * reprogram the HSYMM registers and keep the profile.
		 */
		prof->cfg.symm = cfg->symm;
		goto update_symm;
	}

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		if (prof->cfg.symm == cfg->symm) {
			status = ice_flow_assoc_prof(hw, blk, prof,
						     vsi_handle);
			if (!status)
				status = ice_add_rss_list(hw, vsi_handle,
							  prof);
		} else {
			/* if a profile exist but with different symmetric
			 * requirement, just return error.
			 */
			status = ICE_ERR_NOT_SUPPORTED;
		}
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(cfg->hash_flds,
						       segs[segs_cnt - 1].hdrs,
						       cfg->hdr_type),
				   segs, segs_cnt, NULL, 0, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

	prof->cfg.symm = cfg->symm;
update_symm:
	ice_rss_update_symm(hw, prof);

exit:
	ice_free(hw, segs);
	return status;
}
3709
3710 /**
3711 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3712 * @hw: pointer to the hardware structure
3713 * @vsi_handle: software VSI handle
3714 * @cfg: configure parameters
3715 *
3716 * This function will generate a flow profile based on fields associated with
3717 * the input fields to hash on, the flow type and use the VSI number to add
3718 * a flow entry to the profile.
3719 */
3720 enum ice_status
ice_add_rss_cfg(struct ice_hw * hw,u16 vsi_handle,const struct ice_rss_hash_cfg * cfg)3721 ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3722 const struct ice_rss_hash_cfg *cfg)
3723 {
3724 struct ice_rss_hash_cfg local_cfg;
3725 enum ice_status status;
3726
3727 if (!ice_is_vsi_valid(hw, vsi_handle) ||
3728 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3729 cfg->hash_flds == ICE_HASH_INVALID)
3730 return ICE_ERR_PARAM;
3731
3732 local_cfg = *cfg;
3733 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3734 ice_acquire_lock(&hw->rss_locks);
3735 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3736 ice_release_lock(&hw->rss_locks);
3737 } else {
3738 ice_acquire_lock(&hw->rss_locks);
3739 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3740 status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3741 if (!status) {
3742 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3743 status = ice_add_rss_cfg_sync(hw, vsi_handle,
3744 &local_cfg);
3745 }
3746 ice_release_lock(&hw->rss_locks);
3747 }
3748
3749 return status;
3750 }
3751
3752 /**
3753 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3754 * @hw: pointer to the hardware structure
3755 * @vsi_handle: software VSI handle
3756 * @cfg: configure parameters
3757 *
3758 * Assumption: lock has already been acquired for RSS list
3759 */
static enum ice_status
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
		     const struct ice_rss_hash_cfg *cfg)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_seg_info *segs;
	struct ice_flow_prof *prof;
	enum ice_status status;
	u8 segs_cnt;

	/* Segment count mirrors ice_add_rss_cfg_sync so the same profile
	 * can be found again for removal.
	 */
	segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
			ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
	if (status)
		goto out;

	/* Don't do RSS for GTPU Outer */
	if (segs_cnt == ICE_FLOW_SEG_SINGLE &&
	    segs[segs_cnt - 1].hdrs & ICE_FLOW_SEG_HDR_GTPU) {
		status = ICE_SUCCESS;
		goto out;
	}

	/* Find the profile matching these hash fields */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto out;
	}

	status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
	if (status)
		goto out;

	/* Remove RSS configuration from VSI context before deleting
	 * the flow profile.
	 */
	ice_rem_rss_list(hw, vsi_handle, prof);

	/* Delete the profile once no VSI references it anymore */
	if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
		status = ice_flow_rem_prof(hw, blk, prof->id);

out:
	ice_free(hw, segs);
	return status;
}
3813
3814 /**
3815 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
3816 * @hw: pointer to the hardware structure
3817 * @vsi_handle: software VSI handle
3818 * @cfg: configure parameters
3819 *
3820 * This function will lookup the flow profile based on the input
3821 * hash field bitmap, iterate through the profile entry list of
3822 * that profile and find entry associated with input VSI to be
3823 * removed. Calls are made to underlying flow apis which will in
3824 * turn build or update buffers for RSS XLT1 section.
3825 */
3826 enum ice_status
ice_rem_rss_cfg(struct ice_hw * hw,u16 vsi_handle,const struct ice_rss_hash_cfg * cfg)3827 ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3828 const struct ice_rss_hash_cfg *cfg)
3829 {
3830 struct ice_rss_hash_cfg local_cfg;
3831 enum ice_status status;
3832
3833 if (!ice_is_vsi_valid(hw, vsi_handle) ||
3834 !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3835 cfg->hash_flds == ICE_HASH_INVALID)
3836 return ICE_ERR_PARAM;
3837
3838 ice_acquire_lock(&hw->rss_locks);
3839 local_cfg = *cfg;
3840 if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3841 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3842 } else {
3843 local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3844 status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3845
3846 if (!status) {
3847 local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3848 status = ice_rem_rss_cfg_sync(hw, vsi_handle,
3849 &local_cfg);
3850 }
3851 }
3852 ice_release_lock(&hw->rss_locks);
3853
3854 return status;
3855 }
3856
3857 /**
3858 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
3859 * @hw: pointer to the hardware structure
3860 * @vsi_handle: software VSI handle
3861 */
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status = ICE_SUCCESS;
	struct ice_rss_cfg *r;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Re-apply every saved RSS config that references this VSI;
	 * stop at the first failure and report it.
	 */
	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry) {
		if (ice_is_bit_set(r->vsis, vsi_handle)) {
			status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
			if (status)
				break;
		}
	}
	ice_release_lock(&hw->rss_locks);

	return status;
}
3883
3884 /**
3885 * ice_get_rss_cfg - returns hashed fields for the given header types
3886 * @hw: pointer to the hardware structure
3887 * @vsi_handle: software VSI handle
3888 * @hdrs: protocol header type
3889 *
3890 * This function will return the match fields of the first instance of flow
3891 * profile having the given header types and containing input VSI
3892 */
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
{
	u64 rss_hash = ICE_HASH_INVALID;
	struct ice_rss_cfg *r;

	/* verify if the protocol header is non zero and VSI is valid */
	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_HASH_INVALID;

	/* Return the hash fields of the first saved config that belongs to
	 * this VSI and has exactly the requested additional headers.
	 */
	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry)
		if (ice_is_bit_set(r->vsis, vsi_handle) &&
		    r->hash.addl_hdrs == hdrs) {
			rss_hash = r->hash.hash_flds;
			break;
		}
	ice_release_lock(&hw->rss_locks);

	return rss_hash;
}
3914