1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
3 */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <assert.h>
11
12 #include <rte_malloc.h>
13 #include <rte_tailq.h>
14 #include "base/i40e_prototype.h"
15 #include "i40e_logs.h"
16 #include "i40e_ethdev.h"
17 #include "i40e_hash.h"
18
19 #ifndef BIT
20 #define BIT(n) (1UL << (n))
21 #endif
22
23 #ifndef BIT_ULL
24 #define BIT_ULL(n) (1ULL << (n))
25 #endif
26
27 /* Pattern item headers */
28 #define I40E_HASH_HDR_ETH 0x01ULL
29 #define I40E_HASH_HDR_IPV4 0x10ULL
30 #define I40E_HASH_HDR_IPV6 0x20ULL
31 #define I40E_HASH_HDR_IPV6_FRAG 0x40ULL
32 #define I40E_HASH_HDR_TCP 0x100ULL
33 #define I40E_HASH_HDR_UDP 0x200ULL
34 #define I40E_HASH_HDR_SCTP 0x400ULL
35 #define I40E_HASH_HDR_ESP 0x10000ULL
36 #define I40E_HASH_HDR_L2TPV3 0x20000ULL
37 #define I40E_HASH_HDR_AH 0x40000ULL
38 #define I40E_HASH_HDR_GTPC 0x100000ULL
39 #define I40E_HASH_HDR_GTPU 0x200000ULL
40
41 #define I40E_HASH_HDR_INNER_SHIFT 32
42 #define I40E_HASH_HDR_IPV4_INNER (I40E_HASH_HDR_IPV4 << \
43 I40E_HASH_HDR_INNER_SHIFT)
44 #define I40E_HASH_HDR_IPV6_INNER (I40E_HASH_HDR_IPV6 << \
45 I40E_HASH_HDR_INNER_SHIFT)
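/* Pattern hints below are built by OR-ing the header bits above: outer
 * headers occupy the low 32 bits, while headers parsed behind a GTPU tunnel
 * are shifted up by I40E_HASH_HDR_INNER_SHIFT.
 */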
46
47 /* ETH */
48 #define I40E_PHINT_ETH I40E_HASH_HDR_ETH
49
50 /* IPv4 */
51 #define I40E_PHINT_IPV4 (I40E_HASH_HDR_ETH | I40E_HASH_HDR_IPV4)
52 #define I40E_PHINT_IPV4_TCP (I40E_PHINT_IPV4 | I40E_HASH_HDR_TCP)
53 #define I40E_PHINT_IPV4_UDP (I40E_PHINT_IPV4 | I40E_HASH_HDR_UDP)
54 #define I40E_PHINT_IPV4_SCTP (I40E_PHINT_IPV4 | I40E_HASH_HDR_SCTP)
55
56 /* IPv6 */
57 #define I40E_PHINT_IPV6 (I40E_HASH_HDR_ETH | I40E_HASH_HDR_IPV6)
58 #define I40E_PHINT_IPV6_FRAG (I40E_PHINT_IPV6 | \
59 I40E_HASH_HDR_IPV6_FRAG)
60 #define I40E_PHINT_IPV6_TCP (I40E_PHINT_IPV6 | I40E_HASH_HDR_TCP)
61 #define I40E_PHINT_IPV6_UDP (I40E_PHINT_IPV6 | I40E_HASH_HDR_UDP)
62 #define I40E_PHINT_IPV6_SCTP (I40E_PHINT_IPV6 | I40E_HASH_HDR_SCTP)
63
64 /* ESP */
65 #define I40E_PHINT_IPV4_ESP (I40E_PHINT_IPV4 | I40E_HASH_HDR_ESP)
66 #define I40E_PHINT_IPV6_ESP (I40E_PHINT_IPV6 | I40E_HASH_HDR_ESP)
67 #define I40E_PHINT_IPV4_UDP_ESP (I40E_PHINT_IPV4_UDP | \
68 I40E_HASH_HDR_ESP)
69 #define I40E_PHINT_IPV6_UDP_ESP (I40E_PHINT_IPV6_UDP | \
70 I40E_HASH_HDR_ESP)
71
72 /* GTPC */
73 #define I40E_PHINT_IPV4_GTPC (I40E_PHINT_IPV4_UDP | \
74 I40E_HASH_HDR_GTPC)
75 #define I40E_PHINT_IPV6_GTPC (I40E_PHINT_IPV6_UDP | \
76 I40E_HASH_HDR_GTPC)
77
78 /* GTPU */
79 #define I40E_PHINT_IPV4_GTPU (I40E_PHINT_IPV4_UDP | \
80 I40E_HASH_HDR_GTPU)
81 #define I40E_PHINT_IPV4_GTPU_IPV4 (I40E_PHINT_IPV4_GTPU | \
82 I40E_HASH_HDR_IPV4_INNER)
83 #define I40E_PHINT_IPV4_GTPU_IPV6 (I40E_PHINT_IPV4_GTPU | \
84 I40E_HASH_HDR_IPV6_INNER)
85 #define I40E_PHINT_IPV6_GTPU (I40E_PHINT_IPV6_UDP | \
86 I40E_HASH_HDR_GTPU)
87 #define I40E_PHINT_IPV6_GTPU_IPV4 (I40E_PHINT_IPV6_GTPU | \
88 I40E_HASH_HDR_IPV4_INNER)
89 #define I40E_PHINT_IPV6_GTPU_IPV6 (I40E_PHINT_IPV6_GTPU | \
90 I40E_HASH_HDR_IPV6_INNER)
91
92 /* L2TPV3 */
93 #define I40E_PHINT_IPV4_L2TPV3 (I40E_PHINT_IPV4 | I40E_HASH_HDR_L2TPV3)
94 #define I40E_PHINT_IPV6_L2TPV3 (I40E_PHINT_IPV6 | I40E_HASH_HDR_L2TPV3)
95
96 /* AH */
97 #define I40E_PHINT_IPV4_AH (I40E_PHINT_IPV4 | I40E_HASH_HDR_AH)
98 #define I40E_PHINT_IPV6_AH (I40E_PHINT_IPV6 | I40E_HASH_HDR_AH)
99
100 /* Mapping from RSS type to input set */
101 struct i40e_hash_map_rss_inset {
102 uint64_t rss_type;
103 uint64_t inset;
104 };
105
106 const struct i40e_hash_map_rss_inset i40e_hash_rss_inset[] = {
107 /* IPv4 */
108 { RTE_ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
109 { RTE_ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
110
111 { RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
112 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
113
114 { RTE_ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
115 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
116
117 { RTE_ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
118 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
119
120 { RTE_ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
121 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
122
123 /* IPv6 */
124 { RTE_ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
125 { RTE_ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
126
127 { RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
128 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
129
130 { RTE_ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
131 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
132
133 { RTE_ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
134 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
135
136 { RTE_ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
137 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
138
139 /* Port */
140 { RTE_ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
141
142 /* Ether */
143 { RTE_ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
144 { RTE_ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
145
146 /* VLAN */
147 { RTE_ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
148 { RTE_ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
149 };
150
151 #define I40E_HASH_VOID_NEXT_ALLOW BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)
152
153 #define I40E_HASH_ETH_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV4) | \
154 BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6) | \
155 BIT_ULL(RTE_FLOW_ITEM_TYPE_VLAN))
156
157 #define I40E_HASH_IP_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_TCP) | \
158 BIT_ULL(RTE_FLOW_ITEM_TYPE_UDP) | \
159 BIT_ULL(RTE_FLOW_ITEM_TYPE_SCTP) | \
160 BIT_ULL(RTE_FLOW_ITEM_TYPE_ESP) | \
161 BIT_ULL(RTE_FLOW_ITEM_TYPE_L2TPV3OIP) |\
162 BIT_ULL(RTE_FLOW_ITEM_TYPE_AH))
163
164 #define I40E_HASH_IPV6_NEXT_ALLOW (I40E_HASH_IP_NEXT_ALLOW | \
165 BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT))
166
167 #define I40E_HASH_UDP_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_GTPU) | \
168 BIT_ULL(RTE_FLOW_ITEM_TYPE_GTPC))
169
170 #define I40E_HASH_GTPU_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV4) | \
171 BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6))
172
173 static const uint64_t pattern_next_allow_items[] = {
174 [RTE_FLOW_ITEM_TYPE_VOID] = I40E_HASH_VOID_NEXT_ALLOW,
175 [RTE_FLOW_ITEM_TYPE_ETH] = I40E_HASH_ETH_NEXT_ALLOW,
176 [RTE_FLOW_ITEM_TYPE_IPV4] = I40E_HASH_IP_NEXT_ALLOW,
177 [RTE_FLOW_ITEM_TYPE_IPV6] = I40E_HASH_IPV6_NEXT_ALLOW,
178 [RTE_FLOW_ITEM_TYPE_UDP] = I40E_HASH_UDP_NEXT_ALLOW,
179 [RTE_FLOW_ITEM_TYPE_GTPU] = I40E_HASH_GTPU_NEXT_ALLOW,
180 };
181
182 static const uint64_t pattern_item_header[] = {
183 [RTE_FLOW_ITEM_TYPE_ETH] = I40E_HASH_HDR_ETH,
184 [RTE_FLOW_ITEM_TYPE_IPV4] = I40E_HASH_HDR_IPV4,
185 [RTE_FLOW_ITEM_TYPE_IPV6] = I40E_HASH_HDR_IPV6,
186 [RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT] = I40E_HASH_HDR_IPV6_FRAG,
187 [RTE_FLOW_ITEM_TYPE_TCP] = I40E_HASH_HDR_TCP,
188 [RTE_FLOW_ITEM_TYPE_UDP] = I40E_HASH_HDR_UDP,
189 [RTE_FLOW_ITEM_TYPE_SCTP] = I40E_HASH_HDR_SCTP,
190 [RTE_FLOW_ITEM_TYPE_ESP] = I40E_HASH_HDR_ESP,
191 [RTE_FLOW_ITEM_TYPE_GTPC] = I40E_HASH_HDR_GTPC,
192 [RTE_FLOW_ITEM_TYPE_GTPU] = I40E_HASH_HDR_GTPU,
193 [RTE_FLOW_ITEM_TYPE_L2TPV3OIP] = I40E_HASH_HDR_L2TPV3,
194 [RTE_FLOW_ITEM_TYPE_AH] = I40E_HASH_HDR_AH,
195 };
196
197 /* Structure of matched pattern */
198 struct i40e_hash_match_pattern {
199 uint64_t pattern_type;
200 uint64_t rss_mask; /* Supported RSS type for this pattern */
201 bool custom_pctype_flag;/* true for custom packet type */
202 uint8_t pctype;
203 };
204
205 #define I40E_HASH_MAP_PATTERN(pattern, rss_mask, pctype) { \
206 pattern, rss_mask, false, pctype }
207
208 #define I40E_HASH_MAP_CUS_PATTERN(pattern, rss_mask, cus_pctype) { \
209 pattern, rss_mask, true, cus_pctype }
210
211 #define I40E_HASH_L2_RSS_MASK (RTE_ETH_RSS_VLAN | RTE_ETH_RSS_ETH | \
212 RTE_ETH_RSS_L2_SRC_ONLY | \
213 RTE_ETH_RSS_L2_DST_ONLY)
214
215 #define I40E_HASH_L23_RSS_MASK (I40E_HASH_L2_RSS_MASK | \
216 RTE_ETH_RSS_L3_SRC_ONLY | \
217 RTE_ETH_RSS_L3_DST_ONLY)
218
219 #define I40E_HASH_IPV4_L23_RSS_MASK (RTE_ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
220 #define I40E_HASH_IPV6_L23_RSS_MASK (RTE_ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
221
222 #define I40E_HASH_L234_RSS_MASK (I40E_HASH_L23_RSS_MASK | \
223 RTE_ETH_RSS_PORT | RTE_ETH_RSS_L4_SRC_ONLY | \
224 RTE_ETH_RSS_L4_DST_ONLY)
225
226 #define I40E_HASH_IPV4_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV4)
227 #define I40E_HASH_IPV6_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | RTE_ETH_RSS_IPV6)
228
229 #define I40E_HASH_L4_TYPES (RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
230 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
231 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
232 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
233 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
234 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
235
236 /* Currently supported patterns and RSS types.
237  * Entries with the same pattern type are grouped together.
238 */
239 static const struct i40e_hash_match_pattern match_patterns[] = {
240 /* Ether */
241 I40E_HASH_MAP_PATTERN(I40E_PHINT_ETH,
242 RTE_ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
243 I40E_FILTER_PCTYPE_L2_PAYLOAD),
244
245 /* IPv4 */
246 I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
247 RTE_ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
248 I40E_FILTER_PCTYPE_FRAG_IPV4),
249
250 I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
251 RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
252 I40E_HASH_IPV4_L23_RSS_MASK,
253 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER),
254
255 I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_TCP,
256 RTE_ETH_RSS_NONFRAG_IPV4_TCP |
257 I40E_HASH_IPV4_L234_RSS_MASK,
258 I40E_FILTER_PCTYPE_NONF_IPV4_TCP),
259
260 I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_UDP,
261 RTE_ETH_RSS_NONFRAG_IPV4_UDP |
262 I40E_HASH_IPV4_L234_RSS_MASK,
263 I40E_FILTER_PCTYPE_NONF_IPV4_UDP),
264
265 I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_SCTP,
266 RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
267 I40E_HASH_IPV4_L234_RSS_MASK,
268 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP),
269
270 /* IPv6 */
271 I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
272 RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
273 I40E_FILTER_PCTYPE_FRAG_IPV6),
274
275 I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
276 RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
277 I40E_HASH_IPV6_L23_RSS_MASK,
278 I40E_FILTER_PCTYPE_NONF_IPV6_OTHER),
279
280 I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_FRAG,
281 RTE_ETH_RSS_FRAG_IPV6 | I40E_HASH_L23_RSS_MASK,
282 I40E_FILTER_PCTYPE_FRAG_IPV6),
283
284 I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_TCP,
285 RTE_ETH_RSS_NONFRAG_IPV6_TCP |
286 I40E_HASH_IPV6_L234_RSS_MASK,
287 I40E_FILTER_PCTYPE_NONF_IPV6_TCP),
288
289 I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_UDP,
290 RTE_ETH_RSS_NONFRAG_IPV6_UDP |
291 I40E_HASH_IPV6_L234_RSS_MASK,
292 I40E_FILTER_PCTYPE_NONF_IPV6_UDP),
293
294 I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_SCTP,
295 RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
296 I40E_HASH_IPV6_L234_RSS_MASK,
297 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP),
298
299 /* ESP */
300 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_ESP,
301 RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
302 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_ESP,
303 RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
304 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_UDP_ESP,
305 RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
306 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_UDP_ESP,
307 RTE_ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),
308
309 /* GTPC */
310 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPC,
311 I40E_HASH_IPV4_L234_RSS_MASK,
312 I40E_CUSTOMIZED_GTPC),
313 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPC,
314 I40E_HASH_IPV6_L234_RSS_MASK,
315 I40E_CUSTOMIZED_GTPC),
316
317 /* GTPU */
318 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU,
319 I40E_HASH_IPV4_L234_RSS_MASK,
320 I40E_CUSTOMIZED_GTPU),
321 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV4,
322 RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
323 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV6,
324 RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
325 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU,
326 I40E_HASH_IPV6_L234_RSS_MASK,
327 I40E_CUSTOMIZED_GTPU),
328 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV4,
329 RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
330 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV6,
331 RTE_ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
332
333 /* L2TPV3 */
334 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_L2TPV3,
335 RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
336 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_L2TPV3,
337 RTE_ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),
338
339 /* AH */
340 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, RTE_ETH_RSS_AH,
341 I40E_CUSTOMIZED_AH_IPV4),
342 I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, RTE_ETH_RSS_AH,
343 I40E_CUSTOMIZED_AH_IPV6),
344 };
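/* Illustrative sketch only (not part of this driver): an application rule
 * that resolves to the IPV4_UDP entry above could be built roughly as below.
 * `port_id` is an arbitrary example parameter, and the pattern items carry
 * no spec/mask/last, as required by i40e_hash_get_pattern_type().
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *		.types = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);
 */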
345
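/* Encode the flow pattern as a bitmap of I40E_HASH_HDR_* values. Items must
 * not carry spec/mask/last, each item must be allowed after the previous one,
 * at most two VLAN items are accepted per layer, and a GTPU item switches
 * parsing to the inner headers.
 */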
346 static int
347 i40e_hash_get_pattern_type(const struct rte_flow_item pattern[],
348 uint64_t *pattern_types,
349 struct rte_flow_error *error)
350 {
351 const char *message = "Pattern not supported";
352 enum rte_flow_item_type prev_item_type = RTE_FLOW_ITEM_TYPE_VOID;
353 enum rte_flow_item_type last_item_type = prev_item_type;
354 uint64_t item_hdr, pattern_hdrs = 0;
355 bool inner_flag = false;
356 int vlan_count = 0;
357
358 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
359 if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID)
360 continue;
361
362 if (pattern->mask || pattern->spec || pattern->last) {
363 message = "Header info should not be specified";
364 goto not_sup;
365 }
366
367 /* Check the previous item allows this sub-item. */
368 if (prev_item_type >= (enum rte_flow_item_type)
369 RTE_DIM(pattern_next_allow_items) ||
370 !(pattern_next_allow_items[prev_item_type] &
371 BIT_ULL(pattern->type)))
372 goto not_sup;
373
374 		/* A VLAN item does not affect pattern type recognition,
375 		 * so just count the VLAN items and do not change the
376 		 * value of variable `prev_item_type`.
377 */
378 last_item_type = pattern->type;
379 if (last_item_type == RTE_FLOW_ITEM_TYPE_VLAN) {
380 if (vlan_count >= 2)
381 goto not_sup;
382 vlan_count++;
383 continue;
384 }
385
386 prev_item_type = last_item_type;
387 assert(last_item_type < (enum rte_flow_item_type)
388 RTE_DIM(pattern_item_header));
389 item_hdr = pattern_item_header[last_item_type];
390 assert(item_hdr);
391
392 if (inner_flag) {
393 item_hdr <<= I40E_HASH_HDR_INNER_SHIFT;
394
395 /* Inner layer should not have GTPU item */
396 if (last_item_type == RTE_FLOW_ITEM_TYPE_GTPU)
397 goto not_sup;
398 } else {
399 if (last_item_type == RTE_FLOW_ITEM_TYPE_GTPU) {
400 inner_flag = true;
401 vlan_count = 0;
402 }
403 }
404
405 if (item_hdr & pattern_hdrs)
406 goto not_sup;
407
408 pattern_hdrs |= item_hdr;
409 }
410
411 if (pattern_hdrs && last_item_type != RTE_FLOW_ITEM_TYPE_VLAN) {
412 *pattern_types = pattern_hdrs;
413 return 0;
414 }
415
416 not_sup:
417 return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
418 pattern, message);
419 }
420
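/* X722 splits some flow types into extra PCTYPEs (TCP SYN no ACK, unicast and
 * multicast UDP). Return the additional PCTYPE bits that must be configured
 * together with the base PCTYPE.
 */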
421 static uint64_t
422 i40e_hash_get_x722_ext_pctypes(uint8_t match_pctype)
423 {
424 uint64_t pctypes = 0;
425
426 switch (match_pctype) {
427 case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
428 pctypes = BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
429 break;
430
431 case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
432 pctypes = BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
433 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
434 break;
435
436 case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
437 pctypes = BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
438 break;
439
440 case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
441 pctypes = BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
442 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
443 break;
444 }
445
446 return pctypes;
447 }
448
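/* GTPC/GTPU rules may hash only on the destination IP: source IP and L4 ports
 * are rejected, and the destination IP bits are translated to the tunnel
 * destination IP input-set bits.
 */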
449 static int
450 i40e_hash_translate_gtp_inset(struct i40e_rte_flow_rss_conf *rss_conf,
451 struct rte_flow_error *error)
452 {
453 if (rss_conf->inset &
454 (I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC |
455 I40E_INSET_DST_PORT | I40E_INSET_SRC_PORT))
456 return rte_flow_error_set(error, ENOTSUP,
457 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
458 NULL,
459 					  "Only external destination IP is supported");
460
461 if (rss_conf->inset & I40E_INSET_IPV4_DST)
462 rss_conf->inset = (rss_conf->inset & ~I40E_INSET_IPV4_DST) |
463 I40E_INSET_TUNNEL_IPV4_DST;
464
465 if (rss_conf->inset & I40E_INSET_IPV6_DST)
466 rss_conf->inset = (rss_conf->inset & ~I40E_INSET_IPV6_DST) |
467 I40E_INSET_TUNNEL_IPV6_DST;
468
469 return 0;
470 }
471
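/* Resolve a matched pattern entry to hardware PCTYPE bits. Customized PCTYPEs
 * (ESP, GTP, L2TPv3, AH) must be valid, which typically requires a DDP
 * profile to be loaded; on X722 the extended PCTYPEs are added as well.
 */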
472 static int
473 i40e_hash_get_pctypes(const struct rte_eth_dev *dev,
474 const struct i40e_hash_match_pattern *match,
475 struct i40e_rte_flow_rss_conf *rss_conf,
476 struct rte_flow_error *error)
477 {
478 if (match->custom_pctype_flag) {
479 struct i40e_pf *pf;
480 struct i40e_customized_pctype *custom_type;
481
482 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
483 custom_type = i40e_find_customized_pctype(pf, match->pctype);
484 if (!custom_type || !custom_type->valid)
485 return rte_flow_error_set(error, ENOTSUP,
486 RTE_FLOW_ERROR_TYPE_ITEM,
487 NULL, "PCTYPE not supported");
488
489 rss_conf->config_pctypes |= BIT_ULL(custom_type->pctype);
490
491 if (match->pctype == I40E_CUSTOMIZED_GTPU ||
492 match->pctype == I40E_CUSTOMIZED_GTPC)
493 return i40e_hash_translate_gtp_inset(rss_conf, error);
494 } else {
495 struct i40e_hw *hw =
496 I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
497 uint64_t types;
498
499 rss_conf->config_pctypes |= BIT_ULL(match->pctype);
500 if (hw->mac.type == I40E_MAC_X722) {
501 types = i40e_hash_get_x722_ext_pctypes(match->pctype);
502 rss_conf->config_pctypes |= types;
503 }
504 }
505
506 return 0;
507 }
508
509 static int
510 i40e_hash_get_pattern_pctypes(const struct rte_eth_dev *dev,
511 const struct rte_flow_item pattern[],
512 const struct rte_flow_action_rss *rss_act,
513 struct i40e_rte_flow_rss_conf *rss_conf,
514 struct rte_flow_error *error)
515 {
516 uint64_t pattern_types = 0;
517 bool match_flag = false;
518 int i, ret;
519
520 ret = i40e_hash_get_pattern_type(pattern, &pattern_types, error);
521 if (ret)
522 return ret;
523
524 for (i = 0; i < (int)RTE_DIM(match_patterns); i++) {
525 const struct i40e_hash_match_pattern *match =
526 &match_patterns[i];
527
528 		/* Check whether the pattern types match. Entries with the
529 		 * same pattern type are grouped together, so if a previous
530 		 * entry matched but the current one does not, none of the
531 		 * remaining entries can match either.
532 */
533 if (pattern_types != match->pattern_type) {
534 if (match_flag)
535 break;
536 continue;
537 }
538 match_flag = true;
539
540 /* Check RSS types match */
541 if (!(rss_act->types & ~match->rss_mask)) {
542 ret = i40e_hash_get_pctypes(dev, match,
543 rss_conf, error);
544 if (ret)
545 return ret;
546 }
547 }
548
549 if (rss_conf->config_pctypes)
550 return 0;
551
552 if (match_flag)
553 return rte_flow_error_set(error, ENOTSUP,
554 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
555 NULL, "RSS types not supported");
556
557 return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
558 NULL, "Pattern not supported");
559 }
560
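/* Translate RSS types into an i40e input-set bitmap, then narrow it according
 * to the SRC_ONLY/DST_ONLY modifiers. When L4 types are requested, an L3-only
 * (or L4-only) modifier also drops the other layer's fields.
 */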
561 static uint64_t
562 i40e_hash_get_inset(uint64_t rss_types)
563 {
564 uint64_t mask, inset = 0;
565 int i;
566
567 for (i = 0; i < (int)RTE_DIM(i40e_hash_rss_inset); i++) {
568 if (rss_types & i40e_hash_rss_inset[i].rss_type)
569 inset |= i40e_hash_rss_inset[i].inset;
570 }
571
572 if (!inset)
573 return 0;
574
575 	/* If SRC_ONLY and DST_ONLY of the same layer are both specified,
576 	 * the effect is the same as specifying neither.
577 */
578 mask = rss_types & (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY);
579 if (mask == RTE_ETH_RSS_L2_SRC_ONLY)
580 inset &= ~I40E_INSET_DMAC;
581 else if (mask == RTE_ETH_RSS_L2_DST_ONLY)
582 inset &= ~I40E_INSET_SMAC;
583
584 mask = rss_types & (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
585 if (mask == RTE_ETH_RSS_L3_SRC_ONLY)
586 inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST);
587 else if (mask == RTE_ETH_RSS_L3_DST_ONLY)
588 inset &= ~(I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);
589
590 mask = rss_types & (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
591 if (mask == RTE_ETH_RSS_L4_SRC_ONLY)
592 inset &= ~I40E_INSET_DST_PORT;
593 else if (mask == RTE_ETH_RSS_L4_DST_ONLY)
594 inset &= ~I40E_INSET_SRC_PORT;
595
596 if (rss_types & I40E_HASH_L4_TYPES) {
597 uint64_t l3_mask = rss_types &
598 (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
599 uint64_t l4_mask = rss_types &
600 (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
601
602 if (l3_mask && !l4_mask)
603 inset &= ~(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT);
604 else if (!l3_mask && l4_mask)
605 inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST |
606 I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);
607 }
608
609 return inset;
610 }
611
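/* Select the global hash function (Toeplitz or simple XOR) in GLQF_CTL and
 * the per-port symmetric hash setting. The register is written only when the
 * function actually changes, and never when multi-driver support is enabled.
 */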
612 static int
613 i40e_hash_config_func(struct i40e_hw *hw, enum rte_eth_hash_function func)
614 {
615 struct i40e_pf *pf;
616 uint32_t reg;
617 uint8_t symmetric = 0;
618
619 reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
620
621 if (func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
622 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK))
623 goto set_symmetric;
624
625 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
626 } else {
627 if (func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
628 symmetric = 1;
629
630 if (reg & I40E_GLQF_CTL_HTOEP_MASK)
631 goto set_symmetric;
632
633 reg |= I40E_GLQF_CTL_HTOEP_MASK;
634 }
635
636 pf = &((struct i40e_adapter *)hw->back)->pf;
637 if (pf->support_multi_driver) {
638 PMD_DRV_LOG(ERR,
639 			    "Modifying the hash function is not permitted when multi-driver is enabled");
640 return -EPERM;
641 }
642
643 	PMD_DRV_LOG(INFO, "Setting NIC hash function to %d", func);
644 i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
645 I40E_WRITE_FLUSH(hw);
646
647 set_symmetric:
648 i40e_set_symmetric_hash_enable_per_port(hw, symmetric);
649 return 0;
650 }
651
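/* Toggle symmetric hashing for a single PCTYPE via GLQF_HSYM. On X722 the
 * PCTYPE is first translated through the FD PCTYPES mapping. Nothing is
 * written when the register already holds the requested value.
 */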
652 static int
653 i40e_hash_config_pctype_symmetric(struct i40e_hw *hw,
654 uint32_t pctype,
655 bool symmetric)
656 {
657 struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
658 uint32_t reg;
659
660 	/* For X722, get the translated pctype from the FD pctype register */
661 if (hw->mac.type == I40E_MAC_X722)
662 pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
663
664 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype));
665 if (symmetric) {
666 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
667 return 0;
668 reg |= I40E_GLQF_HSYM_SYMH_ENA_MASK;
669 } else {
670 if (!(reg & I40E_GLQF_HSYM_SYMH_ENA_MASK))
671 return 0;
672 reg &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;
673 }
674
675 if (pf->support_multi_driver) {
676 PMD_DRV_LOG(ERR,
677 			    "Enabling/disabling symmetric hash is not permitted when multi-driver is enabled");
678 return -EPERM;
679 }
680
681 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
682 I40E_WRITE_FLUSH(hw);
683 return 0;
684 }
685
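/* Enable or disable RSS hashing for one PCTYPE by toggling its bit in the
 * PFQF_HENA registers (PCTYPEs 0-31 in HENA(0), 32-63 in HENA(1)).
 */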
686 static void
687 i40e_hash_enable_pctype(struct i40e_hw *hw,
688 uint32_t pctype, bool enable)
689 {
690 uint32_t reg, reg_val, mask;
691
692 if (pctype < 32) {
693 mask = BIT(pctype);
694 reg = I40E_PFQF_HENA(0);
695 } else {
696 mask = BIT(pctype - 32);
697 reg = I40E_PFQF_HENA(1);
698 }
699
700 reg_val = i40e_read_rx_ctl(hw, reg);
701
702 if (enable) {
703 if (reg_val & mask)
704 return;
705
706 reg_val |= mask;
707 } else {
708 if (!(reg_val & mask))
709 return;
710
711 reg_val &= ~mask;
712 }
713
714 i40e_write_rx_ctl(hw, reg, reg_val);
715 I40E_WRITE_FLUSH(hw);
716 }
717
718 static int
719 i40e_hash_config_pctype(struct i40e_hw *hw,
720 struct i40e_rte_flow_rss_conf *rss_conf,
721 uint32_t pctype)
722 {
723 uint64_t rss_types = rss_conf->conf.types;
724 int ret;
725
726 if (rss_types == 0) {
727 i40e_hash_enable_pctype(hw, pctype, false);
728 return 0;
729 }
730
731 if (rss_conf->inset) {
732 ret = i40e_set_hash_inset(hw, rss_conf->inset, pctype, false);
733 if (ret)
734 return ret;
735 }
736
737 i40e_hash_enable_pctype(hw, pctype, true);
738 return 0;
739 }
740
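/* Create or update the queue region described by the RSS configuration. An
 * existing region with the same start and size only gets the new user
 * priority appended; otherwise the lowest free region ID is allocated and the
 * whole queue-region configuration is flushed to hardware.
 */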
741 static int
742 i40e_hash_config_region(struct i40e_pf *pf,
743 const struct i40e_rte_flow_rss_conf *rss_conf)
744 {
745 struct i40e_hw *hw = &pf->adapter->hw;
746 struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
747 struct i40e_queue_region_info *regions = pf->queue_region.region;
748 uint32_t num = pf->queue_region.queue_region_number;
749 uint32_t i, region_id_mask = 0;
750
751 /* Use a 32 bit variable to represent all regions */
752 RTE_BUILD_BUG_ON(I40E_REGION_MAX_INDEX > 31);
753
754 	/* Re-configure the region if it already exists */
755 for (i = 0; i < num; i++) {
756 if (rss_conf->region_queue_start ==
757 regions[i].queue_start_index &&
758 rss_conf->region_queue_num == regions[i].queue_num) {
759 uint32_t j;
760
761 for (j = 0; j < regions[i].user_priority_num; j++) {
762 if (regions[i].user_priority[j] ==
763 rss_conf->region_priority)
764 return 0;
765 }
766
767 if (j >= I40E_MAX_USER_PRIORITY) {
768 PMD_DRV_LOG(ERR,
769 				    "Priority number exceeds the maximum %d",
770 I40E_MAX_USER_PRIORITY);
771 return -ENOSPC;
772 }
773
774 regions[i].user_priority[j] = rss_conf->region_priority;
775 regions[i].user_priority_num++;
776 return i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
777 }
778
779 region_id_mask |= BIT(regions[i].region_id);
780 }
781
782 if (num > I40E_REGION_MAX_INDEX) {
783 PMD_DRV_LOG(ERR, "Queue region resource used up");
784 return -ENOSPC;
785 }
786
787 /* Add a new region */
788
789 pf->queue_region.queue_region_number++;
790 	memset(&regions[num], 0, sizeof(regions[0]));
791
792 regions[num].region_id = rte_bsf32(~region_id_mask);
793 regions[num].queue_num = rss_conf->region_queue_num;
794 regions[num].queue_start_index = rss_conf->region_queue_start;
795 regions[num].user_priority[0] = rss_conf->region_priority;
796 regions[num].user_priority_num = 1;
797
798 return i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
799 }
800
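/* Apply one parsed RSS flow to the hardware: hash function, queue region,
 * RSS key and lookup table, then per-PCTYPE input set, symmetric hashing and
 * hash enable. Everything that is modified is recorded in the reset flags so
 * it can be rolled back when the flow is destroyed.
 */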
801 static int
802 i40e_hash_config(struct i40e_pf *pf,
803 struct i40e_rte_flow_rss_conf *rss_conf)
804 {
805 struct rte_flow_action_rss *rss_info = &rss_conf->conf;
806 struct i40e_hw *hw = &pf->adapter->hw;
807 uint64_t pctypes;
808 int ret;
809
810 if (rss_info->func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
811 ret = i40e_hash_config_func(hw, rss_info->func);
812 if (ret)
813 return ret;
814
815 if (rss_info->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
816 rss_conf->misc_reset_flags |=
817 I40E_HASH_FLOW_RESET_FLAG_FUNC;
818 }
819
820 if (rss_conf->region_queue_num > 0) {
821 ret = i40e_hash_config_region(pf, rss_conf);
822 if (ret)
823 return ret;
824
825 rss_conf->misc_reset_flags |= I40E_HASH_FLOW_RESET_FLAG_REGION;
826 }
827
828 if (rss_info->key_len > 0) {
829 ret = i40e_set_rss_key(pf->main_vsi, rss_conf->key,
830 rss_info->key_len);
831 if (ret)
832 return ret;
833
834 rss_conf->misc_reset_flags |= I40E_HASH_FLOW_RESET_FLAG_KEY;
835 }
836
837 /* Update lookup table */
838 if (rss_info->queue_num > 0) {
839 uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
840 uint32_t i, j = 0;
841
842 for (i = 0; i < hw->func_caps.rss_table_size; i++) {
843 lut[i] = (uint8_t)rss_info->queue[j];
844 j = (j == rss_info->queue_num - 1) ? 0 : (j + 1);
845 }
846
847 ret = i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);
848 if (ret)
849 return ret;
850
851 pf->hash_enabled_queues = 0;
852 for (i = 0; i < rss_info->queue_num; i++)
853 pf->hash_enabled_queues |= BIT_ULL(lut[i]);
854
855 pf->adapter->rss_reta_updated = 0;
856 rss_conf->misc_reset_flags |= I40E_HASH_FLOW_RESET_FLAG_QUEUE;
857 }
858
859 	/* The code below configures the input sets and symmetric hash
860 	 * function for the selected packet types and enables hashing on them.
861 */
862 pctypes = rss_conf->config_pctypes;
863 if (!pctypes)
864 return 0;
865
866 	/* For the first flow that enables hashing on any packet type, clear
867 	 * the RSS configuration made by legacy commands and parameters.
868 */
869 if (!pf->hash_filter_enabled) {
870 i40e_pf_disable_rss(pf);
871 pf->hash_filter_enabled = true;
872 }
873
874 do {
875 uint32_t idx = rte_bsf64(pctypes);
876 uint64_t bit = BIT_ULL(idx);
877
878 if (rss_conf->symmetric_enable) {
879 ret = i40e_hash_config_pctype_symmetric(hw, idx, true);
880 if (ret)
881 return ret;
882
883 rss_conf->reset_symmetric_pctypes |= bit;
884 }
885
886 ret = i40e_hash_config_pctype(hw, rss_conf, idx);
887 if (ret)
888 return ret;
889
890 rss_conf->reset_config_pctypes |= bit;
891 pctypes &= ~bit;
892 } while (pctypes);
893
894 return 0;
895 }
896
897 static void
898 i40e_hash_parse_key(const struct rte_flow_action_rss *rss_act,
899 struct i40e_rte_flow_rss_conf *rss_conf)
900 {
901 const uint8_t *key = rss_act->key;
902
903 if (!key || rss_act->key_len != sizeof(rss_conf->key)) {
904 const uint32_t rss_key_default[] = {0x6b793944,
905 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
906 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
907 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
908
909 if (rss_act->key_len != sizeof(rss_conf->key))
910 PMD_DRV_LOG(WARNING,
911 				"Invalid RSS key length, must be %u bytes; falling back to the default key",
912 (uint32_t)sizeof(rss_conf->key));
913
914 memcpy(rss_conf->key, rss_key_default, sizeof(rss_conf->key));
915 } else {
916 memcpy(rss_conf->key, key, sizeof(rss_conf->key));
917 }
918
919 rss_conf->conf.key = rss_conf->key;
920 rss_conf->conf.key_len = sizeof(rss_conf->key);
921 }
922
923 static int
924 i40e_hash_parse_queues(const struct rte_eth_dev *dev,
925 const struct rte_flow_action_rss *rss_act,
926 struct i40e_rte_flow_rss_conf *rss_conf,
927 struct rte_flow_error *error)
928 {
929 struct i40e_pf *pf;
930 struct i40e_hw *hw;
931 uint16_t i;
932 int max_queue;
933
934 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
935 if (!rss_act->queue_num ||
936 rss_act->queue_num > hw->func_caps.rss_table_size)
937 return rte_flow_error_set(error, EINVAL,
938 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
939 NULL, "Invalid RSS queue number");
940
941 if (rss_act->key_len)
942 PMD_DRV_LOG(WARNING,
943 			    "RSS key is ignored when queues are specified");
944
945 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
946 if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
947 max_queue = i40e_pf_calc_configured_queues_num(pf);
948 else
949 max_queue = pf->dev_data->nb_rx_queues;
950
951 max_queue = RTE_MIN(max_queue, I40E_MAX_Q_PER_TC);
952
953 for (i = 0; i < rss_act->queue_num; i++) {
954 if ((int)rss_act->queue[i] >= max_queue)
955 break;
956 }
957
958 if (i < rss_act->queue_num)
959 return rte_flow_error_set(error, EINVAL,
960 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
961 NULL, "Invalid RSS queues");
962
963 memcpy(rss_conf->queue, rss_act->queue,
964 rss_act->queue_num * sizeof(rss_conf->queue[0]));
965 rss_conf->conf.queue = rss_conf->queue;
966 rss_conf->conf.queue_num = rss_act->queue_num;
967 return 0;
968 }
969
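/* A rule whose pattern is a single VLAN item selects a queue region: the PCP
 * bits of the VLAN TCI give the user priority, and the queues listed in the
 * RSS action must form a contiguous power-of-two range that is already
 * present in the hash lookup table.
 */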
970 static int
971 i40e_hash_parse_queue_region(const struct rte_eth_dev *dev,
972 const struct rte_flow_item pattern[],
973 const struct rte_flow_action_rss *rss_act,
974 struct i40e_rte_flow_rss_conf *rss_conf,
975 struct rte_flow_error *error)
976 {
977 struct i40e_pf *pf;
978 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
979 uint64_t hash_queues;
980 uint32_t i;
981
982 if (pattern[1].type != RTE_FLOW_ITEM_TYPE_END)
983 return rte_flow_error_set(error, ENOTSUP,
984 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
985 &pattern[1],
986 "Pattern not supported.");
987
988 vlan_spec = pattern->spec;
989 vlan_mask = pattern->mask;
990 if (!vlan_spec || !vlan_mask ||
991 (rte_be_to_cpu_16(vlan_mask->tci) >> 13) != 7)
992 return rte_flow_error_set(error, EINVAL,
993 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
994 "Pattern error.");
995
996 if (!rss_act->queue)
997 return rte_flow_error_set(error, EINVAL,
998 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
999 NULL, "Queues not specified");
1000
1001 if (rss_act->key_len)
1002 PMD_DRV_LOG(WARNING,
1003 			    "RSS key is ignored when configuring a queue region");
1004
1005 /* Use a 64 bit variable to represent all queues in a region. */
1006 RTE_BUILD_BUG_ON(I40E_MAX_Q_PER_TC > 64);
1007
1008 if (!rss_act->queue_num ||
1009 !rte_is_power_of_2(rss_act->queue_num) ||
1010 rss_act->queue_num + rss_act->queue[0] > I40E_MAX_Q_PER_TC)
1011 return rte_flow_error_set(error, EINVAL,
1012 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1013 NULL, "Queue number error");
1014
1015 for (i = 1; i < rss_act->queue_num; i++) {
1016 if (rss_act->queue[i - 1] + 1 != rss_act->queue[i])
1017 break;
1018 }
1019
1020 if (i < rss_act->queue_num)
1021 return rte_flow_error_set(error, EINVAL,
1022 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1023 NULL,
1024 					  "Queues must be contiguous and in ascending order");
1025
1026 /* Map all queues to bits of uint64_t */
1027 hash_queues = (BIT_ULL(rss_act->queue[0] + rss_act->queue_num) - 1) &
1028 ~(BIT_ULL(rss_act->queue[0]) - 1);
1029
1030 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1031 if (hash_queues & ~pf->hash_enabled_queues)
1032 return rte_flow_error_set(error, EINVAL,
1033 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1034 NULL, "Some queues are not in LUT");
1035
1036 rss_conf->region_queue_num = (uint8_t)rss_act->queue_num;
1037 rss_conf->region_queue_start = rss_act->queue[0];
1038 rss_conf->region_priority = rte_be_to_cpu_16(vlan_spec->tci) >> 13;
1039 return 0;
1040 }
1041
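/* Rules with an empty pattern (or a single VLAN item) carry global RSS
 * configuration: hash function, queue region, queue list or RSS key. RSS
 * types are ignored here because no packet type is selected.
 */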
1042 static int
1043 i40e_hash_parse_global_conf(const struct rte_eth_dev *dev,
1044 const struct rte_flow_item pattern[],
1045 const struct rte_flow_action_rss *rss_act,
1046 struct i40e_rte_flow_rss_conf *rss_conf,
1047 struct rte_flow_error *error)
1048 {
1049 if (rss_act->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
1050 return rte_flow_error_set(error, EINVAL,
1051 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1052 NULL,
1053 					  "Symmetric hash function must be set together with pattern types");
1054
1055 rss_conf->conf.func = rss_act->func;
1056
1057 if (rss_act->types)
1058 PMD_DRV_LOG(WARNING,
1059 			    "RSS types are ignored when no pattern is specified");
1060
1061 if (pattern[0].type == RTE_FLOW_ITEM_TYPE_VLAN)
1062 return i40e_hash_parse_queue_region(dev, pattern, rss_act,
1063 rss_conf, error);
1064
1065 if (rss_act->queue)
1066 return i40e_hash_parse_queues(dev, rss_act, rss_conf, error);
1067
1068 if (rss_act->key_len) {
1069 i40e_hash_parse_key(rss_act, rss_conf);
1070 return 0;
1071 }
1072
1073 if (rss_act->func == RTE_ETH_HASH_FUNCTION_DEFAULT)
1074 		PMD_DRV_LOG(WARNING, "Nothing changed");
1075 return 0;
1076 }
1077
1078 static bool
1079 i40e_hash_validate_rss_types(uint64_t rss_types)
1080 {
1081 uint64_t type, mask;
1082
1083 /* Validate L2 */
1084 type = RTE_ETH_RSS_ETH & rss_types;
1085 mask = (RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY) & rss_types;
1086 if (!type && mask)
1087 return false;
1088
1089 /* Validate L3 */
1090 type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
1091 RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_IPV6 |
1092 RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
1093 mask = (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY) & rss_types;
1094 if (!type && mask)
1095 return false;
1096
1097 /* Validate L4 */
1098 type = (I40E_HASH_L4_TYPES | RTE_ETH_RSS_PORT) & rss_types;
1099 mask = (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY) & rss_types;
1100 if (!type && mask)
1101 return false;
1102
1103 return true;
1104 }
1105
1106 static int
1107 i40e_hash_parse_pattern_act(const struct rte_eth_dev *dev,
1108 const struct rte_flow_item pattern[],
1109 const struct rte_flow_action_rss *rss_act,
1110 struct i40e_rte_flow_rss_conf *rss_conf,
1111 struct rte_flow_error *error)
1112 {
1113 if (rss_act->queue)
1114 return rte_flow_error_set(error, EINVAL,
1115 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1116 NULL,
1117 					  "RSS queues are not supported when a pattern is specified");
1118
1119 switch (rss_act->func) {
1120 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1121 rss_conf->symmetric_enable = true;
1122 break;
1123 case RTE_ETH_HASH_FUNCTION_DEFAULT:
1124 case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1125 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1126 break;
1127 default:
1128 return rte_flow_error_set(error, EINVAL,
1129 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1130 NULL,
1131 "RSS hash function not supported "
1132 					  "when a pattern is specified");
1133 }
1134
1135 if (!i40e_hash_validate_rss_types(rss_act->types))
1136 return rte_flow_error_set(error, EINVAL,
1137 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1138 NULL, "RSS types are invalid");
1139
1140 if (rss_act->key_len)
1141 i40e_hash_parse_key(rss_act, rss_conf);
1142
1143 rss_conf->conf.func = rss_act->func;
1144 rss_conf->conf.types = rss_act->types;
1145 rss_conf->inset = i40e_hash_get_inset(rss_act->types);
1146
1147 return i40e_hash_get_pattern_pctypes(dev, pattern, rss_act,
1148 rss_conf, error);
1149 }
1150
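/* Entry point for rte_flow RSS (hash) rules: a single RSS action is expected,
 * RSS level must be 0, and the rule is dispatched either to the global
 * configuration parser (empty or VLAN-only pattern) or to the pattern/PCTYPE
 * parser.
 */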
1151 int
1152 i40e_hash_parse(const struct rte_eth_dev *dev,
1153 const struct rte_flow_item pattern[],
1154 const struct rte_flow_action actions[],
1155 struct i40e_rte_flow_rss_conf *rss_conf,
1156 struct rte_flow_error *error)
1157 {
1158 const struct rte_flow_action_rss *rss_act;
1159
1160 if (actions[1].type != RTE_FLOW_ACTION_TYPE_END)
1161 return rte_flow_error_set(error, EINVAL,
1162 RTE_FLOW_ERROR_TYPE_ACTION,
1163 &actions[1],
1164 "Only support one action for RSS.");
1165
1166 rss_act = (const struct rte_flow_action_rss *)actions[0].conf;
1167 if (rss_act->level)
1168 return rte_flow_error_set(error, ENOTSUP,
1169 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1170 actions,
1171 "RSS level is not supported");
1172
1173 while (pattern->type == RTE_FLOW_ITEM_TYPE_VOID)
1174 pattern++;
1175
1176 if (pattern[0].type == RTE_FLOW_ITEM_TYPE_END ||
1177 pattern[0].type == RTE_FLOW_ITEM_TYPE_VLAN)
1178 return i40e_hash_parse_global_conf(dev, pattern, rss_act,
1179 rss_conf, error);
1180
1181 return i40e_hash_parse_pattern_act(dev, pattern, rss_act,
1182 rss_conf, error);
1183 }
1184
1185 static void
1186 i40e_invalid_rss_filter(const struct i40e_rte_flow_rss_conf *ref_conf,
1187 struct i40e_rte_flow_rss_conf *conf)
1188 {
1189 uint32_t reset_flags = conf->misc_reset_flags;
1190
1191 conf->misc_reset_flags &= ~ref_conf->misc_reset_flags;
1192
1193 if ((reset_flags & I40E_HASH_FLOW_RESET_FLAG_REGION) &&
1194 (ref_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_REGION) &&
1195 (conf->region_queue_start != ref_conf->region_queue_start ||
1196 conf->region_queue_num != ref_conf->region_queue_num))
1197 conf->misc_reset_flags |= I40E_HASH_FLOW_RESET_FLAG_REGION;
1198
1199 conf->reset_config_pctypes &= ~ref_conf->reset_config_pctypes;
1200 conf->reset_symmetric_pctypes &= ~ref_conf->reset_symmetric_pctypes;
1201 }
1202
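/* Replay all cached RSS flows, e.g. after a device reset. Earlier flows whose
 * settings are overridden by a later one get the overlapping reset flags
 * cleared so that destroying them does not undo the newer configuration.
 */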
1203 int
1204 i40e_hash_filter_restore(struct i40e_pf *pf)
1205 {
1206 struct i40e_rss_filter *filter;
1207 int ret;
1208
1209 TAILQ_FOREACH(filter, &pf->rss_config_list, next) {
1210 struct i40e_rte_flow_rss_conf *rss_conf =
1211 &filter->rss_filter_info;
1212 struct i40e_rss_filter *prev;
1213
1214 rss_conf->misc_reset_flags = 0;
1215 rss_conf->reset_config_pctypes = 0;
1216 rss_conf->reset_symmetric_pctypes = 0;
1217
1218 ret = i40e_hash_config(pf, rss_conf);
1219 if (ret) {
1220 pf->hash_filter_enabled = 0;
1221 i40e_pf_disable_rss(pf);
1222 PMD_DRV_LOG(ERR,
1223 				"Re-configuring RSS failed; RSS has been disabled");
1224 return ret;
1225 }
1226
1227 		/* Invalidate previous RSS filters */
1228 TAILQ_FOREACH(prev, &pf->rss_config_list, next) {
1229 if (prev == filter)
1230 break;
1231 i40e_invalid_rss_filter(rss_conf,
1232 &prev->rss_filter_info);
1233 }
1234 }
1235
1236 return 0;
1237 }
1238
1239 int
1240 i40e_hash_filter_create(struct i40e_pf *pf,
1241 struct i40e_rte_flow_rss_conf *rss_conf)
1242 {
1243 struct i40e_rss_filter *filter, *prev;
1244 struct i40e_rte_flow_rss_conf *new_conf;
1245 int ret;
1246
1247 filter = rte_zmalloc("i40e_rss_filter", sizeof(*filter), 0);
1248 if (!filter) {
1249 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
1250 return -ENOMEM;
1251 }
1252
1253 new_conf = &filter->rss_filter_info;
1254
1255 memcpy(new_conf, rss_conf, sizeof(*new_conf));
1256 if (new_conf->conf.queue_num)
1257 new_conf->conf.queue = new_conf->queue;
1258 if (new_conf->conf.key_len)
1259 new_conf->conf.key = new_conf->key;
1260
1261 ret = i40e_hash_config(pf, new_conf);
1262 if (ret) {
1263 rte_free(filter);
1264 if (i40e_pf_config_rss(pf))
1265 return ret;
1266
1267 (void)i40e_hash_filter_restore(pf);
1268 return ret;
1269 }
1270
1271 	/* Invalidate previous RSS filters */
1272 TAILQ_FOREACH(prev, &pf->rss_config_list, next)
1273 i40e_invalid_rss_filter(new_conf, &prev->rss_filter_info);
1274
1275 TAILQ_INSERT_TAIL(&pf->rss_config_list, filter, next);
1276 return 0;
1277 }
1278
1279 static int
1280 i40e_hash_reset_conf(struct i40e_pf *pf,
1281 struct i40e_rte_flow_rss_conf *rss_conf)
1282 {
1283 struct i40e_hw *hw = &pf->adapter->hw;
1284 struct rte_eth_dev *dev;
1285 uint64_t inset;
1286 uint32_t idx;
1287 int ret;
1288
1289 if (rss_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_FUNC) {
1290 ret = i40e_hash_config_func(hw, RTE_ETH_HASH_FUNCTION_TOEPLITZ);
1291 if (ret)
1292 return ret;
1293
1294 rss_conf->misc_reset_flags &= ~I40E_HASH_FLOW_RESET_FLAG_FUNC;
1295 }
1296
1297 if (rss_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_REGION) {
1298 dev = &rte_eth_devices[pf->dev_data->port_id];
1299 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
1300 if (ret)
1301 return ret;
1302
1303 rss_conf->misc_reset_flags &= ~I40E_HASH_FLOW_RESET_FLAG_REGION;
1304 }
1305
1306 if (rss_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_KEY) {
1307 ret = i40e_pf_reset_rss_key(pf);
1308 if (ret)
1309 return ret;
1310
1311 rss_conf->misc_reset_flags &= ~I40E_HASH_FLOW_RESET_FLAG_KEY;
1312 }
1313
1314 if (rss_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_QUEUE) {
1315 if (!pf->adapter->rss_reta_updated) {
1316 ret = i40e_pf_reset_rss_reta(pf);
1317 if (ret)
1318 return ret;
1319 }
1320
1321 pf->hash_enabled_queues = 0;
1322 rss_conf->misc_reset_flags &= ~I40E_HASH_FLOW_RESET_FLAG_QUEUE;
1323 }
1324
1325 while (rss_conf->reset_config_pctypes) {
1326 idx = rte_bsf64(rss_conf->reset_config_pctypes);
1327
1328 i40e_hash_enable_pctype(hw, idx, false);
1329 inset = i40e_get_default_input_set(idx);
1330 if (inset) {
1331 ret = i40e_set_hash_inset(hw, inset, idx, false);
1332 if (ret)
1333 return ret;
1334 }
1335
1336 rss_conf->reset_config_pctypes &= ~BIT_ULL(idx);
1337 }
1338
1339 while (rss_conf->reset_symmetric_pctypes) {
1340 idx = rte_bsf64(rss_conf->reset_symmetric_pctypes);
1341
1342 ret = i40e_hash_config_pctype_symmetric(hw, idx, false);
1343 if (ret)
1344 return ret;
1345
1346 rss_conf->reset_symmetric_pctypes &= ~BIT_ULL(idx);
1347 }
1348
1349 return 0;
1350 }
1351
1352 int
1353 i40e_hash_filter_destroy(struct i40e_pf *pf,
1354 const struct i40e_rss_filter *rss_filter)
1355 {
1356 struct i40e_rss_filter *filter;
1357 int ret;
1358
1359 TAILQ_FOREACH(filter, &pf->rss_config_list, next) {
1360 if (rss_filter == filter) {
1361 ret = i40e_hash_reset_conf(pf,
1362 &filter->rss_filter_info);
1363 if (ret)
1364 return ret;
1365
1366 TAILQ_REMOVE(&pf->rss_config_list, filter, next);
1367 rte_free(filter);
1368 return 0;
1369 }
1370 }
1371
1372 return -ENOENT;
1373 }
1374
1375 int
1376 i40e_hash_filter_flush(struct i40e_pf *pf)
1377 {
1378 struct rte_flow *flow, *next;
1379
1380 RTE_TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, next) {
1381 if (flow->filter_type != RTE_ETH_FILTER_HASH)
1382 continue;
1383
1384 if (flow->rule) {
1385 struct i40e_rss_filter *filter = flow->rule;
1386 int ret;
1387
1388 ret = i40e_hash_reset_conf(pf,
1389 &filter->rss_filter_info);
1390 if (ret)
1391 return ret;
1392
1393 TAILQ_REMOVE(&pf->rss_config_list, filter, next);
1394 rte_free(filter);
1395 }
1396
1397 TAILQ_REMOVE(&pf->flow_list, flow, node);
1398 rte_free(flow);
1399 }
1400
1401 assert(!pf->rss_config_list.tqh_first);
1402 return 0;
1403 }
1404