/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"

struct sfc_flow_ops_by_spec {
        sfc_flow_parse_cb_t     *parse;
        sfc_flow_verify_cb_t    *verify;
        sfc_flow_cleanup_cb_t   *cleanup;
        sfc_flow_insert_cb_t    *insert;
        sfc_flow_remove_cb_t    *remove;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
        .parse = sfc_flow_parse_rte_to_filter,
        .verify = NULL,
        .cleanup = NULL,
        .insert = sfc_flow_filter_insert,
        .remove = sfc_flow_filter_remove,
};

static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
        .parse = sfc_flow_parse_rte_to_mae,
        .verify = sfc_mae_flow_verify,
        .cleanup = sfc_mae_flow_cleanup,
        .insert = sfc_mae_flow_insert,
        .remove = sfc_mae_flow_remove,
};

static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        const struct sfc_flow_ops_by_spec *ops = NULL;

        switch (spec->type) {
        case SFC_FLOW_SPEC_FILTER:
                ops = &sfc_flow_ops_filter;
                break;
        case SFC_FLOW_SPEC_MAE:
                ops = &sfc_flow_ops_mae;
                break;
        default:
                SFC_ASSERT(false);
                break;
        }

        return ops;
}

/*
 * Currently, the filter-based (VNIC) flow API is implemented in such
 * a manner that each flow rule is converted to one or more hardware
 * filters. All elements of the flow rule (attributes, pattern items,
 * actions) correspond to one or more fields in the efx_filter_spec_s
 * structure that describes the hardware filter.
 * If some required field is unset in the flow rule, a handful of
 * filter copies is created to cover all possible values of such
 * a field.
 */

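/*
 * Illustrative sketch (not part of the driver): a rule that matches only
 * on EtherType cannot be expressed as a single hardware filter here,
 * because the hardware also needs a destination kind. Conceptually, the
 * template is expanded into one copy per missing value:
 *
 *     template: ETHER_TYPE=0x0800
 *     copy 1:   ETHER_TYPE=0x0800 | UNKNOWN_UCAST_DST
 *     copy 2:   ETHER_TYPE=0x0800 | UNKNOWN_MCAST_DST
 *
 * See sfc_flow_set_unknown_dst_flags() below for the actual expansion.
 */
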
static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
                                     unsigned int filters_count_for_one_val,
                                     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
                                        efx_filter_spec_t *spec,
                                        struct sfc_filter *filter);

struct sfc_flow_copy_flag {
        /* EFX filter specification match flag */
        efx_filter_match_flags_t flag;
        /* Number of values of the corresponding field */
        unsigned int vals_count;
        /* Function to set values in specifications */
        sfc_flow_spec_set_vals *set_vals;
        /*
         * Function to check that the specification is suitable
         * for adding this match flag
         */
        sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the "spec" and "mask" structures for parsing
 */
int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used,
         * but if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                           "Mask should be specified");
                        return -rte_errno;
                }

                mask = def_mask;
        } else {
                mask = item->mask;
        }

        spec = item->spec;
        last = item->last;

        if (spec == NULL)
                goto exit;

        /*
         * Field values in "last" that are either 0 or equal to the
         * corresponding values in "spec" are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Supported mask for item should be specified");
                return -rte_errno;
        }

        /* Check that the mask does not request more matching than supp_mask */
        for (i = 0; i < size; i++) {
                supp = ((const uint8_t *)supp_mask)[i];

                if (~supp & mask[i]) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}

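/*
 * Usage sketch (illustrative, not part of the driver): a typical protocol
 * parser below invokes sfc_flow_parse_init() with its supported mask and
 * the generic default mask from rte_flow:
 *
 *     const struct rte_flow_item_udp *spec = NULL;
 *     const struct rte_flow_item_udp *mask = NULL;
 *
 *     rc = sfc_flow_parse_init(item,
 *                              (const void **)&spec,
 *                              (const void **)&mask,
 *                              &supp_mask, &rte_flow_item_udp_mask,
 *                              sizeof(struct rte_flow_item_udp), error);
 *     if (rc != 0)
 *             return rc;
 *     if (spec == NULL)
 *             return 0;   // the item matches any value
 *
 * On success, "spec" may be NULL (match anything) and "mask" is
 * guaranteed not to request more matching than "supp_mask" allows.
 */
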
/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. An outer frame specification may only comprise
 *   source/destination addresses and the EtherType field.
 *   An inner frame specification may contain the destination address only.
 *   The individual/group mask is supported, as well as empty and full masks.
 *   If the mask is NULL, the default mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };
        const struct rte_flow_item_eth ifrm_supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        };
        const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
                0x01, 0x00, 0x00, 0x00, 0x00, 0x00
        };
        const struct rte_flow_item_eth *supp_mask_p;
        const struct rte_flow_item_eth *def_mask_p;
        uint8_t *loc_mac = NULL;
        boolean_t is_ifrm = (efx_spec->efs_encap_type !=
                             EFX_TUNNEL_PROTOCOL_NONE);

        if (is_ifrm) {
                supp_mask_p = &ifrm_supp_mask;
                def_mask_p = &ifrm_supp_mask;
                loc_mac = efx_spec->efs_ifrm_loc_mac;
        } else {
                supp_mask_p = &supp_mask;
                def_mask_p = &rte_flow_item_eth_mask;
                loc_mac = efx_spec->efs_loc_mac;
        }

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 supp_mask_p, def_mask_p,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, could be any Ethernet */
        if (spec == NULL)
                return 0;

        if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= is_ifrm ?
                        EFX_FILTER_MATCH_IFRM_LOC_MAC :
                        EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                          EFX_MAC_ADDR_LEN) == 0) {
                if (rte_is_unicast_ether_addr(&spec->dst))
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
                else
                        efx_spec->efs_match_flags |= is_ifrm ?
                                EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
                                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
        } else if (!rte_is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        /*
         * ifrm_supp_mask ensures that the source address and
         * EtherType masks are equal to zero in the inner frame,
         * so these fields are filled in only for the outer frame
         */
        if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!rte_is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * The Ethernet type is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is used
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}

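/*
 * Example (illustrative): with the individual/group mask
 * 01:00:00:00:00:00, only the multicast bit of the destination address
 * is matched. A hypothetical testpmd-style rule such as
 *
 *     flow create 0 ingress
 *         pattern eth dst spec 01:00:00:00:00:00
 *                     dst mask 01:00:00:00:00:00 / end
 *         actions queue index 1 / end
 *
 * therefore maps to EFX_FILTER_MATCH_UNKNOWN_MCAST_DST rather than to
 * an exact MAC match.
 */
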
/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
                .inner_type = RTE_BE16(0xffff),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is used.
         * If two VLAN items are present, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                /* Apply the mask to keep the VID only */
                vid = rte_bswap16(spec->tci & mask->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN TPID matching is not supported");
                return -rte_errno;
        }
        if (mask->inner_type == supp_mask.inner_type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
        } else if (mask->inner_type) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Bad mask for VLAN inner_type");
                return -rte_errno;
        }

        return 0;
}

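/*
 * Example (illustrative): in a pattern like
 *
 *     eth / vlan vid is 100 / vlan vid is 200 / ...
 *
 * the first VLAN item sets efs_outer_vid (100), the second one sets
 * efs_inner_vid (200), and a third VLAN item is rejected with
 * "More than two VLAN items".
 */
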
/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   the protocol field are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in both the item
         * and efx_spec
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   the next header field are supported. If the mask is NULL, the
 *   default mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in both the item
         * and efx_spec
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so a byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
                   struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_udp_mask,
                                 sizeof(struct rte_flow_item_udp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by UDP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "IP proto in pattern with UDP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so a byte swap is used
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the UDP pattern item");
        return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
                                        efx_filter_spec_t *efx_spec,
                                        uint8_t ip_proto,
                                        struct rte_flow_error *error)
{
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = ip_proto;
        } else if (efx_spec->efs_ip_proto != ip_proto) {
                switch (ip_proto) {
                case EFX_IPPROTO_UDP:
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Outer IP header protocol must be UDP "
                                           "in VxLAN/GENEVE pattern");
                        return -rte_errno;

                case EFX_IPPROTO_GRE:
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Outer IP header protocol must be GRE "
                                           "in NVGRE pattern");
                        return -rte_errno;

                default:
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Only VxLAN/GENEVE/NVGRE tunneling patterns "
                                           "are supported");
                        return -rte_errno;
                }
        }

        if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
            efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Outer frame EtherType in pattern with tunneling "
                                   "must be IPv4 or IPv6");
                return -rte_errno;
        }

        return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
                                  const uint8_t *vni_or_vsid_val,
                                  const uint8_t *vni_or_vsid_mask,
                                  const struct rte_flow_item *item,
                                  struct rte_flow_error *error)
{
        const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
                0xff, 0xff, 0xff
        };

        if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
                   EFX_VNI_OR_VSID_LEN) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
                rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
                           EFX_VNI_OR_VSID_LEN);
        } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported VNI/VSID mask");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
                     struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        const struct rte_flow_item_vxlan supp_mask = {
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_vxlan_mask,
                                 sizeof(struct rte_flow_item_vxlan),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual network identifier and the
 *   protocol type fields are supported; the protocol type must be
 *   Ethernet (0x6558). If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
                      struct sfc_flow_parse_ctx *parse_ctx,
                      struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        const struct rte_flow_item_geneve supp_mask = {
                .protocol = RTE_BE16(0xffff),
                .vni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_geneve_mask,
                                 sizeof(struct rte_flow_item_geneve),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_UDP, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        if (mask->protocol == supp_mask.protocol) {
                if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "GENEVE encap. protocol must be Ethernet "
                                           "(0x6558) in the GENEVE pattern item");
                        return -rte_errno;
                }
        } else if (mask->protocol != 0) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported mask for GENEVE encap. protocol");
                return -rte_errno;
        }

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
                                               mask->vni, item, error);

        return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
                     struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
{
        int rc;
        efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        const struct rte_flow_item_nvgre supp_mask = {
                .tni = { 0xff, 0xff, 0xff }
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_nvgre_mask,
                                 sizeof(struct rte_flow_item_nvgre),
                                 error);
        if (rc != 0)
                return rc;

        rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
                                                     EFX_IPPROTO_GRE, error);
        if (rc != 0)
                return rc;

        efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

        if (spec == NULL)
                return 0;

        rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
                                               mask->tni, item, error);

        return rc;
}

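/*
 * Example (illustrative): a VXLAN pattern such as
 *
 *     eth / ipv4 / udp / vxlan vni is 42 / eth / end
 *
 * results in IP_PROTO=UDP and ENCAP_TYPE=VXLAN with VNI_OR_VSID=42 being
 * matched in the outer frame, while the trailing ETH item is parsed as
 * the inner frame and may match the destination MAC only.
 */
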
static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
                .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_nvgre,
        },
};

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(struct sfc_adapter *sa,
                    const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_flow_spec_mae *spec_mae = &spec->mae;
        struct sfc_mae *mae = &sa->mae;

        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Ingress is compulsory");
                return -rte_errno;
        }
        if (attr->transfer == 0) {
                if (attr->priority != 0) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           attr, "Priorities are unsupported");
                        return -rte_errno;
                }
                spec->type = SFC_FLOW_SPEC_FILTER;
                spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
                spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
                spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
        } else {
                if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                           attr, "Transfer is not supported");
                        return -rte_errno;
                }
                if (attr->priority > mae->nb_action_rule_prios_max) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           attr, "Unsupported priority level");
                        return -rte_errno;
                }
                spec->type = SFC_FLOW_SPEC_MAE;
                spec_mae->priority = attr->priority;
                spec_mae->match_spec = NULL;
                spec_mae->action_set = NULL;
                spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
        }

        return 0;
}

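/*
 * Sketch of the attribute mapping (illustrative): "ingress" without
 * "transfer", e.g.
 *
 *     flow create 0 ingress pattern ... / end actions ... / end
 *
 * selects the filter-based (VNIC) backend and forbids non-zero
 * priorities, whereas "transfer" selects the MAE backend (if supported)
 * and allows priorities up to mae->nb_action_rule_prios_max.
 */
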
/* Get an item from the sfc_flow_items array */
static const struct sfc_flow_item *
sfc_flow_get_item(const struct sfc_flow_item *items,
                  unsigned int nb_items,
                  enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < nb_items; i++)
                if (items[i].type == type)
                        return &items[i];

        return NULL;
}

int
sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items,
                       unsigned int nb_flow_items,
                       const struct rte_flow_item pattern[],
                       struct sfc_flow_parse_ctx *parse_ctx,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        boolean_t is_ifrm = B_FALSE;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(flow_items, nb_flow_items,
                                         pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the beginning
                 * of the pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                /*
                 * Allow only VOID and ETH pattern items in the inner frame.
                 * Also check that there is only one tunneling protocol.
                 */
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        if (is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   pattern,
                                                   "More than one tunneling protocol");
                                return -rte_errno;
                        }
                        is_ifrm = B_TRUE;
                        break;

                default:
                        if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
                            is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   pattern,
                                                   "There is an unsupported pattern item "
                                                   "in the inner frame");
                                return -rte_errno;
                        }
                        break;
                }

                if (parse_ctx->type != item->ctx_type) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Parse context type mismatch");
                        return -rte_errno;
                }

                rc = item->parse(pattern, parse_ctx, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}

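/*
 * Example (illustrative): thanks to the prev_layer/layer bookkeeping
 * above, leading protocol layers may be omitted, so the pattern
 *
 *     ipv4 / udp / end
 *
 * is accepted, while an out-of-order pattern such as "udp / ipv4 / end"
 * is rejected with "Unexpected sequence of pattern items".
 */
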
static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_rxq *rxq;
        struct sfc_rxq_info *rxq_info;

        if (queue->index >= sfc_sa2shared(sa)->rxq_count)
                return -EINVAL;

        rxq = &sa->rxq_ctrl[queue->index];
        spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;

        rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
        spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
                                            SFC_RXQ_FLAG_RSS_HASH);

        return 0;
}

static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
                   const struct rte_flow_action_rss *action_rss,
                   struct rte_flow *flow)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_rss *rss = &sas->rss;
        unsigned int rxq_sw_index;
        struct sfc_rxq *rxq;
        unsigned int rxq_hw_index_min;
        unsigned int rxq_hw_index_max;
        efx_rx_hash_type_t efx_hash_types;
        const uint8_t *rss_key;
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
        unsigned int i;

        if (action_rss->queue_num == 0)
                return -EINVAL;

        rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
        rxq = &sa->rxq_ctrl[rxq_sw_index];
        rxq_hw_index_min = rxq->hw_index;
        rxq_hw_index_max = 0;

        for (i = 0; i < action_rss->queue_num; ++i) {
                rxq_sw_index = action_rss->queue[i];

                if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
                        return -EINVAL;

                rxq = &sa->rxq_ctrl[rxq_sw_index];

                if (rxq->hw_index < rxq_hw_index_min)
                        rxq_hw_index_min = rxq->hw_index;

                if (rxq->hw_index > rxq_hw_index_max)
                        rxq_hw_index_max = rxq->hw_index;
        }

        switch (action_rss->func) {
        case RTE_ETH_HASH_FUNCTION_DEFAULT:
        case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
                break;
        default:
                return -EINVAL;
        }

        if (action_rss->level)
                return -EINVAL;

        /*
         * A dummy RSS action with only one queue and no specific settings
         * for hash types and key does not require a dedicated RSS context
         * and may be simplified to a single-queue action.
         */
        if (action_rss->queue_num == 1 && action_rss->types == 0 &&
            action_rss->key_len == 0) {
                spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
                return 0;
        }

        if (action_rss->types) {
                int rc;

                rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
                                          &efx_hash_types);
                if (rc != 0)
                        return -rc;
        } else {
                unsigned int i;

                efx_hash_types = 0;
                for (i = 0; i < rss->hf_map_nb_entries; ++i)
                        efx_hash_types |= rss->hf_map[i].efx;
        }

        if (action_rss->key_len) {
                if (action_rss->key_len != sizeof(rss->key))
                        return -EINVAL;

                rss_key = action_rss->key;
        } else {
                rss_key = rss->key;
        }

        spec_filter->rss = B_TRUE;

        sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
        sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
        sfc_rss_conf->rss_hash_types = efx_hash_types;
        rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));

        for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
                unsigned int nb_queues = action_rss->queue_num;
                unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
                struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];

                sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
        }

        return 0;
}

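/*
 * Worked example (illustrative, assuming hardware queue indices equal
 * the software ones): for action_rss->queue = {3, 5},
 * rxq_hw_index_min = 3 and rxq_hw_index_max = 5, so the indirection
 * table is filled with entries relative to the base queue:
 *
 *     rss_tbl[i] = hw_index(queue[i % 2]) - 3   ->   0, 2, 0, 2, ...
 *
 * The base queue itself is conveyed via efs_dmaq_id = rxq_hw_index_min
 * in sfc_flow_filter_insert() below.
 */
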
static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
                    unsigned int filters_count)
{
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < filters_count; i++) {
                int rc;

                rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
                if (ret == 0 && rc != 0) {
                        sfc_err(sa, "failed to remove filter specification "
                                "(rc = %d)", rc);
                        ret = rc;
                }
        }

        return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        unsigned int i;
        int rc = 0;

        for (i = 0; i < spec_filter->count; i++) {
                rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
                if (rc != 0) {
                        sfc_flow_spec_flush(sa, spec, i);
                        break;
                }
        }

        return rc;
}

static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;

        return sfc_flow_spec_flush(sa, spec, spec_filter->count);
}

static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
                       struct rte_flow *flow)
{
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_rss *rss = &sas->rss;
        struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
        struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
        uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
        boolean_t create_context;
        unsigned int i;
        int rc = 0;

        create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
                         rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);

        if (create_context) {
                unsigned int rss_spread;
                unsigned int rss_hash_types;
                uint8_t *rss_key;

                if (spec_filter->rss) {
                        rss_spread = MIN(flow_rss->rxq_hw_index_max -
                                         flow_rss->rxq_hw_index_min + 1,
                                         EFX_MAXRSS);
                        rss_hash_types = flow_rss->rss_hash_types;
                        rss_key = flow_rss->rss_key;
                } else {
                        /*
                         * Initialize dummy RSS context parameters to have
                         * a valid RSS hash. Use the default RSS hash
                         * function and key.
                         */
                        rss_spread = 1;
                        rss_hash_types = rss->hash_types;
                        rss_key = rss->key;
                }

                rc = efx_rx_scale_context_alloc(sa->nic,
                                                EFX_RX_SCALE_EXCLUSIVE,
                                                rss_spread,
                                                &efs_rss_context);
                if (rc != 0)
                        goto fail_scale_context_alloc;

                rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
                                           rss->hash_alg,
                                           rss_hash_types, B_TRUE);
                if (rc != 0)
                        goto fail_scale_mode_set;

                rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
                                          rss_key, sizeof(rss->key));
                if (rc != 0)
                        goto fail_scale_key_set;
        } else {
                efs_rss_context = rss->dummy_rss_context;
        }

        if (spec_filter->rss || spec_filter->rss_hash_required) {
                /*
                 * At this point, fully elaborated filter specifications
                 * have been produced from the template. To make sure that
                 * RSS behaviour is consistent between them, set the same
                 * RSS context value everywhere.
                 */
                for (i = 0; i < spec_filter->count; i++) {
                        efx_filter_spec_t *spec = &spec_filter->filters[i];

                        spec->efs_rss_context = efs_rss_context;
                        spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
                        if (spec_filter->rss)
                                spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
                }
        }

        rc = sfc_flow_spec_insert(sa, &flow->spec);
        if (rc != 0)
                goto fail_filter_insert;

        if (create_context) {
                unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
                unsigned int *tbl;

                tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;

                /*
                 * The scale table is set after filter insertion because
                 * the table entries are relative to the base RxQ ID
                 * and the latter is submitted to the HW by means of
                 * inserting a filter, so by the time of the request
                 * the HW knows all the information needed to verify
                 * the table entries, and the operation will succeed
                 */
                rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
                                          tbl, RTE_DIM(flow_rss->rss_tbl));
                if (rc != 0)
                        goto fail_scale_tbl_set;

                /* Remember the created dummy RSS context */
                if (!spec_filter->rss)
                        rss->dummy_rss_context = efs_rss_context;
        }

        return 0;

fail_scale_tbl_set:
        sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
        if (create_context)
                efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
        return rc;
}

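/*
 * Note on ordering in sfc_flow_filter_insert() above: the RSS context is
 * allocated and configured before the filters are inserted, but the
 * scale table is programmed only after insertion, since its entries are
 * validated by the hardware against the base RxQ ID carried by the
 * filters. The failure paths unwind in the reverse order.
 */
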
1616 static int
sfc_flow_filter_remove(struct sfc_adapter * sa,struct rte_flow * flow)1617 sfc_flow_filter_remove(struct sfc_adapter *sa,
1618 struct rte_flow *flow)
1619 {
1620 struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1621 int rc = 0;
1622
1623 rc = sfc_flow_spec_remove(sa, &flow->spec);
1624 if (rc != 0)
1625 return rc;
1626
1627 if (spec_filter->rss) {
1628 /*
1629 * All specifications for a given flow rule have the same RSS
1630 * context, so that RSS context value is taken from the first
1631 * filter specification
1632 */
1633 efx_filter_spec_t *spec = &spec_filter->filters[0];
1634
1635 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1636 }
1637
1638 return rc;
1639 }
1640
1641 static int
sfc_flow_parse_mark(struct sfc_adapter * sa,const struct rte_flow_action_mark * mark,struct rte_flow * flow)1642 sfc_flow_parse_mark(struct sfc_adapter *sa,
1643 const struct rte_flow_action_mark *mark,
1644 struct rte_flow *flow)
1645 {
1646 struct sfc_flow_spec *spec = &flow->spec;
1647 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1648 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1649
1650 if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1651 return EINVAL;
1652
1653 spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1654 spec_filter->template.efs_mark = mark->id;
1655
1656 return 0;
1657 }
1658
1659 static int
sfc_flow_parse_actions(struct sfc_adapter * sa,const struct rte_flow_action actions[],struct rte_flow * flow,struct rte_flow_error * error)1660 sfc_flow_parse_actions(struct sfc_adapter *sa,
1661 const struct rte_flow_action actions[],
1662 struct rte_flow *flow,
1663 struct rte_flow_error *error)
1664 {
1665 int rc;
1666 struct sfc_flow_spec *spec = &flow->spec;
1667 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1668 const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1669 uint32_t actions_set = 0;
1670 const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1671 (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1672 (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1673 const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1674 (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1675
1676 if (actions == NULL) {
1677 rte_flow_error_set(error, EINVAL,
1678 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1679 "NULL actions");
1680 return -rte_errno;
1681 }
1682
1683 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1684 switch (actions->type) {
1685 case RTE_FLOW_ACTION_TYPE_VOID:
1686 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1687 actions_set);
1688 break;
1689
1690 case RTE_FLOW_ACTION_TYPE_QUEUE:
1691 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1692 actions_set);
1693 if ((actions_set & fate_actions_mask) != 0)
1694 goto fail_fate_actions;
1695
1696 rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1697 if (rc != 0) {
1698 rte_flow_error_set(error, EINVAL,
1699 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1700 "Bad QUEUE action");
1701 return -rte_errno;
1702 }
1703 break;
1704
1705 case RTE_FLOW_ACTION_TYPE_RSS:
1706 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1707 actions_set);
1708 if ((actions_set & fate_actions_mask) != 0)
1709 goto fail_fate_actions;
1710
1711 rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1712 if (rc != 0) {
1713 rte_flow_error_set(error, -rc,
1714 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1715 "Bad RSS action");
1716 return -rte_errno;
1717 }
1718 break;
1719
1720 case RTE_FLOW_ACTION_TYPE_DROP:
1721 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1722 actions_set);
1723 if ((actions_set & fate_actions_mask) != 0)
1724 goto fail_fate_actions;
1725
1726 spec_filter->template.efs_dmaq_id =
1727 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1728 break;
1729
1730 case RTE_FLOW_ACTION_TYPE_FLAG:
1731 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1732 actions_set);
1733 if ((actions_set & mark_actions_mask) != 0)
1734 goto fail_actions_overlap;
1735
1736 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1737 rte_flow_error_set(error, ENOTSUP,
1738 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1739 "FLAG action is not supported on the current Rx datapath");
1740 return -rte_errno;
1741 }
1742
1743 spec_filter->template.efs_flags |=
1744 EFX_FILTER_FLAG_ACTION_FLAG;
1745 break;
1746
1747 case RTE_FLOW_ACTION_TYPE_MARK:
1748 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1749 actions_set);
1750 if ((actions_set & mark_actions_mask) != 0)
1751 goto fail_actions_overlap;
1752
1753 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1754 rte_flow_error_set(error, ENOTSUP,
1755 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1756 "MARK action is not supported on the current Rx datapath");
1757 return -rte_errno;
1758 }
1759
1760 rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1761 if (rc != 0) {
1762 rte_flow_error_set(error, rc,
1763 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1764 "Bad MARK action");
1765 return -rte_errno;
1766 }
1767 break;
1768
1769 default:
1770 rte_flow_error_set(error, ENOTSUP,
1771 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1772 "Action is not supported");
1773 return -rte_errno;
1774 }
1775
1776 actions_set |= (1UL << actions->type);
1777 }
1778
1779 /* When fate is unknown, drop traffic. */
1780 if ((actions_set & fate_actions_mask) == 0) {
1781 spec_filter->template.efs_dmaq_id =
1782 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1783 }
1784
1785 return 0;
1786
1787 fail_fate_actions:
1788 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1789 "Cannot combine several fate-deciding actions, "
1790 "choose between QUEUE, RSS or DROP");
1791 return -rte_errno;
1792
1793 fail_actions_overlap:
1794 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1795 "Overlapping actions are not supported");
1796 return -rte_errno;
1797 }

/**
 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
			       unsigned int filters_count_for_one_val,
			       struct rte_flow_error *error)
{
	unsigned int i;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec_filter->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec_filter->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}
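
/*
 * Worked example (editor's note): with filters_count_for_one_val == 2 and
 * spec_filter->count == 4, vals[i / 2] assigns UNKNOWN_UCAST_DST to
 * filters 0 and 1 and UNKNOWN_MCAST_DST to filters 2 and 3, so each of
 * the two original specifications is replicated once per possible value.
 * The other set_vals callbacks below use the same indexing scheme.
 */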

/**
 * Check that the following condition is met:
 * - the list of supported filters has a filter
 *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
				 __rte_unused efx_filter_spec_t *spec,
				 struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t match_mcast_dst;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}

/**
 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same EtherType value, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
			unsigned int filters_count_for_one_val,
			struct rte_flow_error *error)
{
	unsigned int i;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	static const uint16_t vals[] = {
		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect "
			"while copying by Ethertype");
		return -rte_errno;
	}

	for (i = 0; i < spec_filter->count; i++) {
		spec_filter->filters[i].efs_match_flags |=
			EFX_FILTER_MATCH_ETHER_TYPE;

		/*
		 * The check above ensures that
		 * filters_count_for_one_val is not 0
		 */
		spec_filter->filters[i].efs_ether_type =
			vals[i / filters_count_for_one_val];
	}

	return 0;
}
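
/*
 * Editor's note: an EtherType left unspecified by the flow rule is thus
 * effectively covered as "IPv4 or IPv6" only: one copy of each
 * specification matches EFX_ETHER_TYPE_IPV4 and the other
 * EFX_ETHER_TYPE_IPV6.
 */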

/**
 * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
 * in the same specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
			    unsigned int filters_count_for_one_val,
			    struct rte_flow_error *error)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;

	if (filters_count_for_one_val != spec_filter->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect "
			"while copying by outer VLAN ID");
		return -rte_errno;
	}

	for (i = 0; i < spec_filter->count; i++) {
		spec_filter->filters[i].efs_match_flags |=
			EFX_FILTER_MATCH_OUTER_VID;

		spec_filter->filters[i].efs_outer_vid = 0;
	}

	return 0;
}

/**
 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
				    unsigned int filters_count_for_one_val,
				    struct rte_flow_error *error)
{
	unsigned int i;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by inner frame unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec_filter->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec_filter->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

/**
 * Check that the following conditions are met:
 * - the specification corresponds to a filter for encapsulated traffic
 * - the list of supported filters has a filter
 *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
				      efx_filter_spec_t *spec,
				      struct sfc_filter *filter)
{
	unsigned int i;
	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
	efx_filter_match_flags_t match_mcast_dst;

	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
		return B_FALSE;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}

/**
 * Check that the list of supported filters has a filter that differs
 * from @p match in that it has no flag EFX_FILTER_MATCH_OUTER_VID;
 * in this case that filter will be used and the flag
 * EFX_FILTER_MATCH_OUTER_VID is not needed.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
			      __rte_unused efx_filter_spec_t *spec,
			      struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t match_without_vid =
		match & ~EFX_FILTER_MATCH_OUTER_VID;

	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_without_vid == filter->supported_match[i])
			return B_FALSE;
	}

	return B_TRUE;
}

/*
 * Match flags that can be automatically added to filters.
 * Selecting the last minimum when searching for the copy flag ensures that the
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
 * filters.
 */
static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
	{
		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_unknown_dst_flags,
		.spec_check = sfc_flow_check_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ethertypes,
		.spec_check = NULL,
	},
	{
		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_OUTER_VID,
		.vals_count = 1,
		.set_vals = sfc_flow_set_outer_vid_flag,
		.spec_check = sfc_flow_check_outer_vid_flag,
	},
};
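
/*
 * Editor's note: vals_count is the multiplier each flag contributes to
 * the number of filter specifications. For example, if a supported
 * filter requires both ETHER_TYPE and UNKNOWN_UCAST_DST on top of the
 * template, completing the spec costs 2 * 2 == 4 copies per original
 * specification.
 */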

/* Get item from array sfc_flow_copy_flags */
static const struct sfc_flow_copy_flag *
sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		if (sfc_flow_copy_flags[i].flag == flag)
			return &sfc_flow_copy_flags[i];
	}

	return NULL;
}

/**
 * Make copies of the specifications and set the match flag and the values
 * of the corresponding field in them.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param flag[in]
 *   The match flag to add.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
			     efx_filter_match_flags_t flag,
			     struct rte_flow_error *error)
{
	unsigned int i;
	unsigned int new_filters_count;
	unsigned int filters_count_for_one_val;
	const struct sfc_flow_copy_flag *copy_flag;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	int rc;

	copy_flag = sfc_flow_get_copy_flag(flag);
	if (copy_flag == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Unsupported spec field for copying");
		return -rte_errno;
	}

	new_filters_count = spec_filter->count * copy_flag->vals_count;
	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Too many EFX specifications in the flow rule");
		return -rte_errno;
	}

	/* Copy filters specifications */
	for (i = spec_filter->count; i < new_filters_count; i++) {
		spec_filter->filters[i] =
			spec_filter->filters[i - spec_filter->count];
	}

	filters_count_for_one_val = spec_filter->count;
	spec_filter->count = new_filters_count;

	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
	if (rc != 0)
		return rc;

	return 0;
}
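
/*
 * Worked example (editor's note): starting from one specification,
 * adding EFX_FILTER_MATCH_ETHER_TYPE doubles the count (1 -> 2, IPv4
 * and IPv6 copies), and subsequently adding EFX_FILTER_MATCH_OUTER_VID
 * (vals_count == 1) keeps the count, yielding {IPv4, VID 0} and
 * {IPv6, VID 0}. Growth is bounded by SF_FLOW_SPEC_NB_FILTERS_MAX.
 */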

/**
 * Check that the given set of match flags missing in the original filter spec
 * could be covered by adding spec copies which specify the corresponding
 * flags and packet field values to match.
 *
 * @param miss_flags[in]
 *   Match flags that the filter spec lacks compared to the supported filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter.
 *
 * @return
 *   The number of copies of each original specification needed to cover
 *   the missing flags, or 0 if the flags cannot be added.
 */
static unsigned int
sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
			     efx_filter_spec_t *spec,
			     struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t copy_flags = 0;
	efx_filter_match_flags_t flag;
	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
	sfc_flow_spec_check *check;
	unsigned int multiplier = 1;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		flag = sfc_flow_copy_flags[i].flag;
		check = sfc_flow_copy_flags[i].spec_check;
		if ((flag & miss_flags) == flag) {
			if (check != NULL && (!check(match, spec, filter)))
				continue;

			copy_flags |= flag;
			multiplier *= sfc_flow_copy_flags[i].vals_count;
		}
	}

	if (copy_flags == miss_flags)
		return multiplier;

	return 0;
}
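
/*
 * Editor's note: the returned multiplier is the product of vals_count
 * over the copy flags that cover miss_flags. E.g. for miss_flags ==
 * ETHER_TYPE | OUTER_VID the result is 2 * 1 == 2. If some missing flag
 * is absent from sfc_flow_copy_flags, or its spec_check callback rejects
 * the spec, copy_flags != miss_flags and 0 is returned.
 */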

/**
 * Attempt to supplement the specification template so that it matches a
 * minimally supported set of match flags. To do this, it is necessary to
 * copy the specifications, filling them with the values of fields that
 * correspond to the missing flags.
 * The necessary and sufficient filter is built from the fewest number
 * of copies which could be made to cover the minimally required set
 * of flags.
 *
 * @param sa[in]
 *   SFC adapter.
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
			       struct sfc_flow_spec *spec,
			       struct rte_flow_error *error)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_filter *filter = &sa->filter;
	efx_filter_match_flags_t miss_flags;
	efx_filter_match_flags_t min_miss_flags = 0;
	efx_filter_match_flags_t match;
	unsigned int min_multiplier = UINT_MAX;
	unsigned int multiplier;
	unsigned int i;
	int rc;

	match = spec_filter->template.efs_match_flags;
	for (i = 0; i < filter->supported_match_num; i++) {
		if ((match & filter->supported_match[i]) == match) {
			miss_flags = filter->supported_match[i] & (~match);
			multiplier = sfc_flow_check_missing_flags(miss_flags,
				&spec_filter->template, filter);
			if (multiplier > 0) {
				if (multiplier <= min_multiplier) {
					min_multiplier = multiplier;
					min_miss_flags = miss_flags;
				}
			}
		}
	}

	if (min_multiplier == UINT_MAX) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "The flow rule pattern is unsupported");
		return -rte_errno;
	}

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;

		if ((flag & min_miss_flags) == flag) {
			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}
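
/*
 * Editor's note: among all supported match sets that are supersets of the
 * template, the one requiring the fewest copies wins. Using '<=' rather
 * than '<' keeps the last minimum found which, as explained above
 * sfc_flow_copy_flags, gives EFX_FILTER_MATCH_UNKNOWN_UCAST_DST priority
 * over EFX_FILTER_MATCH_ETHER_TYPE.
 */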

/**
 * Check that a set of match flags matches the given filter pattern.
 * The pattern matches if the flags equal it exactly or differ from it
 * only by the addition of OUTER_VID, or of OUTER_VID together with
 * INNER_VID.
 *
 * @param match_flags[in]
 *   Set of match flags.
 * @param flags_pattern[in]
 *   Pattern of filter match flags.
 */
static boolean_t
sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
			    efx_filter_match_flags_t flags_pattern)
{
	if ((match_flags & flags_pattern) != flags_pattern)
		return B_FALSE;

	switch (match_flags & ~flags_pattern) {
	case 0:
	case EFX_FILTER_MATCH_OUTER_VID:
	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
		return B_TRUE;
	default:
		return B_FALSE;
	}
}
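
/*
 * Editor's note: e.g. with flags_pattern == ETHER_TYPE, match flags of
 * ETHER_TYPE, ETHER_TYPE | OUTER_VID and
 * ETHER_TYPE | OUTER_VID | INNER_VID all yield B_TRUE, whereas
 * ETHER_TYPE | IP_PROTO yields B_FALSE.
 */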

/**
 * Check whether the spec maps to a hardware filter which is known to be
 * ineffective despite being valid.
 *
 * @param filter[in]
 *   SFC filter with list of supported filters.
 * @param spec[in]
 *   SFC flow specification.
 */
static boolean_t
sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
				  struct sfc_flow_spec *spec)
{
	unsigned int i;
	uint16_t ether_type;
	uint8_t ip_proto;
	efx_filter_match_flags_t match_flags;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;

	for (i = 0; i < spec_filter->count; i++) {
		match_flags = spec_filter->filters[i].efs_match_flags;

		if (sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE) ||
		    sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE |
						EFX_FILTER_MATCH_LOC_MAC)) {
			ether_type = spec_filter->filters[i].efs_ether_type;
			if (filter->supports_ip_proto_or_addr_filter &&
			    (ether_type == EFX_ETHER_TYPE_IPV4 ||
			     ether_type == EFX_ETHER_TYPE_IPV6))
				return B_TRUE;
		} else if (sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO) ||
			   sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO |
				EFX_FILTER_MATCH_LOC_MAC)) {
			ip_proto = spec_filter->filters[i].efs_ip_proto;
			if (filter->supports_rem_or_local_port_filter &&
			    (ip_proto == EFX_IPPROTO_TCP ||
			     ip_proto == EFX_IPPROTO_UDP))
				return B_TRUE;
		}
	}

	return B_FALSE;
}
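
/*
 * Editor's note: the intent is to reject rules that would be accepted by
 * the parser yet are known not to capture traffic in practice, e.g. an
 * EtherType-only IPv4/IPv6 filter on adapters that also support IP
 * protocol or host address filters. The exact hardware reason is not
 * spelled out here; the check simply reports such specs as unsupported.
 */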

static int
sfc_flow_validate_match_flags(struct sfc_adapter *sa,
			      struct rte_flow *flow,
			      struct rte_flow_error *error)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	efx_filter_spec_t *spec_tmpl = &spec_filter->template;
	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
	int rc;

	/* Initialize the first filter spec with template */
	spec_filter->filters[0] = *spec_tmpl;
	spec_filter->count = 1;

	if (!sfc_filter_is_match_supported(sa, match_flags)) {
		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
		if (rc != 0)
			return rc;
	}

	if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
		rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"The flow rule pattern is unsupported");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_flow *flow,
			     struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_parse_ctx ctx;
	int rc;

	ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
	ctx.filter = &spec_filter->template;

	rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
				    pattern, &ctx, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_validate_match_flags(sa, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	return 0;

fail_bad_value:
	return rc;
}

static int
sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_flow *flow,
			  struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	int rc;

	rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
	if (rc != 0)
		return rc;

	rc = sfc_mae_rule_parse_actions(sa, actions, &spec_mae->action_set,
					error);
	if (rc != 0)
		return rc;

	return 0;
}

static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	const struct sfc_flow_ops_by_spec *ops;
	int rc;

	rc = sfc_flow_parse_attr(sa, attr, flow, error);
	if (rc != 0)
		return rc;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL || ops->parse == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return -rte_errno;
	}

	return ops->parse(dev, pattern, actions, flow, error);
}

static struct rte_flow *
sfc_flow_zmalloc(struct rte_flow_error *error)
{
	struct rte_flow *flow;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
	}

	return flow;
}

static void
sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
{
	const struct sfc_flow_ops_by_spec *ops;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops != NULL && ops->cleanup != NULL)
		ops->cleanup(sa, flow);

	rte_free(flow);
}

static int
sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct sfc_flow_ops_by_spec *ops;
	int rc;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL || ops->insert == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return rte_errno;
	}

	rc = ops->insert(sa, flow);
	if (rc != 0) {
		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "Failed to insert the flow rule");
	}

	return rc;
}

static int
sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct sfc_flow_ops_by_spec *ops;
	int rc;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL || ops->remove == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return rte_errno;
	}

	rc = ops->remove(sa, flow);
	if (rc != 0) {
		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "Failed to remove the flow rule");
	}

	return rc;
}

static int
sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct sfc_flow_ops_by_spec *ops;
	int rc = 0;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return -rte_errno;
	}

	if (ops->verify != NULL) {
		SFC_ASSERT(sfc_adapter_is_locked(sa));
		rc = ops->verify(sa, flow);
	}

	if (rc != 0) {
		rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Failed to verify flow validity with FW");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow;
	int rc;

	flow = sfc_flow_zmalloc(error);
	if (flow == NULL)
		return -rte_errno;

	sfc_adapter_lock(sa);

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc == 0)
		rc = sfc_flow_verify(sa, flow, error);

	sfc_flow_free(sa, flow);

	sfc_adapter_unlock(sa);

	return rc;
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow = NULL;
	int rc;

	flow = sfc_flow_zmalloc(error);
	if (flow == NULL)
		goto fail_no_mem;

	sfc_adapter_lock(sa);

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_insert(sa, flow, error);
		if (rc != 0)
			goto fail_flow_insert;
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_flow_insert:
	TAILQ_REMOVE(&sa->flow_list, flow, entries);

fail_bad_value:
	sfc_flow_free(sa, flow);
	sfc_adapter_unlock(sa);

fail_no_mem:
	return NULL;
}
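
/*
 * Editor's note: a minimal application-side sketch of reaching
 * sfc_flow_create() through the generic rte_flow API; port_id and the
 * queue index are assumptions of the example, not of the driver:
 *
 *	struct rte_flow_error error;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = RTE_BE16(0xffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &error);
 *
 * rte_flow_create() dispatches to sfc_flow_create() via sfc_flow_ops;
 * if the adapter is not started yet, the rule is kept on sa->flow_list
 * and inserted later by sfc_flow_start().
 */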

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	if (sa->state == SFC_ADAPTER_STARTED)
		rc = sfc_flow_remove(sa, flow, error);

	TAILQ_REMOVE(&sa->flow_list, flow, entries);
	sfc_flow_free(sa, flow);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			int rc;

			rc = sfc_flow_remove(sa, flow, error);
			if (rc != 0)
				ret = rc;
		}

		TAILQ_REMOVE(&sa->flow_list, flow, entries);
		sfc_flow_free(sa, flow);
	}

	sfc_adapter_unlock(sa);

	return -ret;
}

static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int ret = 0;

	sfc_adapter_lock(sa);
	if (sa->state != SFC_ADAPTER_INITIALIZED) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "please close the port first");
		ret = -rte_errno;
	} else {
		sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
	}
	sfc_adapter_unlock(sa);

	return ret;
}

const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
	.isolate = sfc_flow_isolate,
};

void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->flow_list, flow, entries);
		sfc_flow_free(sa, flow);
	}
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->flow_list, entries)
		sfc_flow_remove(sa, flow, NULL);

	if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
		efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
		rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	}
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->flow_list, entries) {
		rc = sfc_flow_insert(sa, flow, NULL);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}