/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_common.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_switch.h"

static int
sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
			    efx_mport_sel_t *mportp)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
					      mportp);
}

int
sfc_mae_attach(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_mae_switch_port_request switch_port_request = {0};
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_mport_sel_t entity_mport;
	struct sfc_mae *mae = &sa->mae;
	efx_mae_limits_t limits;
	int rc;

	sfc_log_init(sa, "entry");

	if (!encp->enc_mae_supported) {
		mae->status = SFC_MAE_STATUS_UNSUPPORTED;
		return 0;
	}

	sfc_log_init(sa, "init MAE");
	rc = efx_mae_init(sa->nic);
	if (rc != 0)
		goto fail_mae_init;

	sfc_log_init(sa, "get MAE limits");
	rc = efx_mae_get_limits(sa->nic, &limits);
	if (rc != 0)
		goto fail_mae_get_limits;

	sfc_log_init(sa, "assign entity MPORT");
	rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
	if (rc != 0)
		goto fail_mae_assign_entity_mport;

	sfc_log_init(sa, "assign RTE switch domain");
	rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
	if (rc != 0)
		goto fail_mae_assign_switch_domain;

	sfc_log_init(sa, "assign RTE switch port");
	switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
	switch_port_request.entity_mportp = &entity_mport;
	/*
	 * As of now, the driver does not support representors, so
	 * RTE ethdev MPORT simply matches that of the entity.
	 */
	switch_port_request.ethdev_mportp = &entity_mport;
	switch_port_request.ethdev_port_id = sas->port_id;
	rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
					&switch_port_request,
					&mae->switch_port_id);
	if (rc != 0)
		goto fail_mae_assign_switch_port;

	mae->status = SFC_MAE_STATUS_SUPPORTED;
	mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
	mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
	mae->encap_types_supported = limits.eml_encap_types_supported;
	TAILQ_INIT(&mae->outer_rules);
	TAILQ_INIT(&mae->action_sets);

	sfc_log_init(sa, "done");

	return 0;

fail_mae_assign_switch_port:
fail_mae_assign_switch_domain:
fail_mae_assign_entity_mport:
fail_mae_get_limits:
	efx_mae_fini(sa->nic);

fail_mae_init:
	sfc_log_init(sa, "failed %d", rc);

	return rc;
}

void
sfc_mae_detach(struct sfc_adapter *sa)
{
	struct sfc_mae *mae = &sa->mae;
	enum sfc_mae_status status_prev = mae->status;

	sfc_log_init(sa, "entry");

	mae->nb_action_rule_prios_max = 0;
	mae->status = SFC_MAE_STATUS_UNKNOWN;

	if (status_prev != SFC_MAE_STATUS_SUPPORTED)
		return;

	efx_mae_fini(sa->nic);

	sfc_log_init(sa, "done");
}

static struct sfc_mae_outer_rule *
sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
			  const efx_mae_match_spec_t *match_spec,
			  efx_tunnel_protocol_t encap_type)
{
	struct sfc_mae_outer_rule *rule;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
		if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
		    rule->encap_type == encap_type) {
			++(rule->refcnt);
			return rule;
		}
	}

	return NULL;
}

static int
sfc_mae_outer_rule_add(struct sfc_adapter *sa,
		       efx_mae_match_spec_t *match_spec,
		       efx_tunnel_protocol_t encap_type,
		       struct sfc_mae_outer_rule **rulep)
{
	struct sfc_mae_outer_rule *rule;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
	if (rule == NULL)
		return ENOMEM;

	rule->refcnt = 1;
	rule->match_spec = match_spec;
	rule->encap_type = encap_type;

	rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);

	*rulep = rule;

	return 0;
}

static void
sfc_mae_outer_rule_del(struct sfc_adapter *sa,
		       struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(rule->refcnt != 0);

	--(rule->refcnt);

	if (rule->refcnt != 0)
		return;

	SFC_ASSERT(rule->fw_rsrc.rule_id.id == EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(rule->fw_rsrc.refcnt == 0);

	efx_mae_match_spec_fini(sa->nic, rule->match_spec);

	TAILQ_REMOVE(&mae->outer_rules, rule, entries);
	rte_free(rule);
}

static int
sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
			  struct sfc_mae_outer_rule *rule,
			  efx_mae_match_spec_t *match_spec_action)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(rule->match_spec != NULL);

		rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
					       rule->encap_type,
					       &fw_rsrc->rule_id);
		if (rc != 0)
			return rc;
	}

	rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
						  &fw_rsrc->rule_id);
	if (rc != 0) {
		if (fw_rsrc->refcnt == 0) {
			(void)efx_mae_outer_rule_remove(sa->nic,
							&fw_rsrc->rule_id);
		}
		return rc;
	}

	++(fw_rsrc->refcnt);

	return 0;
}

static int
sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
			   struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(fw_rsrc->refcnt != 0);

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
		if (rc != 0)
			return rc;

		fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	--(fw_rsrc->refcnt);

	return 0;
}
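
/*
 * Note on the reference counting scheme above (and for action sets below):
 * each entry carries two counts. The entry's own refcnt tracks how many
 * flows point at the driver-side entry (attach()/add() vs. del()), while
 * fw_rsrc.refcnt tracks how many enabled flows rely on the FW resource
 * (enable() vs. disable()). The FW resource is allocated on the 0 -> 1
 * transition of the latter and freed on its 1 -> 0 transition.
 */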

static struct sfc_mae_action_set *
sfc_mae_action_set_attach(struct sfc_adapter *sa,
			  const efx_mae_actions_t *spec)
{
	struct sfc_mae_action_set *action_set;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
		if (efx_mae_action_set_specs_equal(action_set->spec, spec)) {
			++(action_set->refcnt);
			return action_set;
		}
	}

	return NULL;
}

static int
sfc_mae_action_set_add(struct sfc_adapter *sa,
		       efx_mae_actions_t *spec,
		       struct sfc_mae_action_set **action_setp)
{
	struct sfc_mae_action_set *action_set;
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
	if (action_set == NULL)
		return ENOMEM;

	action_set->refcnt = 1;
	action_set->spec = spec;

	action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;

	TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);

	*action_setp = action_set;

	return 0;
}

static void
sfc_mae_action_set_del(struct sfc_adapter *sa,
		       struct sfc_mae_action_set *action_set)
{
	struct sfc_mae *mae = &sa->mae;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(action_set->refcnt != 0);

	--(action_set->refcnt);

	if (action_set->refcnt != 0)
		return;

	SFC_ASSERT(action_set->fw_rsrc.aset_id.id == EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(action_set->fw_rsrc.refcnt == 0);

	efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
	TAILQ_REMOVE(&mae->action_sets, action_set, entries);
	rte_free(action_set);
}

static int
sfc_mae_action_set_enable(struct sfc_adapter *sa,
			  struct sfc_mae_action_set *action_set)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (fw_rsrc->refcnt == 0) {
		SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
		SFC_ASSERT(action_set->spec != NULL);

		rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
					      &fw_rsrc->aset_id);
		if (rc != 0)
			return rc;
	}

	++(fw_rsrc->refcnt);

	return 0;
}

static int
sfc_mae_action_set_disable(struct sfc_adapter *sa,
			   struct sfc_mae_action_set *action_set)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(fw_rsrc->aset_id.id != EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(fw_rsrc->refcnt != 0);

	if (fw_rsrc->refcnt == 1) {
		rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
		if (rc != 0)
			return rc;

		fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	--(fw_rsrc->refcnt);

	return 0;
}

void
sfc_mae_flow_cleanup(struct sfc_adapter *sa,
		     struct rte_flow *flow)
{
	struct sfc_flow_spec *spec;
	struct sfc_flow_spec_mae *spec_mae;

	if (flow == NULL)
		return;

	spec = &flow->spec;

	if (spec == NULL)
		return;

	spec_mae = &spec->mae;

	SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);

	if (spec_mae->outer_rule != NULL)
		sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);

	if (spec_mae->action_set != NULL)
		sfc_mae_action_set_del(sa, spec_mae->action_set);

	if (spec_mae->match_spec != NULL)
		efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
}

static int
sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
{
	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	const efx_mae_field_id_t field_ids[] = {
		EFX_MAE_FIELD_VLAN0_PROTO_BE,
		EFX_MAE_FIELD_VLAN1_PROTO_BE,
	};
	const struct sfc_mae_ethertype *et;
	unsigned int i;
	int rc;

	/*
	 * In accordance with RTE flow API convention, the innermost L2
	 * item's "type" ("inner_type") is a L3 EtherType. If there is
	 * no L3 item, it's 0x0000/0x0000.
	 */
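	/*
	 * For example (illustration only): the pattern
	 * ETH(type=0x88A8) / VLAN(inner_type=0x8100) /
	 * VLAN(inner_type=0x0800) / IPV4 yields
	 * VLAN0_PROTO=0x88A8, VLAN1_PROTO=0x8100 and ETHER_TYPE=0x0800.
	 */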
	et = &pdata->ethertypes[pdata->nb_vlan_tags];
	rc = efx_mae_match_spec_field_set(ctx->match_spec,
					  fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
					  sizeof(et->value),
					  (const uint8_t *)&et->value,
					  sizeof(et->mask),
					  (const uint8_t *)&et->mask);
	if (rc != 0)
		return rc;

	/*
	 * sfc_mae_rule_parse_item_vlan() has already made sure
	 * that pdata->nb_vlan_tags does not exceed this figure.
	 */
	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	for (i = 0; i < pdata->nb_vlan_tags; ++i) {
		et = &pdata->ethertypes[i];

		rc = efx_mae_match_spec_field_set(ctx->match_spec,
						  fremap[field_ids[i]],
						  sizeof(et->value),
						  (const uint8_t *)&et->value,
						  sizeof(et->mask),
						  (const uint8_t *)&et->mask);
		if (rc != 0)
			return rc;
	}

	return 0;
}

static int
sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
				  struct rte_flow_error *error)
{
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
	struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
	const rte_be16_t supported_tpids[] = {
		/* VLAN standard TPID (always the first element) */
		RTE_BE16(RTE_ETHER_TYPE_VLAN),

		/* Double-tagging TPIDs */
		RTE_BE16(RTE_ETHER_TYPE_QINQ),
		RTE_BE16(RTE_ETHER_TYPE_QINQ1),
		RTE_BE16(RTE_ETHER_TYPE_QINQ2),
		RTE_BE16(RTE_ETHER_TYPE_QINQ3),
	};
	unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
	unsigned int ethertype_idx;
	const uint8_t *valuep;
	const uint8_t *maskp;
	int rc;

	if (pdata->innermost_ethertype_restriction.mask != 0 &&
	    pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
		/*
		 * If a single item VLAN is followed by a L3 item, value
		 * of "type" in item ETH can't be a double-tagging TPID.
		 */
		nb_supported_tpids = 1;
	}

	/*
	 * sfc_mae_rule_parse_item_vlan() has already made sure
	 * that pdata->nb_vlan_tags does not exceed this figure.
	 */
	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	for (ethertype_idx = 0;
	     ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
		unsigned int tpid_idx;

		/* Exact match is supported only. */
		if (ethertypes[ethertype_idx].mask != RTE_BE16(0xffff)) {
			rc = EINVAL;
			goto fail;
		}
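
		/*
		 * The loop start index below skips the standard TPID for
		 * the outermost tag of a double-tagged packet: such a tag
		 * must use a double-tagging TPID, whereas the innermost
		 * tag is restricted to the standard TPID once an outer
		 * one has been consumed (nb_supported_tpids is trimmed
		 * to 1 at the end of each iteration).
		 */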
		for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
		     tpid_idx < nb_supported_tpids; ++tpid_idx) {
			if (ethertypes[ethertype_idx].value ==
			    supported_tpids[tpid_idx])
				break;
		}

		if (tpid_idx == nb_supported_tpids) {
			rc = EINVAL;
			goto fail;
		}

		nb_supported_tpids = 1;
	}

	if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
		struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];

		if (et->mask == 0) {
			et->mask = RTE_BE16(0xffff);
			et->value =
			    pdata->innermost_ethertype_restriction.value;
		} else if (et->mask != RTE_BE16(0xffff) ||
			   et->value !=
			   pdata->innermost_ethertype_restriction.value) {
			rc = EINVAL;
			goto fail;
		}
	}

	/*
	 * Now that the number of VLAN tags is known, set fields
	 * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
	 * one is a valid L3 EtherType (or 0x0000/0x0000) and the
	 * last two are valid TPIDs (or 0x0000/0x0000).
	 */
	rc = sfc_mae_set_ethertypes(ctx);
	if (rc != 0)
		goto fail;

	if (pdata->l3_next_proto_restriction_mask == 0xff) {
		if (pdata->l3_next_proto_mask == 0) {
			pdata->l3_next_proto_mask = 0xff;
			pdata->l3_next_proto_value =
			    pdata->l3_next_proto_restriction_value;
		} else if (pdata->l3_next_proto_mask != 0xff ||
			   pdata->l3_next_proto_value !=
			   pdata->l3_next_proto_restriction_value) {
			rc = EINVAL;
			goto fail;
		}
	}

	valuep = (const uint8_t *)&pdata->l3_next_proto_value;
	maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
	rc = efx_mae_match_spec_field_set(ctx->match_spec,
					  fremap[EFX_MAE_FIELD_IP_PROTO],
					  sizeof(pdata->l3_next_proto_value),
					  valuep,
					  sizeof(pdata->l3_next_proto_mask),
					  maskp);
	if (rc != 0)
		goto fail;

	return 0;

fail:
	return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				  "Failed to process pattern data");
}

static int
sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
				struct sfc_flow_parse_ctx *ctx,
				struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const struct rte_flow_item_port_id supp_mask = {
		.id = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_port_id_mask;
	const struct rte_flow_item_port_id *spec = NULL;
	const struct rte_flow_item_port_id *mask = NULL;
	efx_mport_sel_t mport_sel;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_port_id), error);
	if (rc != 0)
		return rc;

	if (mask->id != supp_mask.id) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the PORT_ID pattern item");
	}

	/* If "spec" is not set, could be any port ID */
	if (spec == NULL)
		return 0;

	if (spec->id > UINT16_MAX) {
		return rte_flow_error_set(error, EOVERFLOW,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "The port ID is too large");
	}

	rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
					   spec->id, &mport_sel);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't find RTE ethdev by the port ID");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
					  &mport_sel, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the port ID");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}

static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
				 struct sfc_flow_parse_ctx *ctx,
				 struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const struct rte_flow_item_phy_port supp_mask = {
		.index = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_phy_port_mask;
	const struct rte_flow_item_phy_port *spec = NULL;
	const struct rte_flow_item_phy_port *mask = NULL;
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_phy_port), error);
	if (rc != 0)
		return rc;

	if (mask->index != supp_mask.index) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the PHY_PORT pattern item");
	}

	/* If "spec" is not set, could be any physical port */
	if (spec == NULL)
		return 0;

	rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PHY_PORT index");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PHY_PORT");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}

static int
sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
			   struct sfc_flow_parse_ctx *ctx,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
					    &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PF ID");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PF");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}

static int
sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
			   struct sfc_flow_parse_ctx *ctx,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
	const struct rte_flow_item_vf supp_mask = {
		.id = 0xffffffff,
	};
	const void *def_mask = &rte_flow_item_vf_mask;
	const struct rte_flow_item_vf *spec = NULL;
	const struct rte_flow_item_vf *mask = NULL;
	efx_mport_sel_t mport_v;
	int rc;

	if (ctx_mae->match_mport_set) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't handle multiple traffic source items");
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(struct rte_flow_item_vf), error);
	if (rc != 0)
		return rc;

	if (mask->id != supp_mask.id) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad mask in the VF pattern item");
	}

	/*
	 * If "spec" is not set, the item requests any VF related to the
	 * PF of the current DPDK port (but not the PF itself).
	 * Reject this match criterion as unsupported.
	 */
	if (spec == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Bad spec in the VF pattern item");
	}

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to convert the PF + VF IDs");
	}

	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Failed to set MPORT for the PF + VF");
	}

	ctx_mae->match_mport_set = B_TRUE;

	return 0;
}

/*
 * Having this field ID in a field locator means that this
 * locator cannot be used to actually set the field at the
 * time when the corresponding item gets encountered. Such
 * fields get stashed in the parsing context instead. This
 * is required to resolve dependencies between the stashed
 * fields. See sfc_mae_rule_process_pattern_data().
 */
#define SFC_MAE_FIELD_HANDLING_DEFERRED	EFX_MAE_FIELD_NIDS

struct sfc_mae_field_locator {
	efx_mae_field_id_t		field_id;
	size_t				size;
	/* Field offset in the corresponding rte_flow_item_ struct */
	size_t				ofst;
};
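
/*
 * For illustration: the flocs_eth[] locator
 * { EFX_MAE_FIELD_ETH_DADDR_BE, sizeof(dst), offsetof(..., dst) }
 * tells sfc_mae_parse_item() to copy "size" bytes at offset "ofst"
 * from both the item spec and the item mask into the MAE match field
 * ETH_DADDR_BE (possibly remapped, see the field_ids_remap registries).
 */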

static void
sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
			     unsigned int nb_field_locators, void *mask_ptr,
			     size_t mask_size)
{
	unsigned int i;

	memset(mask_ptr, 0, mask_size);

	for (i = 0; i < nb_field_locators; ++i) {
		const struct sfc_mae_field_locator *fl = &field_locators[i];

		SFC_ASSERT(fl->ofst + fl->size <= mask_size);
		memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
	}
}

static int
sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
		   unsigned int nb_field_locators, const uint8_t *spec,
		   const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
		   struct rte_flow_error *error)
{
	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
	unsigned int i;
	int rc = 0;

	for (i = 0; i < nb_field_locators; ++i) {
		const struct sfc_mae_field_locator *fl = &field_locators[i];

		if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
			continue;

		rc = efx_mae_match_spec_field_set(ctx->match_spec,
						  fremap[fl->field_id],
						  fl->size, spec + fl->ofst,
						  fl->size, mask + fl->ofst);
		if (rc != 0)
			break;
	}

	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Failed to process item fields");
	}

	return rc;
}

static const struct sfc_mae_field_locator flocs_eth[] = {
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
		offsetof(struct rte_flow_item_eth, type),
	},
	{
		EFX_MAE_FIELD_ETH_DADDR_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
		offsetof(struct rte_flow_item_eth, dst),
	},
	{
		EFX_MAE_FIELD_ETH_SADDR_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
		offsetof(struct rte_flow_item_eth, src),
	},
};

static int
sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct rte_flow_item_eth supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth), error);
	if (rc != 0)
		return rc;

	if (spec != NULL) {
		struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
		struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
		const struct rte_flow_item_eth *item_spec;
		const struct rte_flow_item_eth *item_mask;

		item_spec = (const struct rte_flow_item_eth *)spec;
		item_mask = (const struct rte_flow_item_eth *)mask;

		ethertypes[0].value = item_spec->type;
		ethertypes[0].mask = item_mask->type;
	} else {
		/*
		 * The specification is empty. This is wrong if there are
		 * more network pattern items in line. Other than that,
		 * any Ethernet frame can match. All of that is checked
		 * at the end of parsing.
		 */
		return 0;
	}

	return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
				  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_vlan[] = {
	/* Outermost tag */
	{
		EFX_MAE_FIELD_VLAN0_TCI_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
		offsetof(struct rte_flow_item_vlan, tci),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
		offsetof(struct rte_flow_item_vlan, inner_type),
	},

	/* Innermost tag */
	{
		EFX_MAE_FIELD_VLAN1_TCI_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
		offsetof(struct rte_flow_item_vlan, tci),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
		offsetof(struct rte_flow_item_vlan, inner_type),
	},
};

static int
sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	const struct sfc_mae_field_locator *flocs;
	struct rte_flow_item_vlan supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	unsigned int nb_flocs;
	int rc;

	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);

	if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Can't match that many VLAN tags");
	}

	nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
	flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;

	/* If parsing fails, this can remain incremented. */
	++pdata->nb_vlan_tags;

	sfc_mae_item_build_supp_mask(flocs, nb_flocs,
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_vlan_mask,
				 sizeof(struct rte_flow_item_vlan), error);
	if (rc != 0)
		return rc;

	if (spec != NULL) {
		struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
		const struct rte_flow_item_vlan *item_spec;
		const struct rte_flow_item_vlan *item_mask;

		item_spec = (const struct rte_flow_item_vlan *)spec;
		item_mask = (const struct rte_flow_item_vlan *)mask;

		ethertypes[pdata->nb_vlan_tags].value = item_spec->inner_type;
		ethertypes[pdata->nb_vlan_tags].mask = item_mask->inner_type;
	} else {
		/*
		 * The specification is empty. This is wrong if there are
		 * more network pattern items in line. Other than that,
		 * any VLAN tag can match. All of that is checked at the
		 * end of parsing.
		 */
		return 0;
	}

	return sfc_mae_parse_item(flocs, nb_flocs, spec, mask, ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_ipv4[] = {
	{
		EFX_MAE_FIELD_SRC_IP4_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
		offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
	},
	{
		EFX_MAE_FIELD_DST_IP4_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
		offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
		offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
	},
	{
		EFX_MAE_FIELD_IP_TOS,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
				 hdr.type_of_service),
		offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
	},
	{
		EFX_MAE_FIELD_IP_TTL,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
		offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
	},
};

static int
sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_ipv4 supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4), error);
	if (rc != 0)
		return rc;

	pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

	if (spec != NULL) {
		const struct rte_flow_item_ipv4 *item_spec;
		const struct rte_flow_item_ipv4 *item_mask;

		item_spec = (const struct rte_flow_item_ipv4 *)spec;
		item_mask = (const struct rte_flow_item_ipv4 *)mask;

		pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
		pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
	} else {
		return 0;
	}

	return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
				  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_ipv6[] = {
	{
		EFX_MAE_FIELD_SRC_IP6_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
		offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
	},
	{
		EFX_MAE_FIELD_DST_IP6_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
		offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
	},
	{
		/*
		 * This locator is used only for building supported fields mask.
		 * The field is handled by sfc_mae_rule_process_pattern_data().
		 */
		SFC_MAE_FIELD_HANDLING_DEFERRED,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
		offsetof(struct rte_flow_item_ipv6, hdr.proto),
	},
	{
		EFX_MAE_FIELD_IP_TTL,
		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
		offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
	},
};

static int
sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
			     struct sfc_flow_parse_ctx *ctx,
			     struct rte_flow_error *error)
{
	rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_ipv6 supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	rte_be32_t vtc_flow_be;
	uint32_t vtc_flow;
	uint8_t tc_value;
	uint8_t tc_mask;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
				     &supp_mask, sizeof(supp_mask));

	vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
	memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6), error);
	if (rc != 0)
		return rc;

	pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);

	if (spec != NULL) {
		const struct rte_flow_item_ipv6 *item_spec;
		const struct rte_flow_item_ipv6 *item_mask;

		item_spec = (const struct rte_flow_item_ipv6 *)spec;
		item_mask = (const struct rte_flow_item_ipv6 *)mask;

		pdata->l3_next_proto_value = item_spec->hdr.proto;
		pdata->l3_next_proto_mask = item_mask->hdr.proto;
	} else {
		return 0;
	}

	rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
				ctx_mae, error);
	if (rc != 0)
		return rc;

	memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
	tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

	memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
	tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;

	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
					  fremap[EFX_MAE_FIELD_IP_TOS],
					  sizeof(tc_value), &tc_value,
					  sizeof(tc_mask), &tc_mask);
	if (rc != 0) {
		return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Failed to process item fields");
	}

	return 0;
}

static const struct sfc_mae_field_locator flocs_tcp[] = {
	{
		EFX_MAE_FIELD_L4_SPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
		offsetof(struct rte_flow_item_tcp, hdr.src_port),
	},
	{
		EFX_MAE_FIELD_L4_DPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
		offsetof(struct rte_flow_item_tcp, hdr.dst_port),
	},
	{
		EFX_MAE_FIELD_TCP_FLAGS_BE,
		/*
		 * The values have been picked intentionally since the
		 * target MAE field is oversize (16 bit). This mapping
		 * relies on the fact that the MAE field is big-endian.
		 */
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
		offsetof(struct rte_flow_item_tcp, hdr.data_off),
	},
};
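
/*
 * For illustration (layout assumed by the TCP flags locator above):
 * the two consecutive header bytes { hdr.data_off, hdr.tcp_flags } are
 * fed to the 16-bit big-endian TCP_FLAGS_BE field as its high and low
 * bytes respectively, which is why a single locator can cover both.
 */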

static int
sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_tcp supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	/*
	 * When encountered among outermost items, item TCP is invalid.
	 * Check which match specification is being constructed now.
	 */
	if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "TCP in outer frame is invalid");
	}

	sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp), error);
	if (rc != 0)
		return rc;

	pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
	pdata->l3_next_proto_restriction_mask = 0xff;

	if (spec == NULL)
		return 0;

	return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
				  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_udp[] = {
	{
		EFX_MAE_FIELD_L4_SPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
		offsetof(struct rte_flow_item_udp, hdr.src_port),
	},
	{
		EFX_MAE_FIELD_L4_DPORT_BE,
		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
		offsetof(struct rte_flow_item_udp, hdr.dst_port),
	},
};

static int
sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
			    struct sfc_flow_parse_ctx *ctx,
			    struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
	struct rte_flow_item_udp supp_mask;
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	int rc;

	sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
				     &supp_mask, sizeof(supp_mask));

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp), error);
	if (rc != 0)
		return rc;

	pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
	pdata->l3_next_proto_restriction_mask = 0xff;

	if (spec == NULL)
		return 0;

	return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
				  ctx_mae, error);
}

static const struct sfc_mae_field_locator flocs_tunnel[] = {
	{
		/*
		 * The size and offset values are relevant
		 * for Geneve and NVGRE, too.
		 */
		.size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
		.ofst = offsetof(struct rte_flow_item_vxlan, vni),
	},
};
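
/*
 * Note: .field_id is intentionally left unset in flocs_tunnel[]: the
 * locator is used only to build the supported fields mask, and the
 * VNI / VSID itself is set explicitly via EFX_MAE_FIELD_ENC_VNET_ID_BE
 * in sfc_mae_rule_parse_item_tunnel() below.
 */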

/*
 * An auxiliary registry which allows using non-encap. field IDs
 * directly when building a match specification of type ACTION.
 *
 * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
 */
static const efx_mae_field_id_t field_ids_no_remap[] = {
#define FIELD_ID_NO_REMAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field

	FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
	FIELD_ID_NO_REMAP(ETH_SADDR_BE),
	FIELD_ID_NO_REMAP(ETH_DADDR_BE),
	FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
	FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
	FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
	FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
	FIELD_ID_NO_REMAP(SRC_IP4_BE),
	FIELD_ID_NO_REMAP(DST_IP4_BE),
	FIELD_ID_NO_REMAP(IP_PROTO),
	FIELD_ID_NO_REMAP(IP_TOS),
	FIELD_ID_NO_REMAP(IP_TTL),
	FIELD_ID_NO_REMAP(SRC_IP6_BE),
	FIELD_ID_NO_REMAP(DST_IP6_BE),
	FIELD_ID_NO_REMAP(L4_SPORT_BE),
	FIELD_ID_NO_REMAP(L4_DPORT_BE),
	FIELD_ID_NO_REMAP(TCP_FLAGS_BE),

#undef FIELD_ID_NO_REMAP
};

/*
 * An auxiliary registry which allows using "ENC" field IDs
 * when building a match specification of type OUTER.
 *
 * See sfc_mae_rule_encap_parse_init().
 */
static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
#define FIELD_ID_REMAP_TO_ENCAP(_field) \
	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field

	FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
	FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
	FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
	FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
	FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
	FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
	FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
	FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
	FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
	FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
	FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
	FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
	FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),

#undef FIELD_ID_REMAP_TO_ENCAP
};
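
/*
 * For illustration: with field_ids_remap_to_encap in effect (outermost
 * items), fremap[EFX_MAE_FIELD_ETHER_TYPE_BE] resolves to
 * EFX_MAE_FIELD_ENC_ETHER_TYPE_BE, whereas with field_ids_no_remap it
 * resolves to EFX_MAE_FIELD_ETHER_TYPE_BE itself.
 */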

static int
sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
			       struct sfc_flow_parse_ctx *ctx,
			       struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
	uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
	uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
	const struct rte_flow_item_vxlan *vxp;
	uint8_t supp_mask[sizeof(uint64_t)];
	const uint8_t *spec = NULL;
	const uint8_t *mask = NULL;
	const void *def_mask;
	int rc;

	/*
	 * We're about to start processing inner frame items.
	 * Process pattern data that has been deferred so far
	 * and reset pattern data storage.
	 */
	rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
	if (rc != 0)
		return rc;

	memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));

	sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
				     &supp_mask, sizeof(supp_mask));

	/*
	 * This tunnel item was preliminarily detected by
	 * sfc_mae_rule_encap_parse_init(). Default mask
	 * was also picked by that helper. Use it here.
	 */
	def_mask = ctx_mae->tunnel_def_mask;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec, (const void **)&mask,
				 (const void *)&supp_mask, def_mask,
				 sizeof(def_mask), error);
	if (rc != 0)
		return rc;

	/*
	 * This item and later ones comprise a
	 * match specification of type ACTION.
	 */
	ctx_mae->match_spec = ctx_mae->match_spec_action;

	/* This item and later ones use non-encap. EFX MAE field IDs. */
	ctx_mae->field_ids_remap = field_ids_no_remap;

	if (spec == NULL)
		return 0;

	/*
	 * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
	 * Copy 24-bit VNI, which is BE, at offset 1 in it.
	 * The extra byte is 0 both in the mask and in the value.
	 */
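	/*
	 * Resulting layout, for illustration (big-endian byte order):
	 *
	 *   vnet_id_v / vnet_id_m:  byte 0   byte 1   byte 2   byte 3
	 *                           0x00     vni[0]   vni[1]   vni[2]
	 */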
	vxp = (const struct rte_flow_item_vxlan *)spec;
	memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));

	vxp = (const struct rte_flow_item_vxlan *)mask;
	memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));

	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
					  EFX_MAE_FIELD_ENC_VNET_ID_BE,
					  sizeof(vnet_id_v), vnet_id_v,
					  sizeof(vnet_id_m), vnet_id_m);
	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Failed to set VXLAN VNI");
	}

	return rc;
}

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_port_id,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_phy_port,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PF,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_pf,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VF,
		/*
		 * In terms of RTE flow, this item is a META one,
		 * and its position in the pattern is don't care.
		 */
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_vf,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tunnel,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tunnel,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
		.parse = sfc_mae_rule_parse_item_tunnel,
	},
};
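
/*
 * Note: the tunnel items (VXLAN, GENEVE, NVGRE) set .layer to
 * SFC_FLOW_ITEM_START_LAYER, so the inner frame is parsed as a fresh
 * L2-onwards sequence starting right after the tunnel item.
 */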

static int
sfc_mae_rule_process_outer(struct sfc_adapter *sa,
			   struct sfc_mae_parse_ctx *ctx,
			   struct sfc_mae_outer_rule **rulep,
			   struct rte_flow_error *error)
{
	struct sfc_mae_outer_rule *rule;
	int rc;

	if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
		*rulep = NULL;
		return 0;
	}

	SFC_ASSERT(ctx->match_spec_outer != NULL);

	if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "Inconsistent pattern (outer)");
	}

	*rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
					   ctx->encap_type);
	if (*rulep != NULL) {
		efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
	} else {
		rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
					    ctx->encap_type, rulep);
		if (rc != 0) {
			return rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					"Failed to process the pattern");
		}
	}

	/* The spec has now been tracked by the outer rule entry. */
	ctx->match_spec_outer = NULL;

	/*
	 * Depending on whether we reuse an existing outer rule or create a
	 * new one (see above), outer rule ID is either a valid value or
	 * EFX_MAE_RSRC_ID_INVALID. Set it in the action rule match
	 * specification (and the full mask, too) in order to have correct
	 * class comparisons of the new rule with existing ones.
	 * Also, action rule match specification will be validated shortly,
	 * and having the full mask set for outer rule ID indicates that we
	 * will use this field, and support for this field has to be checked.
	 */
	rule = *rulep;
	rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
						  &rule->fw_rsrc.rule_id);
	if (rc != 0) {
		sfc_mae_outer_rule_del(sa, *rulep);
		*rulep = NULL;

		return rte_flow_error_set(error, rc,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "Failed to process the pattern");
	}

	return 0;
}

static int
sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
			      const struct rte_flow_item pattern[],
			      struct sfc_mae_parse_ctx *ctx,
			      struct rte_flow_error *error)
{
	struct sfc_mae *mae = &sa->mae;
	int rc;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (;;) {
		switch (pattern->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
			ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
			RTE_BUILD_BUG_ON(sizeof(ctx->tunnel_def_mask) !=
					 sizeof(rte_flow_item_vxlan_mask));
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE:
			ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
			ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
			RTE_BUILD_BUG_ON(sizeof(ctx->tunnel_def_mask) !=
					 sizeof(rte_flow_item_geneve_mask));
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
			ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
			RTE_BUILD_BUG_ON(sizeof(ctx->tunnel_def_mask) !=
					 sizeof(rte_flow_item_nvgre_mask));
			break;
		case RTE_FLOW_ITEM_TYPE_END:
			break;
		default:
			++pattern;
			continue;
		}

		break;
	}

	if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
		return 0;

	if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  pattern, "Unsupported tunnel item");
	}

	if (ctx->priority >= mae->nb_outer_rule_prios_max) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL, "Unsupported priority level");
	}

	rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_OUTER, ctx->priority,
				     &ctx->match_spec_outer);
	if (rc != 0) {
		return rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_ITEM, pattern,
			"Failed to initialise outer rule match specification");
	}

	/* Outermost items comprise a match specification of type OUTER. */
	ctx->match_spec = ctx->match_spec_outer;

	/* Outermost items use "ENC" EFX MAE field IDs. */
	ctx->field_ids_remap = field_ids_remap_to_encap;

	return 0;
}

static void
sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
			      struct sfc_mae_parse_ctx *ctx)
{
	if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
		return;

	if (ctx->match_spec_outer != NULL)
		efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
}

int
sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
			   const struct rte_flow_item pattern[],
			   struct sfc_flow_spec_mae *spec,
			   struct rte_flow_error *error)
{
	struct sfc_mae_parse_ctx ctx_mae;
	struct sfc_flow_parse_ctx ctx;
	int rc;

	memset(&ctx_mae, 0, sizeof(ctx_mae));
	ctx_mae.priority = spec->priority;
	ctx_mae.sa = sa;

	rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
				     spec->priority,
				     &ctx_mae.match_spec_action);
	if (rc != 0) {
		rc = rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Failed to initialise action rule match specification");
		goto fail_init_match_spec_action;
	}

	/*
	 * As a preliminary setting, assume that there is no encapsulation
	 * in the pattern. That is, pattern items are about to comprise a
	 * match specification of type ACTION and use non-encap. field IDs.
	 *
	 * sfc_mae_rule_encap_parse_init() below may override this.
	 */
	ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
	ctx_mae.match_spec = ctx_mae.match_spec_action;
	ctx_mae.field_ids_remap = field_ids_no_remap;

	ctx.type = SFC_FLOW_PARSE_CTX_MAE;
	ctx.mae = &ctx_mae;

	rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
	if (rc != 0)
		goto fail_encap_parse_init;

	rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
				    pattern, &ctx, error);
	if (rc != 0)
		goto fail_parse_pattern;

	rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
	if (rc != 0)
		goto fail_process_pattern_data;

	rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
	if (rc != 0)
		goto fail_process_outer;

	if (!efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
		rc = rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					"Inconsistent pattern");
		goto fail_validate_match_spec_action;
	}

	spec->match_spec = ctx_mae.match_spec_action;

	return 0;

fail_validate_match_spec_action:
fail_process_outer:
fail_process_pattern_data:
fail_parse_pattern:
	sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);

fail_encap_parse_init:
	efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);

fail_init_match_spec_action:
	return rc;
}
1805
/*
 * An action supported by MAE may correspond to a bundle of RTE flow actions,
 * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_VLAN_SET_VID + OF_VLAN_SET_PCP.
 * That is, related RTE flow actions need to be tracked as parts of a whole
 * so that they can be combined into a single action and submitted to the
 * MAE representation of a given rule's action set.
 *
 * Each RTE flow action provided by an application gets classified as
 * belonging to some bundle type. If an action is not supposed to belong
 * to any bundle, or if this action is END, it is described as belonging
 * to a dummy bundle of type EMPTY.
 *
 * The currently tracked bundle is submitted whenever a repeated action
 * or an action of a different bundle type follows.
 */

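/*
 * For instance (an illustrative sketch, not an exhaustive contract),
 * the action list
 *
 *   OF_PUSH_VLAN (ethertype 0x8100) / OF_SET_VLAN_VID (vid 100) /
 *   OF_SET_VLAN_PCP (pcp 3) / PORT_ID / END
 *
 * is tracked as a single VLAN_PUSH bundle covering the first three
 * actions; the PORT_ID action that follows belongs to a different
 * bundle type, so the sync step submits the pending bundle as one
 * MAE VLAN push with TPID 0x8100 and TCI 0x6064.
 */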
enum sfc_mae_actions_bundle_type {
	SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
	SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
};

struct sfc_mae_actions_bundle {
	enum sfc_mae_actions_bundle_type	type;

	/* Indicates actions already tracked by the current bundle */
	uint64_t				actions_mask;

	/* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
	rte_be16_t				vlan_push_tpid;
	rte_be16_t				vlan_push_tci;
};

/*
 * Combine the configuration of RTE flow actions tracked by the bundle
 * into a single action and submit the result to the MAE action set
 * specification. Do nothing in the case of a dummy action bundle.
 */
static int
sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
			      efx_mae_actions_t *spec)
{
	int rc = 0;

	switch (bundle->type) {
	case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
		break;
	case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
		rc = efx_mae_action_set_populate_vlan_push(
			spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
		break;
	default:
		SFC_ASSERT(B_FALSE);
		break;
	}

	return rc;
}

/*
 * Given the type of the next RTE flow action in the line, decide
 * whether a new bundle is about to start, and, if this is the case,
 * submit and reset the current bundle.
 */
static int
sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
			    struct sfc_mae_actions_bundle *bundle,
			    efx_mae_actions_t *spec,
			    struct rte_flow_error *error)
{
	enum sfc_mae_actions_bundle_type bundle_type_new;
	int rc;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
		break;
	default:
		/*
		 * Self-sufficient actions, including END, are handled in this
		 * case. No checks for unsupported actions are needed here
		 * because parsing doesn't occur at this point.
		 */
		bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
		break;
	}

	if (bundle_type_new != bundle->type ||
	    (bundle->actions_mask & (1ULL << action->type)) != 0) {
		rc = sfc_mae_actions_bundle_submit(bundle, spec);
		if (rc != 0)
			goto fail_submit;

		memset(bundle, 0, sizeof(*bundle));
	}

	bundle->type = bundle_type_new;

	return 0;

fail_submit:
	return rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"Failed to request the (group of) action(s)");
}

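/* Record the TPID requested by OF_PUSH_VLAN in the current bundle. */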
static void
sfc_mae_rule_parse_action_of_push_vlan(
			    const struct rte_flow_action_of_push_vlan *conf,
			    struct sfc_mae_actions_bundle *bundle)
{
	bundle->vlan_push_tpid = conf->ethertype;
}

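/* Fold the 12-bit VID from OF_SET_VLAN_VID into the bundle's TCI. */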
static void
sfc_mae_rule_parse_action_of_set_vlan_vid(
			    const struct rte_flow_action_of_set_vlan_vid *conf,
			    struct sfc_mae_actions_bundle *bundle)
{
	bundle->vlan_push_tci |= (conf->vlan_vid &
				  rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
}

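/*
 * Fold the 3-bit PCP from OF_SET_VLAN_PCP into bits 15:13 of the
 * bundle's TCI. For example, PCP 3 contributes 3 << 13 = 0x6000, so
 * combined with VID 100 (0x064) the resulting TCI is 0x6064.
 */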
static void
sfc_mae_rule_parse_action_of_set_vlan_pcp(
			    const struct rte_flow_action_of_set_vlan_pcp *conf,
			    struct sfc_mae_actions_bundle *bundle)
{
	uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
					   RTE_LEN2MASK(3, uint8_t)) << 13;

	bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
}

static int
sfc_mae_rule_parse_action_mark(const struct rte_flow_action_mark *conf,
			       efx_mae_actions_t *spec)
{
	return efx_mae_action_set_populate_mark(spec, conf->id);
}

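/*
 * Translate a PHY_PORT action into an MAE "deliver" action. With
 * "original" set, the adapter's own assigned port is used instead
 * of the index from the action configuration.
 */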
static int
sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
				   const struct rte_flow_action_phy_port *conf,
				   efx_mae_actions_t *spec)
{
	efx_mport_sel_t mport;
	uint32_t phy_port;
	int rc;

	if (conf->original != 0)
		phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
	else
		phy_port = conf->index;

	rc = efx_mae_mport_by_phy_port(phy_port, &mport);
	if (rc != 0)
		return rc;

	return efx_mae_action_set_populate_deliver(spec, &mport);
}

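/*
 * Translate PF and VF actions into an MAE "deliver" action. A NULL
 * configuration means delivery to the PF itself (VF index invalid);
 * otherwise, either the adapter's own VF or the one named by the
 * action configuration is used.
 */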
static int
sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
				const struct rte_flow_action_vf *vf_conf,
				efx_mae_actions_t *spec)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_mport_sel_t mport;
	uint32_t vf;
	int rc;

	if (vf_conf == NULL)
		vf = EFX_PCI_VF_INVALID;
	else if (vf_conf->original != 0)
		vf = encp->enc_vf;
	else
		vf = vf_conf->id;

	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
	if (rc != 0)
		return rc;

	return efx_mae_action_set_populate_deliver(spec, &mport);
}

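/*
 * Translate a PORT_ID action into an MAE "deliver" action by looking
 * up the MPORT of the ethdev known to the RTE switch domain.
 */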
static int
sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
				  const struct rte_flow_action_port_id *conf,
				  efx_mae_actions_t *spec)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_mae *mae = &sa->mae;
	efx_mport_sel_t mport;
	uint16_t port_id;
	int rc;

	port_id = (conf->original != 0) ? sas->port_id : conf->id;

	rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
					   port_id, &mport);
	if (rc != 0)
		return rc;

	return efx_mae_action_set_populate_deliver(spec, &mport);
}

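/*
 * Parse a single RTE flow action. Bundled actions only update the
 * current bundle here; self-sufficient actions are populated into
 * the MAE action set specification straight away.
 */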
static int
sfc_mae_rule_parse_action(struct sfc_adapter *sa,
			  const struct rte_flow_action *action,
			  struct sfc_mae_actions_bundle *bundle,
			  efx_mae_actions_t *spec,
			  struct rte_flow_error *error)
{
	int rc = 0;

	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
				       bundle->actions_mask);
		rc = efx_mae_action_set_populate_vlan_pop(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
				       bundle->actions_mask);
		sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
		break;
	case RTE_FLOW_ACTION_TYPE_FLAG:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
				       bundle->actions_mask);
		rc = efx_mae_action_set_populate_flag(spec);
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_mark(action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_PF:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_PORT_ID:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
				       bundle->actions_mask);
		rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
				       bundle->actions_mask);
		rc = efx_mae_action_set_populate_drop(spec);
		break;
	default:
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				"Unsupported action");
	}

	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
					NULL, "Failed to request the action");
	} else {
		bundle->actions_mask |= (1ULL << action->type);
	}

	return rc;
}

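/*
 * Parse the whole action list into an MAE action set. Note the extra
 * bundle sync after the loop: it flushes a bundle that may still be
 * pending when the END action is reached.
 */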
int
sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
			   const struct rte_flow_action actions[],
			   struct sfc_mae_action_set **action_setp,
			   struct rte_flow_error *error)
{
	struct sfc_mae_actions_bundle bundle = {0};
	const struct rte_flow_action *action;
	efx_mae_actions_t *spec;
	int rc;

	if (actions == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				"NULL actions");
	}

	rc = efx_mae_action_set_spec_init(sa->nic, &spec);
	if (rc != 0)
		goto fail_action_set_spec_init;

	for (action = actions;
	     action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
		rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
		if (rc != 0)
			goto fail_rule_parse_action;

		rc = sfc_mae_rule_parse_action(sa, action, &bundle, spec,
					       error);
		if (rc != 0)
			goto fail_rule_parse_action;
	}

	rc = sfc_mae_actions_bundle_sync(action, &bundle, spec, error);
	if (rc != 0)
		goto fail_rule_parse_action;

	*action_setp = sfc_mae_action_set_attach(sa, spec);
	if (*action_setp != NULL) {
		efx_mae_action_set_spec_fini(sa->nic, spec);
		return 0;
	}

	rc = sfc_mae_action_set_add(sa, spec, action_setp);
	if (rc != 0)
		goto fail_action_set_add;

	return 0;

fail_action_set_add:
fail_rule_parse_action:
	efx_mae_action_set_spec_fini(sa->nic, spec);

fail_action_set_spec_init:
	if (rc > 0) {
		rc = rte_flow_error_set(error, rc,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			NULL, "Failed to process the action");
	}
	return rc;
}

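/*
 * Check whether two match specifications belong to the same rule class
 * as far as the FW is concerned. A comparison failure is conservatively
 * treated as "not the same class".
 */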
static bool
sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
			const efx_mae_match_spec_t *left,
			const efx_mae_match_spec_t *right)
{
	bool have_same_class;
	int rc;

	rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
					   &have_same_class);

	return (rc == 0) ? have_same_class : false;
}

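/*
 * Check the class of an outer rule against those of the already active
 * outer rules; a match means the FW is known to accept this class.
 */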
static int
sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
				struct sfc_mae_outer_rule *rule)
{
	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
	struct sfc_mae_outer_rule *entry;
	struct sfc_mae *mae = &sa->mae;

	if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
		/* An active rule is reused. Its class is known to be valid. */
		return 0;
	}

	TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
			      sfc_mae_outer_rules, entries) {
		const efx_mae_match_spec_t *left = entry->match_spec;
		const efx_mae_match_spec_t *right = rule->match_spec;

		if (entry == rule)
			continue;

		if (sfc_mae_rules_class_cmp(sa, left, right))
			return 0;
	}

	sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
		     "support for outer frame pattern items is not guaranteed; "
		     "other than that, the items are valid from SW standpoint");
	return 0;
}

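/*
 * Likewise, check the class of an action rule against those of the
 * already inserted MAE flows; VNIC-level (filter) flows are skipped.
 */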
static int
sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
				 struct sfc_flow_spec_mae *spec)
{
	const struct rte_flow *entry;

	TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
		const struct sfc_flow_spec *entry_spec = &entry->spec;
		const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
		const efx_mae_match_spec_t *left = es_mae->match_spec;
		const efx_mae_match_spec_t *right = spec->match_spec;

		switch (entry_spec->type) {
		case SFC_FLOW_SPEC_FILTER:
			/* Ignore VNIC-level flows */
			break;
		case SFC_FLOW_SPEC_MAE:
			if (sfc_mae_rules_class_cmp(sa, left, right))
				return 0;
			break;
		default:
			SFC_ASSERT(false);
		}
	}

	sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
		     "support for inner frame pattern items is not guaranteed; "
		     "other than that, the items are valid from SW standpoint");
	return 0;
}

/**
 * Confirm that a given flow can be accepted by the FW.
 *
 * @param sa
 *   Software adapter context
 * @param flow
 *   Flow to be verified
 * @return
 *   Zero on success, or a non-zero error code otherwise.
 *   A special value of EAGAIN indicates that the adapter is
 *   not in the started state. The started state is required
 *   because it only makes sense to compare the class of the
 *   flow being validated with the classes of the active rules,
 *   which are known to be accepted by the FW.
 */
int
sfc_mae_flow_verify(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state != SFC_ADAPTER_STARTED)
		return EAGAIN;

	if (outer_rule != NULL) {
		rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
		if (rc != 0)
			return rc;
	}

	return sfc_mae_action_rule_class_verify(sa, spec_mae);
}

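/*
 * Enable the backing resources (outer rule, action set) and insert
 * the action rule into the FW, rolling everything back on failure.
 */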
int
sfc_mae_flow_insert(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	struct sfc_mae_action_set *action_set = spec_mae->action_set;
	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
	int rc;

	SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(action_set != NULL);

	if (outer_rule != NULL) {
		rc = sfc_mae_outer_rule_enable(sa, outer_rule,
					       spec_mae->match_spec);
		if (rc != 0)
			goto fail_outer_rule_enable;
	}

	rc = sfc_mae_action_set_enable(sa, action_set);
	if (rc != 0)
		goto fail_action_set_enable;

	rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
					NULL, &fw_rsrc->aset_id,
					&spec_mae->rule_id);
	if (rc != 0)
		goto fail_action_rule_insert;

	return 0;

fail_action_rule_insert:
	(void)sfc_mae_action_set_disable(sa, action_set);

fail_action_set_enable:
	if (outer_rule != NULL)
		(void)sfc_mae_outer_rule_disable(sa, outer_rule);

fail_outer_rule_enable:
	return rc;
}

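/*
 * Remove the action rule from the FW and disable the backing
 * resources in reverse order of their activation.
 */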
int
sfc_mae_flow_remove(struct sfc_adapter *sa,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae_action_set *action_set = spec_mae->action_set;
	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
	int rc;

	SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
	SFC_ASSERT(action_set != NULL);

	rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
	if (rc != 0)
		return rc;

	spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;

	rc = sfc_mae_action_set_disable(sa, action_set);
	if (rc != 0) {
		sfc_err(sa, "failed to disable the action set (rc = %d)", rc);
		/* Despite the error, proceed with outer rule removal. */
	}

	if (outer_rule != NULL)
		return sfc_mae_outer_rule_disable(sa, outer_rule);

	return 0;
}