xref: /dpdk/drivers/net/sfc/sfc_mae.c (revision 29fd052d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2021 Xilinx, Inc.
4  * Copyright(c) 2019 Solarflare Communications Inc.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9 
10 #include <stdbool.h>
11 
12 #include <rte_bitops.h>
13 #include <rte_common.h>
14 #include <rte_vxlan.h>
15 
16 #include "efx.h"
17 
18 #include "sfc.h"
19 #include "sfc_flow_tunnel.h"
20 #include "sfc_mae_counter.h"
21 #include "sfc_log.h"
22 #include "sfc_switch.h"
23 #include "sfc_service.h"
24 
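/*
 * Derive the m-port selector which corresponds to the PCIe function
 * (PF/VF) backing this ethdev.
 */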
25 static int
26 sfc_mae_assign_ethdev_mport(struct sfc_adapter *sa,
27 			    efx_mport_sel_t *mportp)
28 {
29 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
30 
31 	return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
32 					      mportp);
33 }
34 
35 static int
36 sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
37 			    efx_mport_sel_t *mportp)
38 {
39 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
40 	int rc = 0;
41 
42 	if (encp->enc_mae_admin) {
43 		/*
44 		 * This ethdev sits on MAE admin PF. The represented
45 		 * entity is the network port assigned to that PF.
46 		 */
47 		rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, mportp);
48 	} else {
49 		/*
50 		 * This ethdev sits on unprivileged PF / VF. The entity
51 		 * represented by the ethdev can change dynamically
52 		 * as MAE admin changes default traffic rules.
53 		 *
54 		 * For the sake of simplicity, do not fill in the m-port
55 		 * and assume that flow rules should not be allowed to
56 		 * reference the entity represented by this ethdev.
57 		 */
58 		efx_mae_mport_invalid(mportp);
59 	}
60 
61 	return rc;
62 }
63 
64 static int
65 sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
66 			      uint32_t nb_counters_max)
67 {
68 	return sfc_mae_counters_init(&registry->counters, nb_counters_max);
69 }
70 
71 static void
72 sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
73 {
74 	sfc_mae_counters_fini(&registry->counters);
75 }
76 
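/*
 * Find an unused slot in the table of driver-internal MAE rules.
 * Fails with ENOSPC when all SFC_MAE_NB_RULES_MAX slots are in use.
 */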
77 static int
78 sfc_mae_internal_rule_find_empty_slot(struct sfc_adapter *sa,
79 				      struct sfc_mae_rule **rule)
80 {
81 	struct sfc_mae *mae = &sa->mae;
82 	struct sfc_mae_internal_rules *internal_rules = &mae->internal_rules;
83 	unsigned int entry;
84 	int rc;
85 
86 	for (entry = 0; entry < SFC_MAE_NB_RULES_MAX; entry++) {
87 		if (internal_rules->rules[entry].spec == NULL)
88 			break;
89 	}
90 
91 	if (entry == SFC_MAE_NB_RULES_MAX) {
92 		rc = ENOSPC;
93 		sfc_err(sa, "failed: too many rules (%u rules used)", entry);
94 		goto fail_too_many_rules;
95 	}
96 
97 	*rule = &internal_rules->rules[entry];
98 
99 	return 0;
100 
101 fail_too_many_rules:
102 	return rc;
103 }
104 
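/*
 * Install a driver-internal MAE action rule which matches on the given
 * source m-port and delivers matching traffic to the given destination
 * m-port. A negative priority selects the numerically largest priority
 * level available.
 */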
105 int
106 sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
107 				     const efx_mport_sel_t *mport_match,
108 				     const efx_mport_sel_t *mport_deliver,
109 				     int prio, struct sfc_mae_rule **rulep)
110 {
111 	struct sfc_mae *mae = &sa->mae;
112 	struct sfc_mae_rule *rule;
113 	int rc;
114 
115 	sfc_log_init(sa, "entry");
116 
117 	if (prio > 0 && (unsigned int)prio >= mae->nb_action_rule_prios_max) {
118 		rc = EINVAL;
119 		sfc_err(sa, "failed: invalid priority %d (max %u)", prio,
120 			mae->nb_action_rule_prios_max);
121 		goto fail_invalid_prio;
122 	}
123 	if (prio < 0)
124 		prio = mae->nb_action_rule_prios_max - 1;
125 
126 	rc = sfc_mae_internal_rule_find_empty_slot(sa, &rule);
127 	if (rc != 0)
128 		goto fail_find_empty_slot;
129 
130 	sfc_log_init(sa, "init MAE match spec");
131 	rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
132 				     (uint32_t)prio, &rule->spec);
133 	if (rc != 0) {
134 		sfc_err(sa, "failed to init MAE match spec");
135 		goto fail_match_init;
136 	}
137 
138 	rc = efx_mae_match_spec_mport_set(rule->spec, mport_match, NULL);
139 	if (rc != 0) {
140 		sfc_err(sa, "failed to set MAE match m-port selector");
141 		goto fail_mport_set;
142 	}
143 
144 	rc = efx_mae_action_set_spec_init(sa->nic, &rule->actions);
145 	if (rc != 0) {
146 		sfc_err(sa, "failed to init MAE action set");
147 		goto fail_action_init;
148 	}
149 
150 	rc = efx_mae_action_set_populate_deliver(rule->actions,
151 						 mport_deliver);
152 	if (rc != 0) {
153 		sfc_err(sa, "failed to populate deliver action");
154 		goto fail_populate_deliver;
155 	}
156 
157 	rc = efx_mae_action_set_alloc(sa->nic, rule->actions,
158 				      &rule->action_set);
159 	if (rc != 0) {
160 		sfc_err(sa, "failed to allocate action set");
161 		goto fail_action_set_alloc;
162 	}
163 
164 	rc = efx_mae_action_rule_insert(sa->nic, rule->spec, NULL,
165 					&rule->action_set,
166 					&rule->rule_id);
167 	if (rc != 0) {
168 		sfc_err(sa, "failed to insert action rule");
169 		goto fail_rule_insert;
170 	}
171 
172 	*rulep = rule;
173 
174 	sfc_log_init(sa, "done");
175 
176 	return 0;
177 
178 fail_rule_insert:
179 	efx_mae_action_set_free(sa->nic, &rule->action_set);
180 
181 fail_action_set_alloc:
182 fail_populate_deliver:
183 	efx_mae_action_set_spec_fini(sa->nic, rule->actions);
184 
185 fail_action_init:
186 fail_mport_set:
187 	efx_mae_match_spec_fini(sa->nic, rule->spec);
188 
189 fail_match_init:
190 fail_find_empty_slot:
191 fail_invalid_prio:
192 	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
193 	return rc;
194 }
195 
196 void
197 sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule)
198 {
199 	if (rule == NULL || rule->spec == NULL)
200 		return;
201 
202 	efx_mae_action_rule_remove(sa->nic, &rule->rule_id);
203 	efx_mae_action_set_free(sa->nic, &rule->action_set);
204 	efx_mae_action_set_spec_fini(sa->nic, rule->actions);
205 	efx_mae_match_spec_fini(sa->nic, rule->spec);
206 
207 	rule->spec = NULL;
208 }
209 
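/*
 * Attach MAE support: probe FW capability; on MAE admin PFs, initialise
 * the MAE, query its limits, set up the counter registry and allocate the
 * encap. header bounce buffer; on all ports, register the ethdev with the
 * switch domain/port infrastructure.
 */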
210 int
211 sfc_mae_attach(struct sfc_adapter *sa)
212 {
213 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
214 	struct sfc_mae_switch_port_request switch_port_request = {0};
215 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
216 	efx_mport_sel_t ethdev_mport;
217 	efx_mport_sel_t entity_mport;
218 	struct sfc_mae *mae = &sa->mae;
219 	struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
220 	efx_mae_limits_t limits;
221 	int rc;
222 
223 	sfc_log_init(sa, "entry");
224 
225 	if (!encp->enc_mae_supported) {
226 		mae->status = SFC_MAE_STATUS_UNSUPPORTED;
227 		return 0;
228 	}
229 
230 	if (encp->enc_mae_admin) {
231 		sfc_log_init(sa, "init MAE");
232 		rc = efx_mae_init(sa->nic);
233 		if (rc != 0)
234 			goto fail_mae_init;
235 
236 		sfc_log_init(sa, "get MAE limits");
237 		rc = efx_mae_get_limits(sa->nic, &limits);
238 		if (rc != 0)
239 			goto fail_mae_get_limits;
240 
241 		sfc_log_init(sa, "init MAE counter registry");
242 		rc = sfc_mae_counter_registry_init(&mae->counter_registry,
243 						   limits.eml_max_n_counters);
244 		if (rc != 0) {
245 			sfc_err(sa, "failed to init MAE counter registry for %u entries: %s",
246 				limits.eml_max_n_counters, rte_strerror(rc));
247 			goto fail_counter_registry_init;
248 		}
249 	}
250 
251 	sfc_log_init(sa, "assign ethdev MPORT");
252 	rc = sfc_mae_assign_ethdev_mport(sa, &ethdev_mport);
253 	if (rc != 0)
254 		goto fail_mae_assign_ethdev_mport;
255 
256 	sfc_log_init(sa, "assign entity MPORT");
257 	rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
258 	if (rc != 0)
259 		goto fail_mae_assign_entity_mport;
260 
261 	sfc_log_init(sa, "assign RTE switch domain");
262 	rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
263 	if (rc != 0)
264 		goto fail_mae_assign_switch_domain;
265 
266 	sfc_log_init(sa, "assign RTE switch port");
267 	switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
268 	switch_port_request.ethdev_mportp = &ethdev_mport;
269 	switch_port_request.entity_mportp = &entity_mport;
270 	switch_port_request.ethdev_port_id = sas->port_id;
271 	switch_port_request.port_data.indep.mae_admin =
272 		encp->enc_mae_admin == B_TRUE;
273 	rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
274 					&switch_port_request,
275 					&mae->switch_port_id);
276 	if (rc != 0)
277 		goto fail_mae_assign_switch_port;
278 
279 	if (encp->enc_mae_admin) {
280 		sfc_log_init(sa, "allocate encap. header bounce buffer");
281 		bounce_eh->buf_size = limits.eml_encap_header_size_limit;
282 		bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
283 					    bounce_eh->buf_size, 0);
284 		if (bounce_eh->buf == NULL) {
			rc = ENOMEM;
285 			goto fail_mae_alloc_bounce_eh;
		}
286 
287 		mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
288 		mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
289 		mae->encap_types_supported = limits.eml_encap_types_supported;
290 	}
291 
292 	TAILQ_INIT(&mae->outer_rules);
293 	TAILQ_INIT(&mae->mac_addrs);
294 	TAILQ_INIT(&mae->encap_headers);
295 	TAILQ_INIT(&mae->action_sets);
296 
297 	if (encp->enc_mae_admin)
298 		mae->status = SFC_MAE_STATUS_ADMIN;
299 	else
300 		mae->status = SFC_MAE_STATUS_SUPPORTED;
301 
302 	sfc_log_init(sa, "done");
303 
304 	return 0;
305 
306 fail_mae_alloc_bounce_eh:
307 fail_mae_assign_switch_port:
308 fail_mae_assign_switch_domain:
309 fail_mae_assign_entity_mport:
310 fail_mae_assign_ethdev_mport:
311 	if (encp->enc_mae_admin)
312 		sfc_mae_counter_registry_fini(&mae->counter_registry);
313 
314 fail_counter_registry_init:
315 fail_mae_get_limits:
316 	if (encp->enc_mae_admin)
317 		efx_mae_fini(sa->nic);
318 
319 fail_mae_init:
320 	sfc_log_init(sa, "failed %d", rc);
321 
322 	return rc;
323 }
324 
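/*
 * Undo sfc_mae_attach(). The bounce buffer, the counter registry and the
 * MAE itself are torn down only if this PF was the MAE admin.
 */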
325 void
326 sfc_mae_detach(struct sfc_adapter *sa)
327 {
328 	struct sfc_mae *mae = &sa->mae;
329 	enum sfc_mae_status status_prev = mae->status;
330 
331 	sfc_log_init(sa, "entry");
332 
333 	mae->nb_action_rule_prios_max = 0;
334 	mae->status = SFC_MAE_STATUS_UNKNOWN;
335 
336 	if (status_prev != SFC_MAE_STATUS_ADMIN)
337 		return;
338 
339 	rte_free(mae->bounce_eh.buf);
340 	sfc_mae_counter_registry_fini(&mae->counter_registry);
341 
342 	efx_mae_fini(sa->nic);
343 
344 	sfc_log_init(sa, "done");
345 }
346 
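/*
 * Outer rules, MAC addresses, encap. headers and action sets below follow
 * a common reference-counted pattern: _attach() looks up and reuses an
 * equal existing object, _add() creates a new one, _del() drops a driver
 * reference, and _enable()/_disable() manage the corresponding FW resource
 * (allocated on first use, freed on last disable).
 */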
347 static struct sfc_mae_outer_rule *
348 sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
349 			  const efx_mae_match_spec_t *match_spec,
350 			  efx_tunnel_protocol_t encap_type)
351 {
352 	struct sfc_mae_outer_rule *rule;
353 	struct sfc_mae *mae = &sa->mae;
354 
355 	SFC_ASSERT(sfc_adapter_is_locked(sa));
356 
357 	TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
358 		if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
359 		    rule->encap_type == encap_type) {
360 			sfc_dbg(sa, "attaching to outer_rule=%p", rule);
361 			++(rule->refcnt);
362 			return rule;
363 		}
364 	}
365 
366 	return NULL;
367 }
368 
369 static int
370 sfc_mae_outer_rule_add(struct sfc_adapter *sa,
371 		       efx_mae_match_spec_t *match_spec,
372 		       efx_tunnel_protocol_t encap_type,
373 		       struct sfc_mae_outer_rule **rulep)
374 {
375 	struct sfc_mae_outer_rule *rule;
376 	struct sfc_mae *mae = &sa->mae;
377 
378 	SFC_ASSERT(sfc_adapter_is_locked(sa));
379 
380 	rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
381 	if (rule == NULL)
382 		return ENOMEM;
383 
384 	rule->refcnt = 1;
385 	rule->match_spec = match_spec;
386 	rule->encap_type = encap_type;
387 
388 	rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;
389 
390 	TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);
391 
392 	*rulep = rule;
393 
394 	sfc_dbg(sa, "added outer_rule=%p", rule);
395 
396 	return 0;
397 }
398 
399 static void
400 sfc_mae_outer_rule_del(struct sfc_adapter *sa,
401 		       struct sfc_mae_outer_rule *rule)
402 {
403 	struct sfc_mae *mae = &sa->mae;
404 
405 	SFC_ASSERT(sfc_adapter_is_locked(sa));
406 	SFC_ASSERT(rule->refcnt != 0);
407 
408 	--(rule->refcnt);
409 
410 	if (rule->refcnt != 0)
411 		return;
412 
413 	if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
414 	    rule->fw_rsrc.refcnt != 0) {
415 		sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
416 			rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
417 	}
418 
419 	efx_mae_match_spec_fini(sa->nic, rule->match_spec);
420 
421 	TAILQ_REMOVE(&mae->outer_rules, rule, entries);
422 	rte_free(rule);
423 
424 	sfc_dbg(sa, "deleted outer_rule=%p", rule);
425 }
426 
427 static int
428 sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
429 			  struct sfc_mae_outer_rule *rule,
430 			  efx_mae_match_spec_t *match_spec_action)
431 {
432 	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
433 	int rc;
434 
435 	SFC_ASSERT(sfc_adapter_is_locked(sa));
436 
437 	if (fw_rsrc->refcnt == 0) {
438 		SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
439 		SFC_ASSERT(rule->match_spec != NULL);
440 
441 		rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
442 					       rule->encap_type,
443 					       &fw_rsrc->rule_id);
444 		if (rc != 0) {
445 			sfc_err(sa, "failed to enable outer_rule=%p: %s",
446 				rule, strerror(rc));
447 			return rc;
448 		}
449 	}
450 
451 	if (match_spec_action == NULL)
452 		goto skip_action_rule;
453 
454 	rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
455 						  &fw_rsrc->rule_id);
456 	if (rc != 0) {
457 		if (fw_rsrc->refcnt == 0) {
458 			(void)efx_mae_outer_rule_remove(sa->nic,
459 							&fw_rsrc->rule_id);
460 			fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
461 		}
462 
463 		sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));
464 
465 		return rc;
466 	}
467 
468 skip_action_rule:
469 	if (fw_rsrc->refcnt == 0) {
470 		sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
471 			rule, fw_rsrc->rule_id.id);
472 	}
473 
474 	++(fw_rsrc->refcnt);
475 
476 	return 0;
477 }
478 
479 static void
480 sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
481 			   struct sfc_mae_outer_rule *rule)
482 {
483 	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
484 	int rc;
485 
486 	SFC_ASSERT(sfc_adapter_is_locked(sa));
487 
488 	if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
489 	    fw_rsrc->refcnt == 0) {
490 		sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
491 			rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
492 		return;
493 	}
494 
495 	if (fw_rsrc->refcnt == 1) {
496 		rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
497 		if (rc == 0) {
498 			sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
499 				rule, fw_rsrc->rule_id.id);
500 		} else {
501 			sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
502 				rule, fw_rsrc->rule_id.id, strerror(rc));
503 		}
504 		fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
505 	}
506 
507 	--(fw_rsrc->refcnt);
508 }
509 
510 static struct sfc_mae_mac_addr *
511 sfc_mae_mac_addr_attach(struct sfc_adapter *sa,
512 			const uint8_t addr_bytes[EFX_MAC_ADDR_LEN])
513 {
514 	struct sfc_mae_mac_addr *mac_addr;
515 	struct sfc_mae *mae = &sa->mae;
516 
517 	SFC_ASSERT(sfc_adapter_is_locked(sa));
518 
519 	TAILQ_FOREACH(mac_addr, &mae->mac_addrs, entries) {
520 		if (memcmp(mac_addr->addr_bytes, addr_bytes,
521 			   EFX_MAC_ADDR_LEN) == 0) {
522 			sfc_dbg(sa, "attaching to mac_addr=%p", mac_addr);
523 			++(mac_addr->refcnt);
524 			return mac_addr;
525 		}
526 	}
527 
528 	return NULL;
529 }
530 
531 static int
532 sfc_mae_mac_addr_add(struct sfc_adapter *sa,
533 		     const uint8_t addr_bytes[EFX_MAC_ADDR_LEN],
534 		     struct sfc_mae_mac_addr **mac_addrp)
535 {
536 	struct sfc_mae_mac_addr *mac_addr;
537 	struct sfc_mae *mae = &sa->mae;
538 
539 	SFC_ASSERT(sfc_adapter_is_locked(sa));
540 
541 	mac_addr = rte_zmalloc("sfc_mae_mac_addr", sizeof(*mac_addr), 0);
542 	if (mac_addr == NULL)
543 		return ENOMEM;
544 
545 	rte_memcpy(mac_addr->addr_bytes, addr_bytes, EFX_MAC_ADDR_LEN);
546 
547 	mac_addr->refcnt = 1;
548 	mac_addr->fw_rsrc.mac_id.id = EFX_MAE_RSRC_ID_INVALID;
549 
550 	TAILQ_INSERT_TAIL(&mae->mac_addrs, mac_addr, entries);
551 
552 	*mac_addrp = mac_addr;
553 
554 	sfc_dbg(sa, "added mac_addr=%p", mac_addr);
555 
556 	return 0;
557 }
558 
559 static void
560 sfc_mae_mac_addr_del(struct sfc_adapter *sa, struct sfc_mae_mac_addr *mac_addr)
561 {
562 	struct sfc_mae *mae = &sa->mae;
563 
564 	if (mac_addr == NULL)
565 		return;
566 
567 	SFC_ASSERT(sfc_adapter_is_locked(sa));
568 	SFC_ASSERT(mac_addr->refcnt != 0);
569 
570 	--(mac_addr->refcnt);
571 
572 	if (mac_addr->refcnt != 0)
573 		return;
574 
575 	if (mac_addr->fw_rsrc.mac_id.id != EFX_MAE_RSRC_ID_INVALID ||
576 	    mac_addr->fw_rsrc.refcnt != 0) {
577 		sfc_err(sa, "deleting mac_addr=%p abandons its FW resource: MAC_ID=0x%08x, refcnt=%u",
578 			mac_addr, mac_addr->fw_rsrc.mac_id.id,
579 			mac_addr->fw_rsrc.refcnt);
580 	}
581 
582 	TAILQ_REMOVE(&mae->mac_addrs, mac_addr, entries);
583 	rte_free(mac_addr);
584 
585 	sfc_dbg(sa, "deleted mac_addr=%p", mac_addr);
586 }
587 
588 enum sfc_mae_mac_addr_type {
589 	SFC_MAE_MAC_ADDR_DST,
590 	SFC_MAE_MAC_ADDR_SRC
591 };
592 
593 static int
594 sfc_mae_mac_addr_enable(struct sfc_adapter *sa,
595 			struct sfc_mae_mac_addr *mac_addr,
596 			enum sfc_mae_mac_addr_type type,
597 			efx_mae_actions_t *aset_spec)
598 {
599 	struct sfc_mae_fw_rsrc *fw_rsrc;
600 	int rc = 0;
601 
602 	if (mac_addr == NULL)
603 		return 0;
604 
605 	SFC_ASSERT(sfc_adapter_is_locked(sa));
606 
607 	fw_rsrc = &mac_addr->fw_rsrc;
608 
609 	if (fw_rsrc->refcnt == 0) {
610 		SFC_ASSERT(fw_rsrc->mac_id.id == EFX_MAE_RSRC_ID_INVALID);
611 
612 		rc = efx_mae_mac_addr_alloc(sa->nic, mac_addr->addr_bytes,
613 					    &fw_rsrc->mac_id);
614 		if (rc != 0) {
615 			sfc_err(sa, "failed to enable mac_addr=%p: %s",
616 				mac_addr, strerror(rc));
617 			return rc;
618 		}
619 	}
620 
621 	switch (type) {
622 	case SFC_MAE_MAC_ADDR_DST:
623 		rc = efx_mae_action_set_fill_in_dst_mac_id(aset_spec,
624 							   &fw_rsrc->mac_id);
625 		break;
626 	case SFC_MAE_MAC_ADDR_SRC:
627 		rc = efx_mae_action_set_fill_in_src_mac_id(aset_spec,
628 							   &fw_rsrc->mac_id);
629 		break;
630 	default:
631 		rc = EINVAL;
632 		break;
633 	}
634 
635 	if (rc != 0) {
636 		if (fw_rsrc->refcnt == 0) {
637 			(void)efx_mae_mac_addr_free(sa->nic, &fw_rsrc->mac_id);
638 			fw_rsrc->mac_id.id = EFX_MAE_RSRC_ID_INVALID;
639 		}
640 
641 		sfc_err(sa, "cannot fill in MAC address entry ID: %s",
642 			strerror(rc));
643 
644 		return rc;
645 	}
646 
647 	if (fw_rsrc->refcnt == 0) {
648 		sfc_dbg(sa, "enabled mac_addr=%p: MAC_ID=0x%08x",
649 			mac_addr, fw_rsrc->mac_id.id);
650 	}
651 
652 	++(fw_rsrc->refcnt);
653 
654 	return 0;
655 }
656 
657 static void
658 sfc_mae_mac_addr_disable(struct sfc_adapter *sa,
659 			 struct sfc_mae_mac_addr *mac_addr)
660 {
661 	struct sfc_mae_fw_rsrc *fw_rsrc;
662 	int rc;
663 
664 	if (mac_addr == NULL)
665 		return;
666 
667 	SFC_ASSERT(sfc_adapter_is_locked(sa));
668 
669 	fw_rsrc = &mac_addr->fw_rsrc;
670 
671 	if (fw_rsrc->mac_id.id == EFX_MAE_RSRC_ID_INVALID ||
672 	    fw_rsrc->refcnt == 0) {
673 		sfc_err(sa, "failed to disable mac_addr=%p: already disabled; MAC_ID=0x%08x, refcnt=%u",
674 			mac_addr, fw_rsrc->mac_id.id, fw_rsrc->refcnt);
675 		return;
676 	}
677 
678 	if (fw_rsrc->refcnt == 1) {
679 		rc = efx_mae_mac_addr_free(sa->nic, &fw_rsrc->mac_id);
680 		if (rc == 0) {
681 			sfc_dbg(sa, "disabled mac_addr=%p with MAC_ID=0x%08x",
682 				mac_addr, fw_rsrc->mac_id.id);
683 		} else {
684 			sfc_err(sa, "failed to disable mac_addr=%p with MAC_ID=0x%08x: %s",
685 				mac_addr, fw_rsrc->mac_id.id, strerror(rc));
686 		}
687 		fw_rsrc->mac_id.id = EFX_MAE_RSRC_ID_INVALID;
688 	}
689 
690 	--(fw_rsrc->refcnt);
691 }
692 
693 static struct sfc_mae_encap_header *
694 sfc_mae_encap_header_attach(struct sfc_adapter *sa,
695 			    const struct sfc_mae_bounce_eh *bounce_eh)
696 {
697 	struct sfc_mae_encap_header *encap_header;
698 	struct sfc_mae *mae = &sa->mae;
699 
700 	SFC_ASSERT(sfc_adapter_is_locked(sa));
701 
702 	TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
703 		if (encap_header->size == bounce_eh->size &&
704 		    memcmp(encap_header->buf, bounce_eh->buf,
705 			   bounce_eh->size) == 0) {
706 			sfc_dbg(sa, "attaching to encap_header=%p",
707 				encap_header);
708 			++(encap_header->refcnt);
709 			return encap_header;
710 		}
711 	}
712 
713 	return NULL;
714 }
715 
716 static int
717 sfc_mae_encap_header_add(struct sfc_adapter *sa,
718 			 const struct sfc_mae_bounce_eh *bounce_eh,
719 			 struct sfc_mae_encap_header **encap_headerp)
720 {
721 	struct sfc_mae_encap_header *encap_header;
722 	struct sfc_mae *mae = &sa->mae;
723 
724 	SFC_ASSERT(sfc_adapter_is_locked(sa));
725 
726 	encap_header = rte_zmalloc("sfc_mae_encap_header",
727 				   sizeof(*encap_header), 0);
728 	if (encap_header == NULL)
729 		return ENOMEM;
730 
731 	encap_header->size = bounce_eh->size;
732 
733 	encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
734 				       encap_header->size, 0);
735 	if (encap_header->buf == NULL) {
736 		rte_free(encap_header);
737 		return ENOMEM;
738 	}
739 
740 	rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);
741 
742 	encap_header->refcnt = 1;
743 	encap_header->type = bounce_eh->type;
744 	encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;
745 
746 	TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);
747 
748 	*encap_headerp = encap_header;
749 
750 	sfc_dbg(sa, "added encap_header=%p", encap_header);
751 
752 	return 0;
753 }
754 
755 static void
756 sfc_mae_encap_header_del(struct sfc_adapter *sa,
757 		       struct sfc_mae_encap_header *encap_header)
758 {
759 	struct sfc_mae *mae = &sa->mae;
760 
761 	if (encap_header == NULL)
762 		return;
763 
764 	SFC_ASSERT(sfc_adapter_is_locked(sa));
765 	SFC_ASSERT(encap_header->refcnt != 0);
766 
767 	--(encap_header->refcnt);
768 
769 	if (encap_header->refcnt != 0)
770 		return;
771 
772 	if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
773 	    encap_header->fw_rsrc.refcnt != 0) {
774 		sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
775 			encap_header, encap_header->fw_rsrc.eh_id.id,
776 			encap_header->fw_rsrc.refcnt);
777 	}
778 
779 	TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
780 	rte_free(encap_header->buf);
781 	rte_free(encap_header);
782 
783 	sfc_dbg(sa, "deleted encap_header=%p", encap_header);
784 }
785 
786 static int
787 sfc_mae_encap_header_enable(struct sfc_adapter *sa,
788 			    struct sfc_mae_encap_header *encap_header,
789 			    efx_mae_actions_t *action_set_spec)
790 {
791 	struct sfc_mae_fw_rsrc *fw_rsrc;
792 	int rc;
793 
794 	if (encap_header == NULL)
795 		return 0;
796 
797 	SFC_ASSERT(sfc_adapter_is_locked(sa));
798 
799 	fw_rsrc = &encap_header->fw_rsrc;
800 
801 	if (fw_rsrc->refcnt == 0) {
802 		SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
803 		SFC_ASSERT(encap_header->buf != NULL);
804 		SFC_ASSERT(encap_header->size != 0);
805 
806 		rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
807 						encap_header->buf,
808 						encap_header->size,
809 						&fw_rsrc->eh_id);
810 		if (rc != 0) {
811 			sfc_err(sa, "failed to enable encap_header=%p: %s",
812 				encap_header, strerror(rc));
813 			return rc;
814 		}
815 	}
816 
817 	rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
818 					      &fw_rsrc->eh_id);
819 	if (rc != 0) {
820 		if (fw_rsrc->refcnt == 0) {
821 			(void)efx_mae_encap_header_free(sa->nic,
822 							&fw_rsrc->eh_id);
823 			fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
824 		}
825 
826 		sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));
827 
828 		return rc;
829 	}
830 
831 	if (fw_rsrc->refcnt == 0) {
832 		sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
833 			encap_header, fw_rsrc->eh_id.id);
834 	}
835 
836 	++(fw_rsrc->refcnt);
837 
838 	return 0;
839 }
840 
841 static void
842 sfc_mae_encap_header_disable(struct sfc_adapter *sa,
843 			     struct sfc_mae_encap_header *encap_header)
844 {
845 	struct sfc_mae_fw_rsrc *fw_rsrc;
846 	int rc;
847 
848 	if (encap_header == NULL)
849 		return;
850 
851 	SFC_ASSERT(sfc_adapter_is_locked(sa));
852 
853 	fw_rsrc = &encap_header->fw_rsrc;
854 
855 	if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
856 	    fw_rsrc->refcnt == 0) {
857 		sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
858 			encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
859 		return;
860 	}
861 
862 	if (fw_rsrc->refcnt == 1) {
863 		rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
864 		if (rc == 0) {
865 			sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
866 				encap_header, fw_rsrc->eh_id.id);
867 		} else {
868 			sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
869 				encap_header, fw_rsrc->eh_id.id, strerror(rc));
870 		}
871 		fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
872 	}
873 
874 	--(fw_rsrc->refcnt);
875 }
876 
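/*
 * Enable the action set's counter (at most one is supported) and fill
 * its MAE counter ID into the action set specification.
 */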
877 static int
878 sfc_mae_counters_enable(struct sfc_adapter *sa,
879 			struct sfc_mae_counter_id *counters,
880 			unsigned int n_counters,
881 			efx_mae_actions_t *action_set_spec)
882 {
883 	int rc;
884 
885 	sfc_log_init(sa, "entry");
886 
887 	if (n_counters == 0) {
888 		sfc_log_init(sa, "no counters - skip");
889 		return 0;
890 	}
891 
892 	SFC_ASSERT(sfc_adapter_is_locked(sa));
893 	SFC_ASSERT(n_counters == 1);
894 
895 	rc = sfc_mae_counter_enable(sa, &counters[0]);
896 	if (rc != 0) {
897 		sfc_err(sa, "failed to enable MAE counter %u: %s",
898 			counters[0].mae_id.id, rte_strerror(rc));
899 		goto fail_counter_add;
900 	}
901 
902 	rc = efx_mae_action_set_fill_in_counter_id(action_set_spec,
903 						   &counters[0].mae_id);
904 	if (rc != 0) {
905 		sfc_err(sa, "failed to fill in MAE counter %u in action set: %s",
906 			counters[0].mae_id.id, rte_strerror(rc));
907 		goto fail_fill_in_id;
908 	}
909 
910 	return 0;
911 
912 fail_fill_in_id:
913 	(void)sfc_mae_counter_disable(sa, &counters[0]);
914 
915 fail_counter_add:
916 	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
917 	return rc;
918 }
919 
920 static int
921 sfc_mae_counters_disable(struct sfc_adapter *sa,
922 			 struct sfc_mae_counter_id *counters,
923 			 unsigned int n_counters)
924 {
925 	if (n_counters == 0)
926 		return 0;
927 
928 	SFC_ASSERT(sfc_adapter_is_locked(sa));
929 	SFC_ASSERT(n_counters == 1);
930 
931 	if (counters[0].mae_id.id == EFX_MAE_RSRC_ID_INVALID) {
932 		sfc_err(sa, "failed to disable: already disabled");
933 		return EALREADY;
934 	}
935 
936 	return sfc_mae_counter_disable(sa, &counters[0]);
937 }
938 
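/* Parsed components of an action set, collected before add/attach. */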
939 struct sfc_mae_aset_ctx {
940 	uint64_t			*ft_group_hit_counter;
941 	struct sfc_mae_encap_header	*encap_header;
942 	struct sfc_flow_tunnel		*counter_ft;
943 	unsigned int			n_counters;
944 	struct sfc_mae_mac_addr		*dst_mac;
945 	struct sfc_mae_mac_addr		*src_mac;
946 
947 	efx_mae_actions_t		*spec;
948 };
949 
950 static struct sfc_mae_action_set *
951 sfc_mae_action_set_attach(struct sfc_adapter *sa,
952 			  const struct sfc_mae_aset_ctx *ctx)
953 {
954 	struct sfc_mae_action_set *action_set;
955 	struct sfc_mae *mae = &sa->mae;
956 
957 	SFC_ASSERT(sfc_adapter_is_locked(sa));
958 
959 	/*
960 	 * Shared counters are not supported, hence action
961 	 * sets with counters are not attachable.
962 	 */
963 	if (ctx->n_counters != 0)
964 		return NULL;
965 
966 	TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
967 		if (action_set->encap_header == ctx->encap_header &&
968 		    action_set->dst_mac_addr == ctx->dst_mac &&
969 		    action_set->src_mac_addr == ctx->src_mac &&
970 		    efx_mae_action_set_specs_equal(action_set->spec,
971 						   ctx->spec)) {
972 			sfc_dbg(sa, "attaching to action_set=%p", action_set);
973 			++(action_set->refcnt);
974 			return action_set;
975 		}
976 	}
977 
978 	return NULL;
979 }
980 
981 static int
982 sfc_mae_action_set_add(struct sfc_adapter *sa,
983 		       const struct rte_flow_action actions[],
984 		       const struct sfc_mae_aset_ctx *ctx,
985 		       struct sfc_mae_action_set **action_setp)
986 {
987 	struct sfc_mae_action_set *action_set;
988 	struct sfc_mae *mae = &sa->mae;
989 	unsigned int i;
990 
991 	SFC_ASSERT(sfc_adapter_is_locked(sa));
992 
993 	action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
994 	if (action_set == NULL) {
995 		sfc_err(sa, "failed to alloc action set");
996 		return ENOMEM;
997 	}
998 
999 	if (ctx->n_counters > 0) {
1000 		const struct rte_flow_action *action;
1001 
1002 		action_set->counters = rte_malloc("sfc_mae_counter_ids",
1003 			sizeof(action_set->counters[0]) * ctx->n_counters, 0);
1004 		if (action_set->counters == NULL) {
1005 			rte_free(action_set);
1006 			sfc_err(sa, "failed to alloc counters");
1007 			return ENOMEM;
1008 		}
1009 
1010 		for (i = 0; i < ctx->n_counters; ++i) {
1011 			action_set->counters[i].rte_id_valid = B_FALSE;
1012 			action_set->counters[i].mae_id.id =
1013 				EFX_MAE_RSRC_ID_INVALID;
1014 
1015 			action_set->counters[i].ft_group_hit_counter =
1016 				ctx->ft_group_hit_counter;
1017 			action_set->counters[i].ft = ctx->counter_ft;
1018 		}
1019 
1020 		for (action = actions, i = 0;
1021 		     action->type != RTE_FLOW_ACTION_TYPE_END &&
1022 		     i < ctx->n_counters; ++action) {
1023 			const struct rte_flow_action_count *conf;
1024 
1025 			if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
1026 				continue;
1027 
1028 			conf = action->conf;
1029 
1030 			action_set->counters[i].rte_id_valid = B_TRUE;
1031 			action_set->counters[i].rte_id = conf->id;
1032 			i++;
1033 		}
1034 		action_set->n_counters = ctx->n_counters;
1035 	}
1036 
1037 	action_set->refcnt = 1;
1038 	action_set->spec = ctx->spec;
1039 	action_set->encap_header = ctx->encap_header;
1040 	action_set->dst_mac_addr = ctx->dst_mac;
1041 	action_set->src_mac_addr = ctx->src_mac;
1042 
1043 	action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;
1044 
1045 	TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);
1046 
1047 	*action_setp = action_set;
1048 
1049 	sfc_dbg(sa, "added action_set=%p", action_set);
1050 
1051 	return 0;
1052 }
1053 
1054 static void
1055 sfc_mae_action_set_del(struct sfc_adapter *sa,
1056 		       struct sfc_mae_action_set *action_set)
1057 {
1058 	struct sfc_mae *mae = &sa->mae;
1059 
1060 	SFC_ASSERT(sfc_adapter_is_locked(sa));
1061 	SFC_ASSERT(action_set->refcnt != 0);
1062 
1063 	--(action_set->refcnt);
1064 
1065 	if (action_set->refcnt != 0)
1066 		return;
1067 
1068 	if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
1069 	    action_set->fw_rsrc.refcnt != 0) {
1070 		sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
1071 			action_set, action_set->fw_rsrc.aset_id.id,
1072 			action_set->fw_rsrc.refcnt);
1073 	}
1074 
1075 	efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
1076 	sfc_mae_encap_header_del(sa, action_set->encap_header);
1077 	sfc_mae_mac_addr_del(sa, action_set->dst_mac_addr);
1078 	sfc_mae_mac_addr_del(sa, action_set->src_mac_addr);
1079 	if (action_set->n_counters > 0) {
1080 		SFC_ASSERT(action_set->n_counters == 1);
1081 		SFC_ASSERT(action_set->counters[0].mae_id.id ==
1082 			   EFX_MAE_RSRC_ID_INVALID);
1083 		rte_free(action_set->counters);
1084 	}
1085 	TAILQ_REMOVE(&mae->action_sets, action_set, entries);
1086 	rte_free(action_set);
1087 
1088 	sfc_dbg(sa, "deleted action_set=%p", action_set);
1089 }
1090 
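/*
 * On first use, enable the dependent MAC address, encap. header and
 * counter resources and allocate the FW action set; afterwards, only
 * bump the FW resource reference count.
 */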
1091 static int
1092 sfc_mae_action_set_enable(struct sfc_adapter *sa,
1093 			  struct sfc_mae_action_set *action_set)
1094 {
1095 	struct sfc_mae_encap_header *encap_header = action_set->encap_header;
1096 	struct sfc_mae_mac_addr *dst_mac_addr = action_set->dst_mac_addr;
1097 	struct sfc_mae_mac_addr *src_mac_addr = action_set->src_mac_addr;
1098 	struct sfc_mae_counter_id *counters = action_set->counters;
1099 	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
1100 	int rc;
1101 
1102 	SFC_ASSERT(sfc_adapter_is_locked(sa));
1103 
1104 	if (fw_rsrc->refcnt == 0) {
1105 		SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
1106 		SFC_ASSERT(action_set->spec != NULL);
1107 
1108 		rc = sfc_mae_mac_addr_enable(sa, dst_mac_addr,
1109 					     SFC_MAE_MAC_ADDR_DST,
1110 					     action_set->spec);
1111 		if (rc != 0)
1112 			return rc;
1113 
1114 		rc = sfc_mae_mac_addr_enable(sa, src_mac_addr,
1115 					     SFC_MAE_MAC_ADDR_SRC,
1116 					     action_set->spec);
1117 		if (rc != 0) {
1118 			sfc_mae_mac_addr_disable(sa, dst_mac_addr);
1119 			return rc;
1120 		}
1121 
1122 		rc = sfc_mae_encap_header_enable(sa, encap_header,
1123 						 action_set->spec);
1124 		if (rc != 0) {
1125 			sfc_mae_mac_addr_disable(sa, src_mac_addr);
1126 			sfc_mae_mac_addr_disable(sa, dst_mac_addr);
1127 			return rc;
1128 		}
1129 
1130 		rc = sfc_mae_counters_enable(sa, counters,
1131 					     action_set->n_counters,
1132 					     action_set->spec);
1133 		if (rc != 0) {
1134 			sfc_err(sa, "failed to enable %u MAE counters: %s",
1135 				action_set->n_counters, rte_strerror(rc));
1136 
1137 			sfc_mae_encap_header_disable(sa, encap_header);
1138 			sfc_mae_mac_addr_disable(sa, src_mac_addr);
1139 			sfc_mae_mac_addr_disable(sa, dst_mac_addr);
1140 			return rc;
1141 		}
1142 
1143 		rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
1144 					      &fw_rsrc->aset_id);
1145 		if (rc != 0) {
1146 			sfc_err(sa, "failed to enable action_set=%p: %s",
1147 				action_set, strerror(rc));
1148 
1149 			(void)sfc_mae_counters_disable(sa, counters,
1150 						       action_set->n_counters);
1151 			sfc_mae_encap_header_disable(sa, encap_header);
1152 			sfc_mae_mac_addr_disable(sa, src_mac_addr);
1153 			sfc_mae_mac_addr_disable(sa, dst_mac_addr);
1154 			return rc;
1155 		}
1156 
1157 		sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
1158 			action_set, fw_rsrc->aset_id.id);
1159 	}
1160 
1161 	++(fw_rsrc->refcnt);
1162 
1163 	return 0;
1164 }
1165 
1166 static void
1167 sfc_mae_action_set_disable(struct sfc_adapter *sa,
1168 			   struct sfc_mae_action_set *action_set)
1169 {
1170 	struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
1171 	int rc;
1172 
1173 	SFC_ASSERT(sfc_adapter_is_locked(sa));
1174 
1175 	if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
1176 	    fw_rsrc->refcnt == 0) {
1177 		sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
1178 			action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
1179 		return;
1180 	}
1181 
1182 	if (fw_rsrc->refcnt == 1) {
1183 		rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
1184 		if (rc == 0) {
1185 			sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
1186 				action_set, fw_rsrc->aset_id.id);
1187 		} else {
1188 			sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
1189 				action_set, fw_rsrc->aset_id.id, strerror(rc));
1190 		}
1191 		fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
1192 
1193 		rc = sfc_mae_counters_disable(sa, action_set->counters,
1194 					      action_set->n_counters);
1195 		if (rc != 0) {
1196 			sfc_err(sa, "failed to disable %u MAE counters: %s",
1197 				action_set->n_counters, rte_strerror(rc));
1198 		}
1199 
1200 		sfc_mae_encap_header_disable(sa, action_set->encap_header);
1201 		sfc_mae_mac_addr_disable(sa, action_set->src_mac_addr);
1202 		sfc_mae_mac_addr_disable(sa, action_set->dst_mac_addr);
1203 	}
1204 
1205 	--(fw_rsrc->refcnt);
1206 }
1207 
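/*
 * Release all objects referenced by a MAE flow specification: tunnel
 * offload context, outer rule, action set and match specification.
 * The action rule itself must have been removed already.
 */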
1208 void
1209 sfc_mae_flow_cleanup(struct sfc_adapter *sa,
1210 		     struct rte_flow *flow)
1211 {
1212 	struct sfc_flow_spec *spec;
1213 	struct sfc_flow_spec_mae *spec_mae;
1214 
1215 	if (flow == NULL)
1216 		return;
1217 
1218 	spec = &flow->spec;
1219 
1220 	if (spec == NULL)
1221 		return;
1222 
1223 	spec_mae = &spec->mae;
1224 
1225 	if (spec_mae->ft != NULL) {
1226 		if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
1227 			spec_mae->ft->jump_rule_is_set = B_FALSE;
1228 
1229 		SFC_ASSERT(spec_mae->ft->refcnt != 0);
1230 		--(spec_mae->ft->refcnt);
1231 	}
1232 
1233 	SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
1234 
1235 	if (spec_mae->outer_rule != NULL)
1236 		sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);
1237 
1238 	if (spec_mae->action_set != NULL)
1239 		sfc_mae_action_set_del(sa, spec_mae->action_set);
1240 
1241 	if (spec_mae->match_spec != NULL)
1242 		efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
1243 }
1244 
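/*
 * Transfer the EtherType values stashed during item parsing into the
 * match specification: the innermost one goes to ETHER_TYPE_BE and
 * the VLAN tag ones go to VLAN0_PROTO_BE / VLAN1_PROTO_BE.
 */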
1245 static int
1246 sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
1247 {
1248 	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
1249 	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1250 	const efx_mae_field_id_t field_ids[] = {
1251 		EFX_MAE_FIELD_VLAN0_PROTO_BE,
1252 		EFX_MAE_FIELD_VLAN1_PROTO_BE,
1253 	};
1254 	const struct sfc_mae_ethertype *et;
1255 	unsigned int i;
1256 	int rc;
1257 
1258 	/*
1259 	 * In accordance with RTE flow API convention, the innermost L2
1260 	 * item's "type" ("inner_type") is an L3 EtherType. If there is
1261 	 * no L3 item, it's 0x0000/0x0000.
1262 	 */
1263 	et = &pdata->ethertypes[pdata->nb_vlan_tags];
1264 	rc = efx_mae_match_spec_field_set(ctx->match_spec,
1265 					  fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
1266 					  sizeof(et->value),
1267 					  (const uint8_t *)&et->value,
1268 					  sizeof(et->mask),
1269 					  (const uint8_t *)&et->mask);
1270 	if (rc != 0)
1271 		return rc;
1272 
1273 	/*
1274 	 * sfc_mae_rule_parse_item_vlan() has already made sure
1275 	 * that pdata->nb_vlan_tags does not exceed this figure.
1276 	 */
1277 	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1278 
1279 	for (i = 0; i < pdata->nb_vlan_tags; ++i) {
1280 		et = &pdata->ethertypes[i];
1281 
1282 		rc = efx_mae_match_spec_field_set(ctx->match_spec,
1283 						  fremap[field_ids[i]],
1284 						  sizeof(et->value),
1285 						  (const uint8_t *)&et->value,
1286 						  sizeof(et->mask),
1287 						  (const uint8_t *)&et->mask);
1288 		if (rc != 0)
1289 			return rc;
1290 	}
1291 
1292 	return 0;
1293 }
1294 
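/*
 * Post-process match criteria stashed by the item parsers: validate VLAN
 * TPIDs, apply the EtherType and IP protocol restrictions imposed by
 * inner items, and set the ETHER_TYPE / VLAN*_PROTO, HAS_OVLAN /
 * HAS_IVLAN and IP_PROTO match fields accordingly.
 */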
1295 static int
1296 sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
1297 				  struct rte_flow_error *error)
1298 {
1299 	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1300 	struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
1301 	struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1302 	const rte_be16_t supported_tpids[] = {
1303 		/* VLAN standard TPID (always the first element) */
1304 		RTE_BE16(RTE_ETHER_TYPE_VLAN),
1305 
1306 		/* Double-tagging TPIDs */
1307 		RTE_BE16(RTE_ETHER_TYPE_QINQ),
1308 		RTE_BE16(RTE_ETHER_TYPE_QINQ1),
1309 		RTE_BE16(RTE_ETHER_TYPE_QINQ2),
1310 		RTE_BE16(RTE_ETHER_TYPE_QINQ3),
1311 	};
1312 	bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
1313 	unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
1314 	unsigned int ethertype_idx;
1315 	const uint8_t *valuep;
1316 	const uint8_t *maskp;
1317 	int rc;
1318 
1319 	if (pdata->innermost_ethertype_restriction.mask != 0 &&
1320 	    pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1321 		/*
1322 		 * If a single item VLAN is followed by an L3 item, the value
1323 		 * of "type" in item ETH can't be a double-tagging TPID.
1324 		 */
1325 		nb_supported_tpids = 1;
1326 	}
1327 
1328 	/*
1329 	 * sfc_mae_rule_parse_item_vlan() has already made sure
1330 	 * that pdata->nb_vlan_tags does not exceed this figure.
1331 	 */
1332 	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1333 
1334 	for (ethertype_idx = 0;
1335 	     ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
1336 		rte_be16_t tpid_v = ethertypes[ethertype_idx].value;
1337 		rte_be16_t tpid_m = ethertypes[ethertype_idx].mask;
1338 		unsigned int tpid_idx;
1339 
1340 		/*
1341 		 * This loop can have only two iterations. On the second one,
1342 		 * drop the outer tag presence enforcement bit because the
1343 		 * presence of an inner tag implies the presence of the outer one.
1344 		 */
1345 		enforce_tag_presence[0] = B_FALSE;
1346 
1347 		if (tpid_m == RTE_BE16(0)) {
1348 			if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
1349 				enforce_tag_presence[ethertype_idx] = B_TRUE;
1350 
1351 			/* No match on this field, and no value check. */
1352 			nb_supported_tpids = 1;
1353 			continue;
1354 		}
1355 
1356 		/* Exact match is supported only. */
1357 		if (tpid_m != RTE_BE16(0xffff)) {
1358 			sfc_err(ctx->sa, "TPID mask must be 0x0 or 0xffff; got 0x%04x",
1359 				rte_be_to_cpu_16(tpid_m));
1360 			rc = EINVAL;
1361 			goto fail;
1362 		}
1363 
1364 		for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
1365 		     tpid_idx < nb_supported_tpids; ++tpid_idx) {
1366 			if (tpid_v == supported_tpids[tpid_idx])
1367 				break;
1368 		}
1369 
1370 		if (tpid_idx == nb_supported_tpids) {
1371 			sfc_err(ctx->sa, "TPID 0x%04x is unsupported",
1372 				rte_be_to_cpu_16(tpid_v));
1373 			rc = EINVAL;
1374 			goto fail;
1375 		}
1376 
1377 		nb_supported_tpids = 1;
1378 	}
1379 
1380 	if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
1381 		struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
1382 		rte_be16_t enforced_et;
1383 
1384 		enforced_et = pdata->innermost_ethertype_restriction.value;
1385 
1386 		if (et->mask == 0) {
1387 			et->mask = RTE_BE16(0xffff);
1388 			et->value = enforced_et;
1389 		} else if (et->mask != RTE_BE16(0xffff) ||
1390 			   et->value != enforced_et) {
1391 			sfc_err(ctx->sa, "L3 EtherType must be 0x0/0x0 or 0x%04x/0xffff; got 0x%04x/0x%04x",
1392 				rte_be_to_cpu_16(enforced_et),
1393 				rte_be_to_cpu_16(et->value),
1394 				rte_be_to_cpu_16(et->mask));
1395 			rc = EINVAL;
1396 			goto fail;
1397 		}
1398 	}
1399 
1400 	/*
1401 	 * Now, when the number of VLAN tags is known, set fields
1402 	 * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
1403 	 * one is either a valid L3 EtherType or 0x0000/0x0000,
1404 	 * and the last two are valid TPIDs (or 0x0000/0x0000).
1405 	 */
1406 	rc = sfc_mae_set_ethertypes(ctx);
1407 	if (rc != 0)
1408 		goto fail;
1409 
1410 	if (pdata->l3_next_proto_restriction_mask == 0xff) {
1411 		if (pdata->l3_next_proto_mask == 0) {
1412 			pdata->l3_next_proto_mask = 0xff;
1413 			pdata->l3_next_proto_value =
1414 				pdata->l3_next_proto_restriction_value;
1415 		} else if (pdata->l3_next_proto_mask != 0xff ||
1416 			   pdata->l3_next_proto_value !=
1417 			   pdata->l3_next_proto_restriction_value) {
1418 			sfc_err(ctx->sa, "L3 next protocol must be 0x0/0x0 or 0x%02x/0xff; got 0x%02x/0x%02x",
1419 				pdata->l3_next_proto_restriction_value,
1420 				pdata->l3_next_proto_value,
1421 				pdata->l3_next_proto_mask);
1422 			rc = EINVAL;
1423 			goto fail;
1424 		}
1425 	}
1426 
1427 	if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
1428 		rc = efx_mae_match_spec_bit_set(ctx->match_spec,
1429 						fremap[EFX_MAE_FIELD_HAS_OVLAN],
1430 						enforce_tag_presence[0] ||
1431 						pdata->has_ovlan_value);
1432 		if (rc != 0)
1433 			goto fail;
1434 	}
1435 
1436 	if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
1437 		rc = efx_mae_match_spec_bit_set(ctx->match_spec,
1438 						fremap[EFX_MAE_FIELD_HAS_IVLAN],
1439 						enforce_tag_presence[1] ||
1440 						pdata->has_ivlan_value);
1441 		if (rc != 0)
1442 			goto fail;
1443 	}
1444 
1445 	valuep = (const uint8_t *)&pdata->l3_next_proto_value;
1446 	maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
1447 	rc = efx_mae_match_spec_field_set(ctx->match_spec,
1448 					  fremap[EFX_MAE_FIELD_IP_PROTO],
1449 					  sizeof(pdata->l3_next_proto_value),
1450 					  valuep,
1451 					  sizeof(pdata->l3_next_proto_mask),
1452 					  maskp);
1453 	if (rc != 0)
1454 		goto fail;
1455 
1456 	return 0;
1457 
1458 fail:
1459 	return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1460 				  "Failed to process pattern data");
1461 }
1462 
1463 static int
1464 sfc_mae_rule_parse_item_mark(const struct rte_flow_item *item,
1465 			     struct sfc_flow_parse_ctx *ctx,
1466 			     struct rte_flow_error *error)
1467 {
1468 	const struct rte_flow_item_mark *spec = item->spec;
1469 	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1470 
1471 	if (spec == NULL) {
1472 		return rte_flow_error_set(error, EINVAL,
1473 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1474 				"NULL spec in item MARK");
1475 	}
1476 
1477 	/*
1478 	 * This item is used in tunnel offload support only.
1479 	 * It must go before any network header items. This
1480 	 * way, sfc_mae_rule_preparse_item_mark() must have
1481 	 * already parsed it. Only one item MARK is allowed.
1482 	 */
1483 	if (ctx_mae->ft_rule_type != SFC_FT_RULE_GROUP ||
1484 	    spec->id != (uint32_t)SFC_FT_ID_TO_MARK(ctx_mae->ft->id)) {
1485 		return rte_flow_error_set(error, EINVAL,
1486 					  RTE_FLOW_ERROR_TYPE_ITEM,
1487 					  item, "invalid item MARK");
1488 	}
1489 
1490 	return 0;
1491 }
1492 
1493 static int
1494 sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
1495 				struct sfc_flow_parse_ctx *ctx,
1496 				struct rte_flow_error *error)
1497 {
1498 	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1499 	const struct rte_flow_item_port_id supp_mask = {
1500 		.id = 0xffffffff,
1501 	};
1502 	const void *def_mask = &rte_flow_item_port_id_mask;
1503 	const struct rte_flow_item_port_id *spec = NULL;
1504 	const struct rte_flow_item_port_id *mask = NULL;
1505 	efx_mport_sel_t mport_sel;
1506 	int rc;
1507 
1508 	if (ctx_mae->match_mport_set) {
1509 		return rte_flow_error_set(error, ENOTSUP,
1510 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1511 				"Can't handle multiple traffic source items");
1512 	}
1513 
1514 	rc = sfc_flow_parse_init(item,
1515 				 (const void **)&spec, (const void **)&mask,
1516 				 (const void *)&supp_mask, def_mask,
1517 				 sizeof(struct rte_flow_item_port_id), error);
1518 	if (rc != 0)
1519 		return rc;
1520 
1521 	if (mask->id != supp_mask.id) {
1522 		return rte_flow_error_set(error, EINVAL,
1523 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1524 				"Bad mask in the PORT_ID pattern item");
1525 	}
1526 
1527 	/* If "spec" is not set, could be any port ID */
1528 	if (spec == NULL)
1529 		return 0;
1530 
1531 	if (spec->id > UINT16_MAX) {
1532 		return rte_flow_error_set(error, EOVERFLOW,
1533 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1534 					  "The port ID is too large");
1535 	}
1536 
1537 	rc = sfc_mae_switch_get_ethdev_mport(ctx_mae->sa->mae.switch_domain_id,
1538 					     spec->id, &mport_sel);
1539 	if (rc != 0) {
1540 		return rte_flow_error_set(error, rc,
1541 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1542 				"Can't get m-port for the given ethdev");
1543 	}
1544 
1545 	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
1546 					  &mport_sel, NULL);
1547 	if (rc != 0) {
1548 		return rte_flow_error_set(error, rc,
1549 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1550 				"Failed to set MPORT for the port ID");
1551 	}
1552 
1553 	ctx_mae->match_mport_set = B_TRUE;
1554 
1555 	return 0;
1556 }
1557 
1558 static int
1559 sfc_mae_rule_parse_item_ethdev_based(const struct rte_flow_item *item,
1560 				     struct sfc_flow_parse_ctx *ctx,
1561 				     struct rte_flow_error *error)
1562 {
1563 	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1564 	const struct rte_flow_item_ethdev supp_mask = {
1565 		.port_id = 0xffff,
1566 	};
1567 	const void *def_mask = &rte_flow_item_ethdev_mask;
1568 	const struct rte_flow_item_ethdev *spec = NULL;
1569 	const struct rte_flow_item_ethdev *mask = NULL;
1570 	efx_mport_sel_t mport_sel;
1571 	int rc;
1572 
1573 	if (ctx_mae->match_mport_set) {
1574 		return rte_flow_error_set(error, ENOTSUP,
1575 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1576 				"Can't handle multiple traffic source items");
1577 	}
1578 
1579 	rc = sfc_flow_parse_init(item,
1580 				 (const void **)&spec, (const void **)&mask,
1581 				 (const void *)&supp_mask, def_mask,
1582 				 sizeof(struct rte_flow_item_ethdev), error);
1583 	if (rc != 0)
1584 		return rc;
1585 
1586 	if (mask->port_id != supp_mask.port_id) {
1587 		return rte_flow_error_set(error, EINVAL,
1588 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1589 				"Bad mask in the ethdev-based pattern item");
1590 	}
1591 
1592 	/* If "spec" is not set, could be any port ID */
1593 	if (spec == NULL)
1594 		return 0;
1595 
1596 	switch (item->type) {
1597 	case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
1598 		rc = sfc_mae_switch_get_ethdev_mport(
1599 				ctx_mae->sa->mae.switch_domain_id,
1600 				spec->port_id, &mport_sel);
1601 		if (rc != 0) {
1602 			return rte_flow_error_set(error, rc,
1603 					RTE_FLOW_ERROR_TYPE_ITEM, item,
1604 					"Can't get m-port for the given ethdev");
1605 		}
1606 		break;
1607 	case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
1608 		rc = sfc_mae_switch_get_entity_mport(
1609 				ctx_mae->sa->mae.switch_domain_id,
1610 				spec->port_id, &mport_sel);
1611 		if (rc != 0) {
1612 			return rte_flow_error_set(error, rc,
1613 					RTE_FLOW_ERROR_TYPE_ITEM, item,
1614 					"Can't get m-port for the given ethdev");
1615 		}
1616 		break;
1617 	default:
1618 		return rte_flow_error_set(error, EINVAL,
1619 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1620 				"Unsupported ethdev-based flow item");
1621 	}
1622 
1623 	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
1624 					  &mport_sel, NULL);
1625 	if (rc != 0) {
1626 		return rte_flow_error_set(error, rc,
1627 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1628 				"Failed to set MPORT for the port ID");
1629 	}
1630 
1631 	ctx_mae->match_mport_set = B_TRUE;
1632 
1633 	return 0;
1634 }
1635 
1636 static int
1637 sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
1638 				 struct sfc_flow_parse_ctx *ctx,
1639 				 struct rte_flow_error *error)
1640 {
1641 	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1642 	const struct rte_flow_item_phy_port supp_mask = {
1643 		.index = 0xffffffff,
1644 	};
1645 	const void *def_mask = &rte_flow_item_phy_port_mask;
1646 	const struct rte_flow_item_phy_port *spec = NULL;
1647 	const struct rte_flow_item_phy_port *mask = NULL;
1648 	efx_mport_sel_t mport_v;
1649 	int rc;
1650 
1651 	if (ctx_mae->match_mport_set) {
1652 		return rte_flow_error_set(error, ENOTSUP,
1653 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1654 				"Can't handle multiple traffic source items");
1655 	}
1656 
1657 	rc = sfc_flow_parse_init(item,
1658 				 (const void **)&spec, (const void **)&mask,
1659 				 (const void *)&supp_mask, def_mask,
1660 				 sizeof(struct rte_flow_item_phy_port), error);
1661 	if (rc != 0)
1662 		return rc;
1663 
1664 	if (mask->index != supp_mask.index) {
1665 		return rte_flow_error_set(error, EINVAL,
1666 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1667 				"Bad mask in the PHY_PORT pattern item");
1668 	}
1669 
1670 	/* If "spec" is not set, could be any physical port */
1671 	if (spec == NULL)
1672 		return 0;
1673 
1674 	rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
1675 	if (rc != 0) {
1676 		return rte_flow_error_set(error, rc,
1677 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1678 				"Failed to convert the PHY_PORT index");
1679 	}
1680 
1681 	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1682 	if (rc != 0) {
1683 		return rte_flow_error_set(error, rc,
1684 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1685 				"Failed to set MPORT for the PHY_PORT");
1686 	}
1687 
1688 	ctx_mae->match_mport_set = B_TRUE;
1689 
1690 	return 0;
1691 }
1692 
1693 static int
1694 sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
1695 			   struct sfc_flow_parse_ctx *ctx,
1696 			   struct rte_flow_error *error)
1697 {
1698 	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1699 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
1700 	efx_mport_sel_t mport_v;
1701 	int rc;
1702 
1703 	if (ctx_mae->match_mport_set) {
1704 		return rte_flow_error_set(error, ENOTSUP,
1705 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1706 				"Can't handle multiple traffic source items");
1707 	}
1708 
1709 	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
1710 					    &mport_v);
1711 	if (rc != 0) {
1712 		return rte_flow_error_set(error, rc,
1713 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1714 				"Failed to convert the PF ID");
1715 	}
1716 
1717 	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1718 	if (rc != 0) {
1719 		return rte_flow_error_set(error, rc,
1720 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1721 				"Failed to set MPORT for the PF");
1722 	}
1723 
1724 	ctx_mae->match_mport_set = B_TRUE;
1725 
1726 	return 0;
1727 }
1728 
1729 static int
1730 sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
1731 			   struct sfc_flow_parse_ctx *ctx,
1732 			   struct rte_flow_error *error)
1733 {
1734 	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1735 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
1736 	const struct rte_flow_item_vf supp_mask = {
1737 		.id = 0xffffffff,
1738 	};
1739 	const void *def_mask = &rte_flow_item_vf_mask;
1740 	const struct rte_flow_item_vf *spec = NULL;
1741 	const struct rte_flow_item_vf *mask = NULL;
1742 	efx_mport_sel_t mport_v;
1743 	int rc;
1744 
1745 	if (ctx_mae->match_mport_set) {
1746 		return rte_flow_error_set(error, ENOTSUP,
1747 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1748 				"Can't handle multiple traffic source items");
1749 	}
1750 
1751 	rc = sfc_flow_parse_init(item,
1752 				 (const void **)&spec, (const void **)&mask,
1753 				 (const void *)&supp_mask, def_mask,
1754 				 sizeof(struct rte_flow_item_vf), error);
1755 	if (rc != 0)
1756 		return rc;
1757 
1758 	if (mask->id != supp_mask.id) {
1759 		return rte_flow_error_set(error, EINVAL,
1760 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1761 				"Bad mask in the VF pattern item");
1762 	}
1763 
1764 	/*
1765 	 * If "spec" is not set, the item requests any VF related to the
1766 	 * PF of the current DPDK port (but not the PF itself).
1767 	 * Reject this match criterion as unsupported.
1768 	 */
1769 	if (spec == NULL) {
1770 		return rte_flow_error_set(error, EINVAL,
1771 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1772 				"Bad spec in the VF pattern item");
1773 	}
1774 
1775 	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
1776 	if (rc != 0) {
1777 		return rte_flow_error_set(error, rc,
1778 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1779 				"Failed to convert the PF + VF IDs");
1780 	}
1781 
1782 	rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1783 	if (rc != 0) {
1784 		return rte_flow_error_set(error, rc,
1785 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1786 				"Failed to set MPORT for the PF + VF");
1787 	}
1788 
1789 	ctx_mae->match_mport_set = B_TRUE;
1790 
1791 	return 0;
1792 }
1793 
1794 /*
1795  * Having this field ID in a field locator means that this
1796  * locator cannot be used to actually set the field at the
1797  * time when the corresponding item gets encountered. Such
1798  * fields get stashed in the parsing context instead. This
1799  * is required to resolve dependencies between the stashed
1800  * fields. See sfc_mae_rule_process_pattern_data().
1801  */
1802 #define SFC_MAE_FIELD_HANDLING_DEFERRED	EFX_MAE_FIELD_NIDS
1803 
1804 struct sfc_mae_field_locator {
1805 	efx_mae_field_id_t		field_id;
1806 	size_t				size;
1807 	/* Field offset in the corresponding rte_flow_item_ struct */
1808 	size_t				ofst;
1809 };
1810 
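/*
 * Build the supported fields mask for a pattern item: bytes covered by
 * any field locator are set to all-ones, the rest remain zero.
 */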
1811 static void
1812 sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
1813 			     unsigned int nb_field_locators, void *mask_ptr,
1814 			     size_t mask_size)
1815 {
1816 	unsigned int i;
1817 
1818 	memset(mask_ptr, 0, mask_size);
1819 
1820 	for (i = 0; i < nb_field_locators; ++i) {
1821 		const struct sfc_mae_field_locator *fl = &field_locators[i];
1822 
1823 		SFC_ASSERT(fl->ofst + fl->size <= mask_size);
1824 		memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
1825 	}
1826 }
1827 
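/*
 * Copy every non-deferred field described by the locators from the item
 * spec/mask into the MAE match specification.
 */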
1828 static int
1829 sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
1830 		   unsigned int nb_field_locators, const uint8_t *spec,
1831 		   const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
1832 		   struct rte_flow_error *error)
1833 {
1834 	const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1835 	unsigned int i;
1836 	int rc = 0;
1837 
1838 	for (i = 0; i < nb_field_locators; ++i) {
1839 		const struct sfc_mae_field_locator *fl = &field_locators[i];
1840 
1841 		if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
1842 			continue;
1843 
1844 		rc = efx_mae_match_spec_field_set(ctx->match_spec,
1845 						  fremap[fl->field_id],
1846 						  fl->size, spec + fl->ofst,
1847 						  fl->size, mask + fl->ofst);
1848 		if (rc != 0)
1849 			break;
1850 	}
1851 
1852 	if (rc != 0) {
1853 		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1854 				NULL, "Failed to process item fields");
1855 	}
1856 
1857 	return rc;
1858 }
1859 
1860 static const struct sfc_mae_field_locator flocs_eth[] = {
1861 	{
1862 		/*
1863 		 * This locator is used only for building the supported fields mask.
1864 		 * The field is handled by sfc_mae_rule_process_pattern_data().
1865 		 */
1866 		SFC_MAE_FIELD_HANDLING_DEFERRED,
1867 		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
1868 		offsetof(struct rte_flow_item_eth, type),
1869 	},
1870 	{
1871 		EFX_MAE_FIELD_ETH_DADDR_BE,
1872 		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
1873 		offsetof(struct rte_flow_item_eth, dst),
1874 	},
1875 	{
1876 		EFX_MAE_FIELD_ETH_SADDR_BE,
1877 		RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
1878 		offsetof(struct rte_flow_item_eth, src),
1879 	},
1880 };
1881 
1882 static int
1883 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
1884 			    struct sfc_flow_parse_ctx *ctx,
1885 			    struct rte_flow_error *error)
1886 {
1887 	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1888 	struct rte_flow_item_eth override_mask;
1889 	struct rte_flow_item_eth supp_mask;
1890 	const uint8_t *spec = NULL;
1891 	const uint8_t *mask = NULL;
1892 	int rc;
1893 
1894 	sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
1895 				     &supp_mask, sizeof(supp_mask));
1896 	supp_mask.has_vlan = 1;
1897 
1898 	rc = sfc_flow_parse_init(item,
1899 				 (const void **)&spec, (const void **)&mask,
1900 				 (const void *)&supp_mask,
1901 				 &rte_flow_item_eth_mask,
1902 				 sizeof(struct rte_flow_item_eth), error);
1903 	if (rc != 0)
1904 		return rc;
1905 
1906 	if (ctx_mae->ft_rule_type == SFC_FT_RULE_JUMP && mask != NULL) {
1907 		/*
1908 		 * The HW/FW hasn't got support for match on MAC addresses in
1909 		 * outer rules yet (this will change). Match on VLAN presence
1910 		 * isn't supported either. Ignore these match criteria.
1911 		 */
1912 		memcpy(&override_mask, mask, sizeof(override_mask));
1913 		memset(&override_mask.hdr.dst_addr, 0,
1914 		       sizeof(override_mask.hdr.dst_addr));
1915 		memset(&override_mask.hdr.src_addr, 0,
1916 		       sizeof(override_mask.hdr.src_addr));
1917 		override_mask.has_vlan = 0;
1918 
1919 		mask = (const uint8_t *)&override_mask;
1920 	}
1921 
1922 	if (spec != NULL) {
1923 		struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1924 		struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1925 		const struct rte_flow_item_eth *item_spec;
1926 		const struct rte_flow_item_eth *item_mask;
1927 
1928 		item_spec = (const struct rte_flow_item_eth *)spec;
1929 		item_mask = (const struct rte_flow_item_eth *)mask;
1930 
1931 		/*
1932 		 * Remember various match criteria in the parsing context.
1933 		 * sfc_mae_rule_process_pattern_data() will consider them
1934 		 * altogether when the rest of the items have been parsed.
1935 		 */
1936 		ethertypes[0].value = item_spec->type;
1937 		ethertypes[0].mask = item_mask->type;
1938 		if (item_mask->has_vlan) {
1939 			pdata->has_ovlan_mask = B_TRUE;
1940 			if (item_spec->has_vlan)
1941 				pdata->has_ovlan_value = B_TRUE;
1942 		}
1943 	} else {
1944 		/*
1945 		 * The specification is empty. The overall pattern
1946 		 * validity will be enforced at the end of parsing.
1947 		 * See sfc_mae_rule_process_pattern_data().
1948 		 */
1949 		return 0;
1950 	}
1951 
1952 	return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
1953 				  ctx_mae, error);
1954 }
1955 
1956 static const struct sfc_mae_field_locator flocs_vlan[] = {
1957 	/* Outermost tag */
1958 	{
1959 		EFX_MAE_FIELD_VLAN0_TCI_BE,
1960 		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1961 		offsetof(struct rte_flow_item_vlan, tci),
1962 	},
1963 	{
1964 		/*
1965 		 * This locator is used only for building supported fields mask.
1966 		 * The field is handled by sfc_mae_rule_process_pattern_data().
1967 		 */
1968 		SFC_MAE_FIELD_HANDLING_DEFERRED,
1969 		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1970 		offsetof(struct rte_flow_item_vlan, inner_type),
1971 	},
1972 
1973 	/* Innermost tag */
1974 	{
1975 		EFX_MAE_FIELD_VLAN1_TCI_BE,
1976 		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1977 		offsetof(struct rte_flow_item_vlan, tci),
1978 	},
1979 	{
1980 		/*
1981 		 * This locator is used only for building supported fields mask.
1982 		 * The field is handled by sfc_mae_rule_process_pattern_data().
1983 		 */
1984 		SFC_MAE_FIELD_HANDLING_DEFERRED,
1985 		RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1986 		offsetof(struct rte_flow_item_vlan, inner_type),
1987 	},
1988 };
1989 
1990 static int
1991 sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
1992 			     struct sfc_flow_parse_ctx *ctx,
1993 			     struct rte_flow_error *error)
1994 {
1995 	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1996 	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1997 	boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1998 		&pdata->has_ovlan_mask,
1999 		&pdata->has_ivlan_mask,
2000 	};
2001 	boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
2002 		&pdata->has_ovlan_value,
2003 		&pdata->has_ivlan_value,
2004 	};
2005 	boolean_t *cur_tag_presence_bit_mp;
2006 	boolean_t *cur_tag_presence_bit_vp;
2007 	const struct sfc_mae_field_locator *flocs;
2008 	struct rte_flow_item_vlan supp_mask;
2009 	const uint8_t *spec = NULL;
2010 	const uint8_t *mask = NULL;
2011 	unsigned int nb_flocs;
2012 	int rc;
2013 
2014 	RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
2015 
2016 	if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
2017 		return rte_flow_error_set(error, ENOTSUP,
2018 				RTE_FLOW_ERROR_TYPE_ITEM, item,
2019 				"Can't match that many VLAN tags");
2020 	}
2021 
2022 	cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
2023 	cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];
2024 
2025 	if (*cur_tag_presence_bit_mp == B_TRUE &&
2026 	    *cur_tag_presence_bit_vp == B_FALSE) {
2027 		return rte_flow_error_set(error, EINVAL,
2028 				RTE_FLOW_ERROR_TYPE_ITEM, item,
2029 				"The previous item enforces no (more) VLAN, "
2030 				"so the current item (VLAN) must not exist");
2031 	}
2032 
2033 	nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
2034 	flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;
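	/*
	 * For illustration: RTE_DIM(flocs_vlan) is 4 and
	 * SFC_MAE_MATCH_VLAN_MAX_NTAGS is 2, so nb_flocs is 2. The first
	 * VLAN item (nb_vlan_tags == 0) uses the pair of locators starting
	 * at &flocs_vlan[0] (outermost tag, VLAN0_TCI_BE); the second one
	 * (nb_vlan_tags == 1) uses the pair starting at &flocs_vlan[2]
	 * (innermost tag, VLAN1_TCI_BE).
	 */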
2035 
2036 	sfc_mae_item_build_supp_mask(flocs, nb_flocs,
2037 				     &supp_mask, sizeof(supp_mask));
2038 	/*
2039 	 * This only means that the field is supported by the driver and libefx.
2040 	 * Support on NIC level will be checked when all items have been parsed.
2041 	 */
2042 	supp_mask.has_more_vlan = 1;
2043 
2044 	rc = sfc_flow_parse_init(item,
2045 				 (const void **)&spec, (const void **)&mask,
2046 				 (const void *)&supp_mask,
2047 				 &rte_flow_item_vlan_mask,
2048 				 sizeof(struct rte_flow_item_vlan), error);
2049 	if (rc != 0)
2050 		return rc;
2051 
2052 	if (spec != NULL) {
2053 		struct sfc_mae_ethertype *et = pdata->ethertypes;
2054 		const struct rte_flow_item_vlan *item_spec;
2055 		const struct rte_flow_item_vlan *item_mask;
2056 
2057 		item_spec = (const struct rte_flow_item_vlan *)spec;
2058 		item_mask = (const struct rte_flow_item_vlan *)mask;
2059 
2060 		/*
2061 		 * Remember various match criteria in the parsing context.
2062 		 * sfc_mae_rule_process_pattern_data() will consider them
2063 		 * altogether when the rest of the items have been parsed.
2064 		 */
2065 		et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
2066 		et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
2067 		pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
2068 		if (item_mask->has_more_vlan) {
2069 			if (pdata->nb_vlan_tags ==
2070 			    SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
2071 				return rte_flow_error_set(error, ENOTSUP,
2072 					RTE_FLOW_ERROR_TYPE_ITEM, item,
2073 					"Can't use 'has_more_vlan' in "
2074 					"the second item VLAN");
2075 			}
2076 			pdata->has_ivlan_mask = B_TRUE;
2077 			if (item_spec->has_more_vlan)
2078 				pdata->has_ivlan_value = B_TRUE;
2079 		}
2080 
2081 		/* Convert TCI to MAE representation right now. */
2082 		rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
2083 					ctx_mae, error);
2084 		if (rc != 0)
2085 			return rc;
2086 	}
2087 
2088 	++(pdata->nb_vlan_tags);
2089 
2090 	return 0;
2091 }
2092 
2093 static const struct sfc_mae_field_locator flocs_ipv4[] = {
2094 	{
2095 		EFX_MAE_FIELD_SRC_IP4_BE,
2096 		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
2097 		offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
2098 	},
2099 	{
2100 		EFX_MAE_FIELD_DST_IP4_BE,
2101 		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
2102 		offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
2103 	},
2104 	{
2105 		/*
2106 		 * This locator is used only for building supported fields mask.
2107 		 * The field is handled by sfc_mae_rule_process_pattern_data().
2108 		 */
2109 		SFC_MAE_FIELD_HANDLING_DEFERRED,
2110 		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
2111 		offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
2112 	},
2113 	{
2114 		EFX_MAE_FIELD_IP_TOS,
2115 		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
2116 				 hdr.type_of_service),
2117 		offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
2118 	},
2119 	{
2120 		EFX_MAE_FIELD_IP_TTL,
2121 		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
2122 		offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
2123 	},
2124 };
2125 
2126 static int
2127 sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
2128 			     struct sfc_flow_parse_ctx *ctx,
2129 			     struct rte_flow_error *error)
2130 {
2131 	rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2132 	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2133 	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
2134 	struct rte_flow_item_ipv4 supp_mask;
2135 	const uint8_t *spec = NULL;
2136 	const uint8_t *mask = NULL;
2137 	int rc;
2138 
2139 	sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
2140 				     &supp_mask, sizeof(supp_mask));
2141 
2142 	rc = sfc_flow_parse_init(item,
2143 				 (const void **)&spec, (const void **)&mask,
2144 				 (const void *)&supp_mask,
2145 				 &rte_flow_item_ipv4_mask,
2146 				 sizeof(struct rte_flow_item_ipv4), error);
2147 	if (rc != 0)
2148 		return rc;
2149 
2150 	pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
2151 	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
2152 
2153 	if (spec != NULL) {
2154 		const struct rte_flow_item_ipv4 *item_spec;
2155 		const struct rte_flow_item_ipv4 *item_mask;
2156 
2157 		item_spec = (const struct rte_flow_item_ipv4 *)spec;
2158 		item_mask = (const struct rte_flow_item_ipv4 *)mask;
2159 
2160 		pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
2161 		pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
2162 	} else {
2163 		return 0;
2164 	}
2165 
2166 	return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
2167 				  ctx_mae, error);
2168 }
2169 
2170 static const struct sfc_mae_field_locator flocs_ipv6[] = {
2171 	{
2172 		EFX_MAE_FIELD_SRC_IP6_BE,
2173 		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
2174 		offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
2175 	},
2176 	{
2177 		EFX_MAE_FIELD_DST_IP6_BE,
2178 		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
2179 		offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
2180 	},
2181 	{
2182 		/*
2183 		 * This locator is used only for building supported fields mask.
2184 		 * The field is handled by sfc_mae_rule_process_pattern_data().
2185 		 */
2186 		SFC_MAE_FIELD_HANDLING_DEFERRED,
2187 		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
2188 		offsetof(struct rte_flow_item_ipv6, hdr.proto),
2189 	},
2190 	{
2191 		EFX_MAE_FIELD_IP_TTL,
2192 		RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
2193 		offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
2194 	},
2195 };
2196 
2197 static int
2198 sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
2199 			     struct sfc_flow_parse_ctx *ctx,
2200 			     struct rte_flow_error *error)
2201 {
2202 	rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2203 	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2204 	const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
2205 	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
2206 	struct rte_flow_item_ipv6 supp_mask;
2207 	const uint8_t *spec = NULL;
2208 	const uint8_t *mask = NULL;
2209 	rte_be32_t vtc_flow_be;
2210 	uint32_t vtc_flow;
2211 	uint8_t tc_value;
2212 	uint8_t tc_mask;
2213 	int rc;
2214 
2215 	sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
2216 				     &supp_mask, sizeof(supp_mask));
2217 
2218 	vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
2219 	memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));
2220 
2221 	rc = sfc_flow_parse_init(item,
2222 				 (const void **)&spec, (const void **)&mask,
2223 				 (const void *)&supp_mask,
2224 				 &rte_flow_item_ipv6_mask,
2225 				 sizeof(struct rte_flow_item_ipv6), error);
2226 	if (rc != 0)
2227 		return rc;
2228 
2229 	pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
2230 	pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
2231 
2232 	if (spec != NULL) {
2233 		const struct rte_flow_item_ipv6 *item_spec;
2234 		const struct rte_flow_item_ipv6 *item_mask;
2235 
2236 		item_spec = (const struct rte_flow_item_ipv6 *)spec;
2237 		item_mask = (const struct rte_flow_item_ipv6 *)mask;
2238 
2239 		pdata->l3_next_proto_value = item_spec->hdr.proto;
2240 		pdata->l3_next_proto_mask = item_mask->hdr.proto;
2241 	} else {
2242 		return 0;
2243 	}
2244 
2245 	rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
2246 				ctx_mae, error);
2247 	if (rc != 0)
2248 		return rc;
2249 
2250 	memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
2251 	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
2252 	tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
2253 
2254 	memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
2255 	vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
2256 	tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
2257 
2258 	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
2259 					  fremap[EFX_MAE_FIELD_IP_TOS],
2260 					  sizeof(tc_value), &tc_value,
2261 					  sizeof(tc_mask), &tc_mask);
2262 	if (rc != 0) {
2263 		return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
2264 				NULL, "Failed to process item fields");
2265 	}
2266 
2267 	return 0;
2268 }
2269 
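/*
 * Worked example for the Traffic Class handling above (illustrative
 * numbers only): with RTE_IPV6_HDR_TC_MASK covering bits 27:20
 * (0x0ff00000) and RTE_IPV6_HDR_TC_SHIFT being 20, an item spec
 * carrying hdr.vtc_flow = RTE_BE32(0x6b812345) yields
 *
 *	tc_value = (0x6b812345 & 0x0ff00000) >> 20;	== 0xb8
 *
 * and an item mask covering the whole TC field yields tc_mask == 0xff,
 * so only the 8 Traffic Class bits are handed to EFX_MAE_FIELD_IP_TOS.
 */
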
2270 static const struct sfc_mae_field_locator flocs_tcp[] = {
2271 	{
2272 		EFX_MAE_FIELD_L4_SPORT_BE,
2273 		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
2274 		offsetof(struct rte_flow_item_tcp, hdr.src_port),
2275 	},
2276 	{
2277 		EFX_MAE_FIELD_L4_DPORT_BE,
2278 		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
2279 		offsetof(struct rte_flow_item_tcp, hdr.dst_port),
2280 	},
2281 	{
2282 		EFX_MAE_FIELD_TCP_FLAGS_BE,
2283 		/*
2284 		 * The size and offset have been picked deliberately: the
2285 		 * target MAE field is wider (16 bits) than the TCP flags
2286 		 * byte. This mapping relies on the MAE field being big-endian.
2287 		 */
2288 		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
2289 		RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
2290 		offsetof(struct rte_flow_item_tcp, hdr.data_off),
2291 	},
2292 };
2293 
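/*
 * To illustrate the TCP_FLAGS_BE mapping above (hypothetical values):
 * in the TCP header, the 8-bit data_off byte is immediately followed
 * by the 8-bit tcp_flags byte, so passing 2 bytes starting at
 * hdr.data_off hands the 16-bit big-endian MAE field a value whose
 * low-order byte holds the TCP flags. For instance, matching on
 * SYN | ACK (tcp_flags == 0x12, mask 0xff) with data_off zero and
 * unmasked makes the two bytes given to efx_mae_match_spec_field_set()
 * { 0x00, 0x12 } for the value and { 0x00, 0xff } for the mask.
 */
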
2294 static int
2295 sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
2296 			    struct sfc_flow_parse_ctx *ctx,
2297 			    struct rte_flow_error *error)
2298 {
2299 	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2300 	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
2301 	struct rte_flow_item_tcp supp_mask;
2302 	const uint8_t *spec = NULL;
2303 	const uint8_t *mask = NULL;
2304 	int rc;
2305 
2306 	/*
2307 	 * When encountered among outermost items, item TCP is invalid.
2308 	 * Check which match specification is being constructed now.
2309 	 */
2310 	if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
2311 		return rte_flow_error_set(error, EINVAL,
2312 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
2313 					  "TCP in outer frame is invalid");
2314 	}
2315 
2316 	sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
2317 				     &supp_mask, sizeof(supp_mask));
2318 
2319 	rc = sfc_flow_parse_init(item,
2320 				 (const void **)&spec, (const void **)&mask,
2321 				 (const void *)&supp_mask,
2322 				 &rte_flow_item_tcp_mask,
2323 				 sizeof(struct rte_flow_item_tcp), error);
2324 	if (rc != 0)
2325 		return rc;
2326 
2327 	pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
2328 	pdata->l3_next_proto_restriction_mask = 0xff;
2329 
2330 	if (spec == NULL)
2331 		return 0;
2332 
2333 	return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
2334 				  ctx_mae, error);
2335 }
2336 
2337 static const struct sfc_mae_field_locator flocs_udp[] = {
2338 	{
2339 		EFX_MAE_FIELD_L4_SPORT_BE,
2340 		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
2341 		offsetof(struct rte_flow_item_udp, hdr.src_port),
2342 	},
2343 	{
2344 		EFX_MAE_FIELD_L4_DPORT_BE,
2345 		RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
2346 		offsetof(struct rte_flow_item_udp, hdr.dst_port),
2347 	},
2348 };
2349 
2350 static int
2351 sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
2352 			    struct sfc_flow_parse_ctx *ctx,
2353 			    struct rte_flow_error *error)
2354 {
2355 	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2356 	struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
2357 	struct rte_flow_item_udp supp_mask;
2358 	const uint8_t *spec = NULL;
2359 	const uint8_t *mask = NULL;
2360 	int rc;
2361 
2362 	sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
2363 				     &supp_mask, sizeof(supp_mask));
2364 
2365 	rc = sfc_flow_parse_init(item,
2366 				 (const void **)&spec, (const void **)&mask,
2367 				 (const void *)&supp_mask,
2368 				 &rte_flow_item_udp_mask,
2369 				 sizeof(struct rte_flow_item_udp), error);
2370 	if (rc != 0)
2371 		return rc;
2372 
2373 	pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
2374 	pdata->l3_next_proto_restriction_mask = 0xff;
2375 
2376 	if (spec == NULL)
2377 		return 0;
2378 
2379 	return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
2380 				  ctx_mae, error);
2381 }
2382 
2383 static const struct sfc_mae_field_locator flocs_tunnel[] = {
2384 	{
2385 		/*
2386 		 * The size and offset values are relevant
2387 		 * for Geneve and NVGRE, too.
2388 		 */
2389 		.size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
2390 		.ofst = offsetof(struct rte_flow_item_vxlan, vni),
2391 	},
2392 };
2393 
2394 /*
2395  * An auxiliary registry which allows using non-encap. field IDs
2396  * directly when building a match specification of type ACTION.
2397  *
2398  * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
2399  */
2400 static const efx_mae_field_id_t field_ids_no_remap[] = {
2401 #define FIELD_ID_NO_REMAP(_field) \
2402 	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field
2403 
2404 	FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
2405 	FIELD_ID_NO_REMAP(ETH_SADDR_BE),
2406 	FIELD_ID_NO_REMAP(ETH_DADDR_BE),
2407 	FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
2408 	FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
2409 	FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
2410 	FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
2411 	FIELD_ID_NO_REMAP(SRC_IP4_BE),
2412 	FIELD_ID_NO_REMAP(DST_IP4_BE),
2413 	FIELD_ID_NO_REMAP(IP_PROTO),
2414 	FIELD_ID_NO_REMAP(IP_TOS),
2415 	FIELD_ID_NO_REMAP(IP_TTL),
2416 	FIELD_ID_NO_REMAP(SRC_IP6_BE),
2417 	FIELD_ID_NO_REMAP(DST_IP6_BE),
2418 	FIELD_ID_NO_REMAP(L4_SPORT_BE),
2419 	FIELD_ID_NO_REMAP(L4_DPORT_BE),
2420 	FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
2421 	FIELD_ID_NO_REMAP(HAS_OVLAN),
2422 	FIELD_ID_NO_REMAP(HAS_IVLAN),
2423 
2424 #undef FIELD_ID_NO_REMAP
2425 };
2426 
2427 /*
2428  * An auxiliary registry which allows using "ENC" field IDs
2429  * when building a match specification of type OUTER.
2430  *
2431  * See sfc_mae_rule_encap_parse_init().
2432  */
2433 static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
2434 #define FIELD_ID_REMAP_TO_ENCAP(_field) \
2435 	[EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field
2436 
2437 	FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
2438 	FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
2439 	FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
2440 	FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
2441 	FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
2442 	FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
2443 	FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
2444 	FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
2445 	FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
2446 	FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
2447 	FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
2448 	FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
2449 	FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
2450 	FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
2451 	FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
2452 	FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
2453 	FIELD_ID_REMAP_TO_ENCAP(HAS_OVLAN),
2454 	FIELD_ID_REMAP_TO_ENCAP(HAS_IVLAN),
2455 
2456 #undef FIELD_ID_REMAP_TO_ENCAP
2457 };
2458 
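/*
 * For illustration: sfc_mae_parse_item() looks fields up indirectly,
 * as fremap[fl->field_id]. With field_ids_no_remap selected,
 * fremap[EFX_MAE_FIELD_ETH_DADDR_BE] is simply
 * EFX_MAE_FIELD_ETH_DADDR_BE; with field_ids_remap_to_encap selected,
 * the very same locator resolves to EFX_MAE_FIELD_ENC_ETH_DADDR_BE.
 * This way, one set of flocs_* tables serves both ACTION and OUTER
 * match specifications.
 */
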
2459 static int
2460 sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
2461 			       struct sfc_flow_parse_ctx *ctx,
2462 			       struct rte_flow_error *error)
2463 {
2464 	struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2465 	uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
2466 	uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
2467 	const struct rte_flow_item_vxlan *vxp;
2468 	uint8_t supp_mask[sizeof(uint64_t)];
2469 	const uint8_t *spec = NULL;
2470 	const uint8_t *mask = NULL;
2471 	int rc;
2472 
2473 	if (ctx_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
2474 		/*
2475 		 * As a workaround, pattern processing has started from
2476 		 * this (tunnel) item. No pattern data to process yet.
2477 		 */
2478 	} else {
2479 		/*
2480 		 * We're about to start processing inner frame items.
2481 		 * Process pattern data that has been deferred so far
2482 		 * and reset pattern data storage.
2483 		 */
2484 		rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
2485 		if (rc != 0)
2486 			return rc;
2487 	}
2488 
2489 	memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));
2490 
2491 	sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
2492 				     &supp_mask, sizeof(supp_mask));
2493 
2494 	/*
2495 	 * This tunnel item was preliminarily detected by
2496 	 * sfc_mae_rule_encap_parse_init(). Default mask
2497 	 * was also picked by that helper. Use it here.
2498 	 */
2499 	rc = sfc_flow_parse_init(item,
2500 				 (const void **)&spec, (const void **)&mask,
2501 				 (const void *)&supp_mask,
2502 				 ctx_mae->tunnel_def_mask,
2503 				 ctx_mae->tunnel_def_mask_size, error);
2504 	if (rc != 0)
2505 		return rc;
2506 
2507 	/*
2508 	 * This item and later ones comprise a
2509 	 * match specification of type ACTION.
2510 	 */
2511 	ctx_mae->match_spec = ctx_mae->match_spec_action;
2512 
2513 	/* This item and later ones use non-encap. EFX MAE field IDs. */
2514 	ctx_mae->field_ids_remap = field_ids_no_remap;
2515 
2516 	if (spec == NULL)
2517 		return 0;
2518 
2519 	/*
2520 	 * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is a 32-bit one.
2521 	 * Copy 24-bit VNI, which is BE, at offset 1 in it.
2522 	 * The extra byte is 0 both in the mask and in the value.
2523 	 */
2524 	vxp = (const struct rte_flow_item_vxlan *)spec;
2525 	memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));
2526 
2527 	vxp = (const struct rte_flow_item_vxlan *)mask;
2528 	memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));
2529 
2530 	rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
2531 					  EFX_MAE_FIELD_ENC_VNET_ID_BE,
2532 					  sizeof(vnet_id_v), vnet_id_v,
2533 					  sizeof(vnet_id_m), vnet_id_m);
2534 	if (rc != 0) {
2535 		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
2536 					item, "Failed to set VXLAN VNI");
2537 	}
2538 
2539 	return rc;
2540 }
2541 
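/*
 * For illustration (hypothetical VNI): a VXLAN item with
 * vni = { 0x12, 0x34, 0x56 } and an all-ones VNI mask results in
 *
 *	vnet_id_v = { 0x00, 0x12, 0x34, 0x56 };
 *	vnet_id_m = { 0x00, 0xff, 0xff, 0xff };
 *
 * that is, the 24-bit big-endian VNI occupies the three least
 * significant bytes of the 32-bit EFX_MAE_FIELD_ENC_VNET_ID_BE value,
 * and the most significant byte stays zero in both value and mask.
 */
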
2542 static const struct sfc_flow_item sfc_flow_items[] = {
2543 	{
2544 		.type = RTE_FLOW_ITEM_TYPE_MARK,
2545 		.name = "MARK",
2546 		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2547 		.layer = SFC_FLOW_ITEM_ANY_LAYER,
2548 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2549 		.parse = sfc_mae_rule_parse_item_mark,
2550 	},
2551 	{
2552 		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
2553 		.name = "PORT_ID",
2554 		/*
2555 		 * In terms of RTE flow, this item is a META one,
2556 		 * and its position in the pattern is don't care.
2557 		 */
2558 		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2559 		.layer = SFC_FLOW_ITEM_ANY_LAYER,
2560 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2561 		.parse = sfc_mae_rule_parse_item_port_id,
2562 	},
2563 	{
2564 		.type = RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR,
2565 		.name = "PORT_REPRESENTOR",
2566 		/*
2567 		 * In terms of RTE flow, this item is a META one,
2568 		 * and its position in the pattern is don't care.
2569 		 */
2570 		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2571 		.layer = SFC_FLOW_ITEM_ANY_LAYER,
2572 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2573 		.parse = sfc_mae_rule_parse_item_ethdev_based,
2574 	},
2575 	{
2576 		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
2577 		.name = "REPRESENTED_PORT",
2578 		/*
2579 		 * In terms of RTE flow, this item is a META one,
2580 		 * and its position in the pattern is don't care.
2581 		 */
2582 		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2583 		.layer = SFC_FLOW_ITEM_ANY_LAYER,
2584 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2585 		.parse = sfc_mae_rule_parse_item_ethdev_based,
2586 	},
2587 	{
2588 		.type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
2589 		.name = "PHY_PORT",
2590 		/*
2591 		 * In terms of RTE flow, this item is a META one,
2592 		 * and its position in the pattern is don't care.
2593 		 */
2594 		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2595 		.layer = SFC_FLOW_ITEM_ANY_LAYER,
2596 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2597 		.parse = sfc_mae_rule_parse_item_phy_port,
2598 	},
2599 	{
2600 		.type = RTE_FLOW_ITEM_TYPE_PF,
2601 		.name = "PF",
2602 		/*
2603 		 * In terms of RTE flow, this item is a META one,
2604 		 * and its position in the pattern is don't care.
2605 		 */
2606 		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2607 		.layer = SFC_FLOW_ITEM_ANY_LAYER,
2608 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2609 		.parse = sfc_mae_rule_parse_item_pf,
2610 	},
2611 	{
2612 		.type = RTE_FLOW_ITEM_TYPE_VF,
2613 		.name = "VF",
2614 		/*
2615 		 * In terms of RTE flow, this item is a META one,
2616 		 * and its position in the pattern is don't care.
2617 		 */
2618 		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2619 		.layer = SFC_FLOW_ITEM_ANY_LAYER,
2620 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2621 		.parse = sfc_mae_rule_parse_item_vf,
2622 	},
2623 	{
2624 		.type = RTE_FLOW_ITEM_TYPE_ETH,
2625 		.name = "ETH",
2626 		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
2627 		.layer = SFC_FLOW_ITEM_L2,
2628 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2629 		.parse = sfc_mae_rule_parse_item_eth,
2630 	},
2631 	{
2632 		.type = RTE_FLOW_ITEM_TYPE_VLAN,
2633 		.name = "VLAN",
2634 		.prev_layer = SFC_FLOW_ITEM_L2,
2635 		.layer = SFC_FLOW_ITEM_L2,
2636 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2637 		.parse = sfc_mae_rule_parse_item_vlan,
2638 	},
2639 	{
2640 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
2641 		.name = "IPV4",
2642 		.prev_layer = SFC_FLOW_ITEM_L2,
2643 		.layer = SFC_FLOW_ITEM_L3,
2644 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2645 		.parse = sfc_mae_rule_parse_item_ipv4,
2646 	},
2647 	{
2648 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
2649 		.name = "IPV6",
2650 		.prev_layer = SFC_FLOW_ITEM_L2,
2651 		.layer = SFC_FLOW_ITEM_L3,
2652 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2653 		.parse = sfc_mae_rule_parse_item_ipv6,
2654 	},
2655 	{
2656 		.type = RTE_FLOW_ITEM_TYPE_TCP,
2657 		.name = "TCP",
2658 		.prev_layer = SFC_FLOW_ITEM_L3,
2659 		.layer = SFC_FLOW_ITEM_L4,
2660 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2661 		.parse = sfc_mae_rule_parse_item_tcp,
2662 	},
2663 	{
2664 		.type = RTE_FLOW_ITEM_TYPE_UDP,
2665 		.name = "UDP",
2666 		.prev_layer = SFC_FLOW_ITEM_L3,
2667 		.layer = SFC_FLOW_ITEM_L4,
2668 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2669 		.parse = sfc_mae_rule_parse_item_udp,
2670 	},
2671 	{
2672 		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
2673 		.name = "VXLAN",
2674 		.prev_layer = SFC_FLOW_ITEM_L4,
2675 		.layer = SFC_FLOW_ITEM_START_LAYER,
2676 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2677 		.parse = sfc_mae_rule_parse_item_tunnel,
2678 	},
2679 	{
2680 		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
2681 		.name = "GENEVE",
2682 		.prev_layer = SFC_FLOW_ITEM_L4,
2683 		.layer = SFC_FLOW_ITEM_START_LAYER,
2684 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2685 		.parse = sfc_mae_rule_parse_item_tunnel,
2686 	},
2687 	{
2688 		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
2689 		.name = "NVGRE",
2690 		.prev_layer = SFC_FLOW_ITEM_L3,
2691 		.layer = SFC_FLOW_ITEM_START_LAYER,
2692 		.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2693 		.parse = sfc_mae_rule_parse_item_tunnel,
2694 	},
2695 };
2696 
2697 static int
2698 sfc_mae_rule_process_outer(struct sfc_adapter *sa,
2699 			   struct sfc_mae_parse_ctx *ctx,
2700 			   struct sfc_mae_outer_rule **rulep,
2701 			   struct rte_flow_error *error)
2702 {
2703 	efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
2704 	int rc;
2705 
2706 	if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
2707 		*rulep = NULL;
2708 		goto no_or_id;
2709 	}
2710 
2711 	SFC_ASSERT(ctx->match_spec_outer != NULL);
2712 
2713 	if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
2714 		return rte_flow_error_set(error, ENOTSUP,
2715 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2716 					  "Inconsistent pattern (outer)");
2717 	}
2718 
2719 	*rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
2720 					   ctx->encap_type);
2721 	if (*rulep != NULL) {
2722 		efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2723 	} else {
2724 		rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
2725 					    ctx->encap_type, rulep);
2726 		if (rc != 0) {
2727 			return rte_flow_error_set(error, rc,
2728 					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2729 					"Failed to process the pattern");
2730 		}
2731 	}
2732 
2733 	/* The spec has now been tracked by the outer rule entry. */
2734 	ctx->match_spec_outer = NULL;
2735 
2736 no_or_id:
2737 	switch (ctx->ft_rule_type) {
2738 	case SFC_FT_RULE_NONE:
2739 		break;
2740 	case SFC_FT_RULE_JUMP:
2741 		/* No action rule */
2742 		return 0;
2743 	case SFC_FT_RULE_GROUP:
2744 		/*
2745 		 * Match on recirculation ID rather than
2746 		 * on the outer rule allocation handle.
2747 		 */
2748 		rc = efx_mae_match_spec_recirc_id_set(ctx->match_spec_action,
2749 					SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id));
2750 		if (rc != 0) {
2751 			return rte_flow_error_set(error, rc,
2752 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2753 					"tunnel offload: GROUP: AR: failed to request match on RECIRC_ID");
2754 		}
2755 		return 0;
2756 	default:
2757 		SFC_ASSERT(B_FALSE);
2758 	}
2759 
2760 	/*
2761 	 * In MAE, lookup sequence comprises outer parse, outer rule lookup,
2762 	 * inner parse (when some outer rule is hit) and action rule lookup.
2763 	 * If the currently processed flow does not come with an outer rule,
2764 	 * its action rule must be available only for packets which miss in
2765 	 * outer rule table. Set OR_ID match field to 0xffffffff/0xffffffff
2766 	 * in the action rule specification; this ensures correct behaviour.
2767 	 *
2768 	 * If, on the other hand, this flow does have an outer rule, its ID
2769 	 * may be unknown at the moment (not yet allocated), but OR_ID mask
2770 	 * has to be set to 0xffffffff anyway for correct class comparisons.
2771 	 * When the outer rule has been allocated, this match field will be
2772 	 * overridden by sfc_mae_outer_rule_enable() to use the right value.
2773 	 */
2774 	rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
2775 						  &invalid_rule_id);
2776 	if (rc != 0) {
2777 		if (*rulep != NULL)
2778 			sfc_mae_outer_rule_del(sa, *rulep);
2779 
2780 		*rulep = NULL;
2781 
2782 		return rte_flow_error_set(error, rc,
2783 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2784 					  "Failed to process the pattern");
2785 	}
2786 
2787 	return 0;
2788 }
2789 
2790 static int
2791 sfc_mae_rule_preparse_item_mark(const struct rte_flow_item_mark *spec,
2792 				struct sfc_mae_parse_ctx *ctx)
2793 {
2794 	struct sfc_flow_tunnel *ft;
2795 	uint32_t user_mark;
2796 
2797 	if (spec == NULL) {
2798 		sfc_err(ctx->sa, "tunnel offload: GROUP: NULL spec in item MARK");
2799 		return EINVAL;
2800 	}
2801 
2802 	ft = sfc_flow_tunnel_pick(ctx->sa, spec->id);
2803 	if (ft == NULL) {
2804 		sfc_err(ctx->sa, "tunnel offload: GROUP: invalid tunnel");
2805 		return EINVAL;
2806 	}
2807 
2808 	if (ft->refcnt == 0) {
2809 		sfc_err(ctx->sa, "tunnel offload: GROUP: tunnel=%u does not exist",
2810 			ft->id);
2811 		return ENOENT;
2812 	}
2813 
2814 	user_mark = SFC_FT_GET_USER_MARK(spec->id);
2815 	if (user_mark != 0) {
2816 		sfc_err(ctx->sa, "tunnel offload: GROUP: invalid item MARK");
2817 		return EINVAL;
2818 	}
2819 
2820 	sfc_dbg(ctx->sa, "tunnel offload: GROUP: detected");
2821 
2822 	ctx->ft_rule_type = SFC_FT_RULE_GROUP;
2823 	ctx->ft = ft;
2824 
2825 	return 0;
2826 }
2827 
2828 static int
2829 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
2830 			      struct sfc_mae_parse_ctx *ctx,
2831 			      struct rte_flow_error *error)
2832 {
2833 	const struct rte_flow_item *pattern = ctx->pattern;
2834 	struct sfc_mae *mae = &sa->mae;
2835 	uint8_t recirc_id = 0;
2836 	int rc;
2837 
2838 	if (pattern == NULL) {
2839 		rte_flow_error_set(error, EINVAL,
2840 				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
2841 				   "NULL pattern");
2842 		return -rte_errno;
2843 	}
2844 
2845 	for (;;) {
2846 		switch (pattern->type) {
2847 		case RTE_FLOW_ITEM_TYPE_MARK:
2848 			rc = sfc_mae_rule_preparse_item_mark(pattern->spec,
2849 							     ctx);
2850 			if (rc != 0) {
2851 				return rte_flow_error_set(error, rc,
2852 						  RTE_FLOW_ERROR_TYPE_ITEM,
2853 						  pattern, "tunnel offload: GROUP: invalid item MARK");
2854 			}
2855 			++pattern;
2856 			continue;
2857 		case RTE_FLOW_ITEM_TYPE_VXLAN:
2858 			ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
2859 			ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
2860 			ctx->tunnel_def_mask_size =
2861 				sizeof(rte_flow_item_vxlan_mask);
2862 			break;
2863 		case RTE_FLOW_ITEM_TYPE_GENEVE:
2864 			ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
2865 			ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
2866 			ctx->tunnel_def_mask_size =
2867 				sizeof(rte_flow_item_geneve_mask);
2868 			break;
2869 		case RTE_FLOW_ITEM_TYPE_NVGRE:
2870 			ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
2871 			ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
2872 			ctx->tunnel_def_mask_size =
2873 				sizeof(rte_flow_item_nvgre_mask);
2874 			break;
2875 		case RTE_FLOW_ITEM_TYPE_END:
2876 			break;
2877 		default:
2878 			++pattern;
2879 			continue;
2880 		}
2881 
2882 		break;
2883 	}
2884 
2885 	switch (ctx->ft_rule_type) {
2886 	case SFC_FT_RULE_NONE:
2887 		if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
2888 			return 0;
2889 		break;
2890 	case SFC_FT_RULE_JUMP:
2891 		if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
2892 			return rte_flow_error_set(error, ENOTSUP,
2893 						  RTE_FLOW_ERROR_TYPE_ITEM,
2894 						  pattern, "tunnel offload: JUMP: invalid item");
2895 		}
2896 		ctx->encap_type = ctx->ft->encap_type;
2897 		break;
2898 	case SFC_FT_RULE_GROUP:
2899 		if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
2900 			return rte_flow_error_set(error, EINVAL,
2901 						  RTE_FLOW_ERROR_TYPE_ITEM,
2902 						  NULL, "tunnel offload: GROUP: missing tunnel item");
2903 		} else if (ctx->encap_type != ctx->ft->encap_type) {
2904 			return rte_flow_error_set(error, EINVAL,
2905 						  RTE_FLOW_ERROR_TYPE_ITEM,
2906 						  pattern, "tunnel offload: GROUP: tunnel type mismatch");
2907 		}
2908 
2909 		/*
2910 		 * The HW/FW hasn't got support for the use of "ENC" fields in
2911 		 * action rules (except the VNET_ID one) yet. As a workaround,
2912 		 * start parsing the pattern from the tunnel item.
2913 		 */
2914 		ctx->pattern = pattern;
2915 		break;
2916 	default:
2917 		SFC_ASSERT(B_FALSE);
2918 		break;
2919 	}
2920 
2921 	if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
2922 		return rte_flow_error_set(error, ENOTSUP,
2923 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2924 					  "OR: unsupported tunnel type");
2925 	}
2926 
2927 	switch (ctx->ft_rule_type) {
2928 	case SFC_FT_RULE_JUMP:
2929 		recirc_id = SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id);
2930 		/* FALLTHROUGH */
2931 	case SFC_FT_RULE_NONE:
2932 		if (ctx->priority >= mae->nb_outer_rule_prios_max) {
2933 			return rte_flow_error_set(error, ENOTSUP,
2934 					RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2935 					NULL, "OR: unsupported priority level");
2936 		}
2937 
2938 		rc = efx_mae_match_spec_init(sa->nic,
2939 					     EFX_MAE_RULE_OUTER, ctx->priority,
2940 					     &ctx->match_spec_outer);
2941 		if (rc != 0) {
2942 			return rte_flow_error_set(error, rc,
2943 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2944 				"OR: failed to initialise the match specification");
2945 		}
2946 
2947 		/*
2948 		 * Outermost items comprise a match
2949 		 * specification of type OUTER.
2950 		 */
2951 		ctx->match_spec = ctx->match_spec_outer;
2952 
2953 		/* Outermost items use "ENC" EFX MAE field IDs. */
2954 		ctx->field_ids_remap = field_ids_remap_to_encap;
2955 
2956 		rc = efx_mae_outer_rule_recirc_id_set(ctx->match_spec,
2957 						      recirc_id);
2958 		if (rc != 0) {
2959 			return rte_flow_error_set(error, rc,
2960 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2961 					"OR: failed to initialise RECIRC_ID");
2962 		}
2963 		break;
2964 	case SFC_FT_RULE_GROUP:
2965 		/* Outermost items -> "ENC" match fields in the action rule. */
2966 		ctx->field_ids_remap = field_ids_remap_to_encap;
2967 		ctx->match_spec = ctx->match_spec_action;
2968 
2969 		/* No own outer rule; match on JUMP OR's RECIRC_ID is used. */
2970 		ctx->encap_type = EFX_TUNNEL_PROTOCOL_NONE;
2971 		break;
2972 	default:
2973 		SFC_ASSERT(B_FALSE);
2974 		break;
2975 	}
2976 
2977 	return 0;
2978 }
2979 
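/*
 * For illustration (hypothetical pattern): given
 * ETH / IPV4 / UDP / VXLAN / ETH / IPV4 / TCP / END with no item MARK,
 * the pre-scan above detects VXLAN, so the items preceding it are
 * parsed into a match specification of type OUTER using the "ENC"
 * field ID remap, while sfc_mae_rule_parse_item_tunnel() switches the
 * context over to the ACTION specification (and the non-encap. remap)
 * starting from the VXLAN item itself.
 */
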
2980 static void
2981 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
2982 			      struct sfc_mae_parse_ctx *ctx)
2983 {
2984 	if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2985 		return;
2986 
2987 	if (ctx->match_spec_outer != NULL)
2988 		efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2989 }
2990 
2991 int
2992 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
2993 			   const struct rte_flow_item pattern[],
2994 			   struct sfc_flow_spec_mae *spec,
2995 			   struct rte_flow_error *error)
2996 {
2997 	struct sfc_mae_parse_ctx ctx_mae;
2998 	unsigned int priority_shift = 0;
2999 	struct sfc_flow_parse_ctx ctx;
3000 	int rc;
3001 
3002 	memset(&ctx_mae, 0, sizeof(ctx_mae));
3003 	ctx_mae.ft_rule_type = spec->ft_rule_type;
3004 	ctx_mae.priority = spec->priority;
3005 	ctx_mae.ft = spec->ft;
3006 	ctx_mae.sa = sa;
3007 
3008 	switch (ctx_mae.ft_rule_type) {
3009 	case SFC_FT_RULE_JUMP:
3010 		/*
3011 		 * By design, this flow should be represented solely by the
3012 		 * outer rule. But the HW/FW hasn't got support for setting
3013 		 * Rx mark from RECIRC_ID on outer rule lookup yet. Neither
3014 		 * does it support outer rule counters. As a workaround, an
3015 		 * action rule of lower priority is used to do the job.
3016 		 */
3017 		priority_shift = 1;
3018 
3019 		/* FALLTHROUGH */
3020 	case SFC_FT_RULE_GROUP:
3021 		if (ctx_mae.priority != 0) {
3022 			/*
3023 			 * Because of the above workaround, deny the
3024 			 * use of priorities to JUMP and GROUP rules.
3025 			 */
3026 			rc = rte_flow_error_set(error, ENOTSUP,
3027 				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
3028 				"tunnel offload: priorities are not supported");
3029 			goto fail_priority_check;
3030 		}
3031 
3032 		/* FALLTHROUGH */
3033 	case SFC_FT_RULE_NONE:
3034 		rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
3035 					     spec->priority + priority_shift,
3036 					     &ctx_mae.match_spec_action);
3037 		if (rc != 0) {
3038 			rc = rte_flow_error_set(error, rc,
3039 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3040 				"AR: failed to initialise the match specification");
3041 			goto fail_init_match_spec_action;
3042 		}
3043 		break;
3044 	default:
3045 		SFC_ASSERT(B_FALSE);
3046 		break;
3047 	}
3048 
3049 	/*
3050 	 * As a preliminary setting, assume that there is no encapsulation
3051 	 * in the pattern. That is, pattern items are about to comprise a
3052 	 * match specification of type ACTION and use non-encap. field IDs.
3053 	 *
3054 	 * sfc_mae_rule_encap_parse_init() below may override this.
3055 	 */
3056 	ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
3057 	ctx_mae.match_spec = ctx_mae.match_spec_action;
3058 	ctx_mae.field_ids_remap = field_ids_no_remap;
3059 	ctx_mae.pattern = pattern;
3060 
3061 	ctx.type = SFC_FLOW_PARSE_CTX_MAE;
3062 	ctx.mae = &ctx_mae;
3063 
3064 	rc = sfc_mae_rule_encap_parse_init(sa, &ctx_mae, error);
3065 	if (rc != 0)
3066 		goto fail_encap_parse_init;
3067 
3068 	/*
3069 	 * sfc_mae_rule_encap_parse_init() may have detected tunnel offload
3070 	 * GROUP rule. Remember its properties for later use.
3071 	 */
3072 	spec->ft_rule_type = ctx_mae.ft_rule_type;
3073 	spec->ft = ctx_mae.ft;
3074 
3075 	rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
3076 				    ctx_mae.pattern, &ctx, error);
3077 	if (rc != 0)
3078 		goto fail_parse_pattern;
3079 
3080 	rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
3081 	if (rc != 0)
3082 		goto fail_process_pattern_data;
3083 
3084 	rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
3085 	if (rc != 0)
3086 		goto fail_process_outer;
3087 
3088 	if (ctx_mae.match_spec_action != NULL &&
3089 	    !efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
3090 		rc = rte_flow_error_set(error, ENOTSUP,
3091 					RTE_FLOW_ERROR_TYPE_ITEM, NULL,
3092 					"Inconsistent pattern");
3093 		goto fail_validate_match_spec_action;
3094 	}
3095 
3096 	spec->match_spec = ctx_mae.match_spec_action;
3097 
3098 	return 0;
3099 
3100 fail_validate_match_spec_action:
3101 fail_process_outer:
3102 fail_process_pattern_data:
3103 fail_parse_pattern:
3104 	sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);
3105 
3106 fail_encap_parse_init:
3107 	if (ctx_mae.match_spec_action != NULL)
3108 		efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
3109 
3110 fail_init_match_spec_action:
3111 fail_priority_check:
3112 	return rc;
3113 }
3114 
3115 static int
3116 sfc_mae_rule_parse_action_set_mac(struct sfc_adapter *sa,
3117 				  enum sfc_mae_mac_addr_type type,
3118 				  const struct rte_flow_action_set_mac *conf,
3119 				  struct sfc_mae_aset_ctx *ctx,
3120 				  struct rte_flow_error *error)
3121 {
3122 	struct sfc_mae_mac_addr **mac_addrp;
3123 	int rc;
3124 
3125 	if (conf == NULL) {
3126 		return rte_flow_error_set(error, EINVAL,
3127 				RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3128 				"the MAC address entry definition is NULL");
3129 	}
3130 
3131 	switch (type) {
3132 	case SFC_MAE_MAC_ADDR_DST:
3133 		rc = efx_mae_action_set_populate_set_dst_mac(ctx->spec);
3134 		mac_addrp = &ctx->dst_mac;
3135 		break;
3136 	case SFC_MAE_MAC_ADDR_SRC:
3137 		rc = efx_mae_action_set_populate_set_src_mac(ctx->spec);
3138 		mac_addrp = &ctx->src_mac;
3139 		break;
3140 	default:
3141 		rc = EINVAL;
3142 		break;
3143 	}
3144 
3145 	if (rc != 0)
3146 		goto error;
3147 
3148 	*mac_addrp = sfc_mae_mac_addr_attach(sa, conf->mac_addr);
3149 	if (*mac_addrp != NULL)
3150 		return 0;
3151 
3152 	rc = sfc_mae_mac_addr_add(sa, conf->mac_addr, mac_addrp);
3153 	if (rc != 0)
3154 		goto error;
3155 
3156 	return 0;
3157 
3158 error:
3159 	return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
3160 				  NULL, "failed to request set MAC action");
3161 }
3162 
3163 /*
3164  * An action supported by MAE may correspond to a bundle of RTE flow actions,
3165  * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_SET_VLAN_VID + OF_SET_VLAN_PCP.
3166  * That is, related RTE flow actions need to be tracked as parts of a whole
3167  * so that they can be combined into a single action and submitted to the MAE
3168  * representation of a given rule's action set.
3169  *
3170  * Each RTE flow action provided by an application gets classified as
3171  * one belonging to some bundle type. If an action is not supposed to
3172  * belong to any bundle, or if this action is END, it is described as
3173  * one belonging to a dummy bundle of type EMPTY.
3174  *
3175  * A currently tracked bundle will be submitted if a repeating
3176  * action or an action of different bundle type follows.
3177  */
3178 
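/*
 * For illustration (hypothetical action list): given
 * OF_PUSH_VLAN / OF_SET_VLAN_VID / OF_SET_VLAN_PCP / PORT_ID / END,
 * the first three actions accumulate in a VLAN_PUSH bundle;
 * encountering PORT_ID (classified as an EMPTY-bundle action) makes
 * sfc_mae_actions_bundle_sync() submit the accumulated TPID and TCI
 * as a single MAE VLAN push and reset the bundle.
 */
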
3179 enum sfc_mae_actions_bundle_type {
3180 	SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
3181 	SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
3182 };
3183 
3184 struct sfc_mae_actions_bundle {
3185 	enum sfc_mae_actions_bundle_type	type;
3186 
3187 	/* Indicates actions already tracked by the current bundle */
3188 	uint64_t				actions_mask;
3189 
3190 	/* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
3191 	rte_be16_t				vlan_push_tpid;
3192 	rte_be16_t				vlan_push_tci;
3193 };
3194 
3195 /*
3196  * Combine configuration of RTE flow actions tracked by the bundle into a
3197  * single action and submit the result to MAE action set specification.
3198  * Do nothing in the case of dummy action bundle.
3199  */
3200 static int
3201 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
3202 			      efx_mae_actions_t *spec)
3203 {
3204 	int rc = 0;
3205 
3206 	switch (bundle->type) {
3207 	case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
3208 		break;
3209 	case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
3210 		rc = efx_mae_action_set_populate_vlan_push(
3211 			spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
3212 		break;
3213 	default:
3214 		SFC_ASSERT(B_FALSE);
3215 		break;
3216 	}
3217 
3218 	return rc;
3219 }
3220 
3221 /*
3222  * Given the type of the next RTE flow action in the line, decide
3223  * whether a new bundle is about to start, and, if this is the case,
3224  * submit and reset the current bundle.
3225  */
3226 static int
3227 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
3228 			    struct sfc_mae_actions_bundle *bundle,
3229 			    efx_mae_actions_t *spec,
3230 			    struct rte_flow_error *error)
3231 {
3232 	enum sfc_mae_actions_bundle_type bundle_type_new;
3233 	int rc;
3234 
3235 	switch (action->type) {
3236 	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3237 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3238 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3239 		bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
3240 		break;
3241 	default:
3242 		/*
3243 		 * Self-sufficient actions, including END, are handled in this
3244 		 * case. No checks for unsupported actions are needed here
3245 		 * because parsing doesn't occur at this point.
3246 		 */
3247 		bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
3248 		break;
3249 	}
3250 
3251 	if (bundle_type_new != bundle->type ||
3252 	    (bundle->actions_mask & (1ULL << action->type)) != 0) {
3253 		rc = sfc_mae_actions_bundle_submit(bundle, spec);
3254 		if (rc != 0)
3255 			goto fail_submit;
3256 
3257 		memset(bundle, 0, sizeof(*bundle));
3258 	}
3259 
3260 	bundle->type = bundle_type_new;
3261 
3262 	return 0;
3263 
3264 fail_submit:
3265 	return rte_flow_error_set(error, rc,
3266 			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3267 			"Failed to request the (group of) action(s)");
3268 }
3269 
3270 static void
3271 sfc_mae_rule_parse_action_of_push_vlan(
3272 			    const struct rte_flow_action_of_push_vlan *conf,
3273 			    struct sfc_mae_actions_bundle *bundle)
3274 {
3275 	bundle->vlan_push_tpid = conf->ethertype;
3276 }
3277 
3278 static void
3279 sfc_mae_rule_parse_action_of_set_vlan_vid(
3280 			    const struct rte_flow_action_of_set_vlan_vid *conf,
3281 			    struct sfc_mae_actions_bundle *bundle)
3282 {
3283 	bundle->vlan_push_tci |= (conf->vlan_vid &
3284 				  rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
3285 }
3286 
3287 static void
3288 sfc_mae_rule_parse_action_of_set_vlan_pcp(
3289 			    const struct rte_flow_action_of_set_vlan_pcp *conf,
3290 			    struct sfc_mae_actions_bundle *bundle)
3291 {
3292 	uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
3293 					   RTE_LEN2MASK(3, uint8_t)) << 13;
3294 
3295 	bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
3296 }
3297 
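/*
 * Worked example for the two helpers above (hypothetical values):
 * OF_SET_VLAN_VID with vlan_vid = RTE_BE16(100) contributes the low
 * 12 bits, and OF_SET_VLAN_PCP with vlan_pcp = 5 contributes
 * (5 & 0x7) << 13 = 0xa000, so the bundle ends up with
 * vlan_push_tci = rte_cpu_to_be_16(0xa064), i.e. PCP 5, DEI 0,
 * VLAN ID 100.
 */
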
3298 struct sfc_mae_parsed_item {
3299 	const struct rte_flow_item	*item;
3300 	size_t				proto_header_ofst;
3301 	size_t				proto_header_size;
3302 };
3303 
3304 /*
3305  * For each 16-bit word of the given header, override the
3306  * bits enforced by the corresponding 16-bit word of the item mask.
3307  */
3308 static void
3309 sfc_mae_header_force_item_masks(uint8_t *header_buf,
3310 				const struct sfc_mae_parsed_item *parsed_items,
3311 				unsigned int nb_parsed_items)
3312 {
3313 	unsigned int item_idx;
3314 
3315 	for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
3316 		const struct sfc_mae_parsed_item *parsed_item;
3317 		const struct rte_flow_item *item;
3318 		size_t proto_header_size;
3319 		size_t ofst;
3320 
3321 		parsed_item = &parsed_items[item_idx];
3322 		proto_header_size = parsed_item->proto_header_size;
3323 		item = parsed_item->item;
3324 
3325 		for (ofst = 0; ofst < proto_header_size;
3326 		     ofst += sizeof(rte_be16_t)) {
3327 			rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
3328 			const rte_be16_t *w_maskp;
3329 			const rte_be16_t *w_specp;
3330 
3331 			w_maskp = RTE_PTR_ADD(item->mask, ofst);
3332 			w_specp = RTE_PTR_ADD(item->spec, ofst);
3333 
3334 			*wp &= ~(*w_maskp);
3335 			*wp |= (*w_specp & *w_maskp);
3336 		}
3337 
3338 		header_buf += proto_header_size;
3339 	}
3340 }
3341 
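/*
 * Worked example for the helper above (hypothetical bytes, words shown
 * numerically): if a header word holds 0x1234 after the defaults have
 * been applied, and the corresponding item mask and spec words are
 * 0x0ff0 and 0x0ab0, the word becomes
 * (0x1234 & ~0x0ff0) | (0x0ab0 & 0x0ff0) == 0x1ab4: masked bits are
 * forced back to the spec value, unmasked bits keep whatever the
 * header builder put there.
 */
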
3342 #define SFC_IPV4_TTL_DEF	0x40
3343 #define SFC_IPV6_VTC_FLOW_DEF	0x60000000
3344 #define SFC_IPV6_HOP_LIMITS_DEF	0xff
3345 #define SFC_VXLAN_FLAGS_DEF	0x08000000
3346 
3347 static int
3348 sfc_mae_rule_parse_action_vxlan_encap(
3349 			    struct sfc_mae *mae,
3350 			    const struct rte_flow_action_vxlan_encap *conf,
3351 			    efx_mae_actions_t *spec,
3352 			    struct rte_flow_error *error)
3353 {
3354 	struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
3355 	struct rte_flow_item *pattern = conf->definition;
3356 	uint8_t *buf = bounce_eh->buf;
3357 
3358 	/* This array will keep track of non-VOID pattern items. */
3359 	struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
3360 						2 /* VLAN tags */ +
3361 						1 /* IPv4 or IPv6 */ +
3362 						1 /* UDP */ +
3363 						1 /* VXLAN */];
3364 	unsigned int nb_parsed_items = 0;
3365 
3366 	size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
3367 	uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
3368 				  sizeof(struct rte_ipv6_hdr))];
3369 	struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
3370 	struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
3371 	struct rte_vxlan_hdr *vxlan = NULL;
3372 	struct rte_udp_hdr *udp = NULL;
3373 	unsigned int nb_vlan_tags = 0;
3374 	size_t next_proto_ofst = 0;
3375 	size_t ethertype_ofst = 0;
3376 	uint64_t exp_items;
3377 	int rc;
3378 
3379 	if (pattern == NULL) {
3380 		return rte_flow_error_set(error, EINVAL,
3381 				RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3382 				"The encap. header definition is NULL");
3383 	}
3384 
3385 	bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
3386 	bounce_eh->size = 0;
3387 
3388 	/*
3389 	 * Process pattern items and remember non-VOID ones.
3390 	 * Defer applying masks until after the complete header
3391 	 * has been built from the pattern items.
3392 	 */
3393 	exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);
3394 
3395 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
3396 		struct sfc_mae_parsed_item *parsed_item;
3397 		const uint64_t exp_items_extra_vlan[] = {
3398 			RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
3399 		};
3400 		size_t proto_header_size;
3401 		rte_be16_t *ethertypep;
3402 		uint8_t *next_protop;
3403 		uint8_t *buf_cur;
3404 
3405 		if (pattern->spec == NULL) {
3406 			return rte_flow_error_set(error, EINVAL,
3407 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3408 					"NULL item spec in the encap. header");
3409 		}
3410 
3411 		if (pattern->mask == NULL) {
3412 			return rte_flow_error_set(error, EINVAL,
3413 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3414 					"NULL item mask in the encap. header");
3415 		}
3416 
3417 		if (pattern->last != NULL) {
3418 			/* This is not a match pattern, so disallow range. */
3419 			return rte_flow_error_set(error, EINVAL,
3420 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3421 					"Range item in the encap. header");
3422 		}
3423 
3424 		if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
3425 			/* Handle VOID separately, for clarity. */
3426 			continue;
3427 		}
3428 
3429 		if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
3430 			return rte_flow_error_set(error, ENOTSUP,
3431 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3432 					"Unexpected item in the encap. header");
3433 		}
3434 
3435 		parsed_item = &parsed_items[nb_parsed_items];
3436 		buf_cur = buf + bounce_eh->size;
3437 
3438 		switch (pattern->type) {
3439 		case RTE_FLOW_ITEM_TYPE_ETH:
3440 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
3441 					       exp_items);
3442 			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
3443 						  hdr) != 0);
3444 
3445 			proto_header_size = sizeof(struct rte_ether_hdr);
3446 
3447 			ethertype_ofst = eth_ethertype_ofst;
3448 
3449 			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
3450 				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
3451 				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
3452 			break;
3453 		case RTE_FLOW_ITEM_TYPE_VLAN:
3454 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
3455 					       exp_items);
3456 			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
3457 						  hdr) != 0);
3458 
3459 			proto_header_size = sizeof(struct rte_vlan_hdr);
3460 
3461 			ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
3462 			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);
3463 
3464 			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3465 			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3466 
3467 			ethertype_ofst =
3468 			    bounce_eh->size +
3469 			    offsetof(struct rte_vlan_hdr, eth_proto);
3470 
3471 			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
3472 				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
3473 			exp_items |= exp_items_extra_vlan[nb_vlan_tags];
3474 
3475 			++nb_vlan_tags;
3476 			break;
3477 		case RTE_FLOW_ITEM_TYPE_IPV4:
3478 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
3479 					       exp_items);
3480 			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
3481 						  hdr) != 0);
3482 
3483 			proto_header_size = sizeof(struct rte_ipv4_hdr);
3484 
3485 			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3486 			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3487 
3488 			next_proto_ofst =
3489 			    bounce_eh->size +
3490 			    offsetof(struct rte_ipv4_hdr, next_proto_id);
3491 
3492 			ipv4 = (struct rte_ipv4_hdr *)buf_cur;
3493 
3494 			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
3495 			break;
3496 		case RTE_FLOW_ITEM_TYPE_IPV6:
3497 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
3498 					       exp_items);
3499 			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
3500 						  hdr) != 0);
3501 
3502 			proto_header_size = sizeof(struct rte_ipv6_hdr);
3503 
3504 			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3505 			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3506 
3507 			next_proto_ofst = bounce_eh->size +
3508 					  offsetof(struct rte_ipv6_hdr, proto);
3509 
3510 			ipv6 = (struct rte_ipv6_hdr *)buf_cur;
3511 
3512 			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
3513 			break;
3514 		case RTE_FLOW_ITEM_TYPE_UDP:
3515 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
3516 					       exp_items);
3517 			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
3518 						  hdr) != 0);
3519 
3520 			proto_header_size = sizeof(struct rte_udp_hdr);
3521 
3522 			next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
3523 			*next_protop = IPPROTO_UDP;
3524 
3525 			udp = (struct rte_udp_hdr *)buf_cur;
3526 
3527 			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
3528 			break;
3529 		case RTE_FLOW_ITEM_TYPE_VXLAN:
3530 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
3531 					       exp_items);
3532 			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
3533 						  hdr) != 0);
3534 
3535 			proto_header_size = sizeof(struct rte_vxlan_hdr);
3536 
3537 			vxlan = (struct rte_vxlan_hdr *)buf_cur;
3538 
3539 			udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
3540 			udp->dgram_len = RTE_BE16(sizeof(*udp) +
3541 						  sizeof(*vxlan));
3542 			udp->dgram_cksum = 0;
3543 
3544 			exp_items = 0;
3545 			break;
3546 		default:
3547 			return rte_flow_error_set(error, ENOTSUP,
3548 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3549 					"Unknown item in the encap. header");
3550 		}
3551 
3552 		if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
3553 			return rte_flow_error_set(error, E2BIG,
3554 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3555 					"The encap. header is too big");
3556 		}
3557 
3558 		if ((proto_header_size & 1) != 0) {
3559 			return rte_flow_error_set(error, EINVAL,
3560 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3561 					"Odd layer size in the encap. header");
3562 		}
3563 
3564 		rte_memcpy(buf_cur, pattern->spec, proto_header_size);
3565 		bounce_eh->size += proto_header_size;
3566 
3567 		parsed_item->item = pattern;
3568 		parsed_item->proto_header_size = proto_header_size;
3569 		++nb_parsed_items;
3570 	}
3571 
3572 	if (exp_items != 0) {
3573 		/* Parsing item VXLAN would have reset exp_items to 0. */
3574 		return rte_flow_error_set(error, ENOTSUP,
3575 					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3576 					"No item VXLAN in the encap. header");
3577 	}
3578 
3579 	/* One of the pointers (ipv4, ipv6) refers to a dummy area. */
3580 	ipv4->version_ihl = RTE_IPV4_VHL_DEF;
3581 	ipv4->time_to_live = SFC_IPV4_TTL_DEF;
3582 	ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
3583 				      sizeof(*vxlan));
3584 	/* The HW cannot compute this checksum. */
3585 	ipv4->hdr_checksum = 0;
3586 	ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);
3587 
3588 	ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
3589 	ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
3590 	ipv6->payload_len = udp->dgram_len;
3591 
3592 	vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);
3593 
3594 	/* Take care of the masks. */
3595 	sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);
3596 
3597 	rc = efx_mae_action_set_populate_encap(spec);
3598 	if (rc != 0) {
3599 		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
3600 				NULL, "failed to request action ENCAP");
3601 	}
3602 
3603 	return rc;
3604 }
3605 
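/*
 * Translate action MARK to the MAE action set. Unless the rule is a
 * tunnel offload JUMP rule (see the workaround note below), the mark
 * value must fit in SFC_FT_USER_MARK_MASK.
 */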
3606 static int
3607 sfc_mae_rule_parse_action_mark(struct sfc_adapter *sa,
3608 			       const struct rte_flow_action_mark *conf,
3609 			       const struct sfc_flow_spec_mae *spec_mae,
3610 			       efx_mae_actions_t *spec)
3611 {
3612 	int rc;
3613 
3614 	if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3615 		/* Workaround. See sfc_flow_parse_rte_to_mae() */
3616 	} else if (conf->id > SFC_FT_USER_MARK_MASK) {
3617 		sfc_err(sa, "the mark value is too large");
3618 		return EINVAL;
3619 	}
3620 
3621 	rc = efx_mae_action_set_populate_mark(spec, conf->id);
3622 	if (rc != 0)
3623 		sfc_err(sa, "failed to request action MARK: %s", strerror(rc));
3624 
3625 	return rc;
3626 }
3627 
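/*
 * Translate action COUNT to the MAE action set. The dedicated counter
 * Rx queue must have been initialised and a service lcore must be
 * available to deliver counter readings; otherwise the action is
 * rejected.
 */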
3628 static int
3629 sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
3630 				const struct rte_flow_action_count *conf
3631 					__rte_unused,
3632 				efx_mae_actions_t *spec)
3633 {
3634 	int rc;
3635 
3636 	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
3637 		sfc_err(sa,
3638 			"counter queue is not configured for COUNT action");
3639 		rc = EINVAL;
3640 		goto fail_counter_queue_uninit;
3641 	}
3642 
3643 	if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE) {
3644 		rc = EINVAL;
3645 		goto fail_no_service_core;
3646 	}
3647 
3648 	rc = efx_mae_action_set_populate_count(spec);
3649 	if (rc != 0) {
3650 		sfc_err(sa,
3651 			"failed to populate counters in MAE action set: %s",
3652 			rte_strerror(rc));
3653 		goto fail_populate_count;
3654 	}
3655 
3656 	return 0;
3657 
3658 fail_populate_count:
3659 fail_no_service_core:
3660 fail_counter_queue_uninit:
3661 
3662 	return rc;
3663 }
3664 
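/*
 * Translate action PHY_PORT to a DELIVER action in the MAE action set.
 * With 'original' set, the network port assigned to this PF is used;
 * otherwise, the physical port index from the action configuration is.
 */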
3665 static int
3666 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
3667 				   const struct rte_flow_action_phy_port *conf,
3668 				   efx_mae_actions_t *spec)
3669 {
3670 	efx_mport_sel_t mport;
3671 	uint32_t phy_port;
3672 	int rc;
3673 
3674 	if (conf->original != 0)
3675 		phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
3676 	else
3677 		phy_port = conf->index;
3678 
3679 	rc = efx_mae_mport_by_phy_port(phy_port, &mport);
3680 	if (rc != 0) {
3681 		sfc_err(sa, "failed to convert phys. port ID %u to m-port selector: %s",
3682 			phy_port, strerror(rc));
3683 		return rc;
3684 	}
3685 
3686 	rc = efx_mae_action_set_populate_deliver(spec, &mport);
3687 	if (rc != 0) {
3688 		sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3689 			mport.sel, strerror(rc));
3690 	}
3691 
3692 	return rc;
3693 }
3694 
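/*
 * Translate actions PF and VF to a DELIVER action in the MAE action set.
 * A NULL configuration (action PF) means delivery to this PF itself;
 * for action VF, 'original' selects this function's own VF, otherwise
 * the VF ID from the action configuration is used.
 */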
3695 static int
3696 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
3697 				const struct rte_flow_action_vf *vf_conf,
3698 				efx_mae_actions_t *spec)
3699 {
3700 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
3701 	efx_mport_sel_t mport;
3702 	uint32_t vf;
3703 	int rc;
3704 
3705 	if (vf_conf == NULL)
3706 		vf = EFX_PCI_VF_INVALID;
3707 	else if (vf_conf->original != 0)
3708 		vf = encp->enc_vf;
3709 	else
3710 		vf = vf_conf->id;
3711 
3712 	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
3713 	if (rc != 0) {
3714 		sfc_err(sa, "failed to convert PF %u VF %d to m-port: %s",
3715 			encp->enc_pf, (vf != EFX_PCI_VF_INVALID) ? (int)vf : -1,
3716 			strerror(rc));
3717 		return rc;
3718 	}
3719 
3720 	rc = efx_mae_action_set_populate_deliver(spec, &mport);
3721 	if (rc != 0) {
3722 		sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3723 			mport.sel, strerror(rc));
3724 	}
3725 
3726 	return rc;
3727 }
3728 
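/*
 * Translate action PORT_ID to a DELIVER action. The ethdev port ID
 * ('original' means this adapter's own port) is resolved to its m-port
 * within the switch domain.
 */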
3729 static int
3730 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
3731 				  const struct rte_flow_action_port_id *conf,
3732 				  efx_mae_actions_t *spec)
3733 {
3734 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
3735 	struct sfc_mae *mae = &sa->mae;
3736 	efx_mport_sel_t mport;
3737 	uint16_t port_id;
3738 	int rc;
3739 
3740 	if (conf->id > UINT16_MAX)
3741 		return EOVERFLOW;
3742 
3743 	port_id = (conf->original != 0) ? sas->port_id : conf->id;
3744 
3745 	rc = sfc_mae_switch_get_ethdev_mport(mae->switch_domain_id,
3746 					     port_id, &mport);
3747 	if (rc != 0) {
3748 		sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
3749 			port_id, strerror(rc));
3750 		return rc;
3751 	}
3752 
3753 	rc = efx_mae_action_set_populate_deliver(spec, &mport);
3754 	if (rc != 0) {
3755 		sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3756 			mport.sel, strerror(rc));
3757 	}
3758 
3759 	return rc;
3760 }
3761 
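/*
 * Translate action PORT_REPRESENTOR to a DELIVER action targeting the
 * m-port of the given ethdev itself.
 */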
3762 static int
3763 sfc_mae_rule_parse_action_port_representor(struct sfc_adapter *sa,
3764 		const struct rte_flow_action_ethdev *conf,
3765 		efx_mae_actions_t *spec)
3766 {
3767 	struct sfc_mae *mae = &sa->mae;
3768 	efx_mport_sel_t mport;
3769 	int rc;
3770 
3771 	rc = sfc_mae_switch_get_ethdev_mport(mae->switch_domain_id,
3772 					     conf->port_id, &mport);
3773 	if (rc != 0) {
3774 		sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
3775 			conf->port_id, strerror(rc));
3776 		return rc;
3777 	}
3778 
3779 	rc = efx_mae_action_set_populate_deliver(spec, &mport);
3780 	if (rc != 0) {
3781 		sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3782 			mport.sel, strerror(rc));
3783 	}
3784 
3785 	return rc;
3786 }
3787 
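/*
 * Translate action REPRESENTED_PORT to a DELIVER action targeting the
 * m-port of the entity represented by the given ethdev.
 */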
3788 static int
3789 sfc_mae_rule_parse_action_represented_port(struct sfc_adapter *sa,
3790 		const struct rte_flow_action_ethdev *conf,
3791 		efx_mae_actions_t *spec)
3792 {
3793 	struct sfc_mae *mae = &sa->mae;
3794 	efx_mport_sel_t mport;
3795 	int rc;
3796 
3797 	rc = sfc_mae_switch_get_entity_mport(mae->switch_domain_id,
3798 					     conf->port_id, &mport);
3799 	if (rc != 0) {
3800 		sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
3801 			conf->port_id, strerror(rc));
3802 		return rc;
3803 	}
3804 
3805 	rc = efx_mae_action_set_populate_deliver(spec, &mport);
3806 	if (rc != 0) {
3807 		sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3808 			mport.sel, strerror(rc));
3809 	}
3810 
3811 	return rc;
3812 }
3813 
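/*
 * Human-readable action names used by sfc_mae_rule_parse_action()
 * when reporting rejected actions.
 */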
3814 static const char * const action_names[] = {
3815 	[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = "VXLAN_DECAP",
3816 	[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = "OF_POP_VLAN",
3817 	[RTE_FLOW_ACTION_TYPE_SET_MAC_DST] = "SET_MAC_DST",
3818 	[RTE_FLOW_ACTION_TYPE_SET_MAC_SRC] = "SET_MAC_SRC",
3819 	[RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL] = "OF_DEC_NW_TTL",
3820 	[RTE_FLOW_ACTION_TYPE_DEC_TTL] = "DEC_TTL",
3821 	[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = "OF_PUSH_VLAN",
3822 	[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = "OF_SET_VLAN_VID",
3823 	[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = "OF_SET_VLAN_PCP",
3824 	[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = "VXLAN_ENCAP",
3825 	[RTE_FLOW_ACTION_TYPE_COUNT] = "COUNT",
3826 	[RTE_FLOW_ACTION_TYPE_FLAG] = "FLAG",
3827 	[RTE_FLOW_ACTION_TYPE_MARK] = "MARK",
3828 	[RTE_FLOW_ACTION_TYPE_PHY_PORT] = "PHY_PORT",
3829 	[RTE_FLOW_ACTION_TYPE_PF] = "PF",
3830 	[RTE_FLOW_ACTION_TYPE_VF] = "VF",
3831 	[RTE_FLOW_ACTION_TYPE_PORT_ID] = "PORT_ID",
3832 	[RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR] = "PORT_REPRESENTOR",
3833 	[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = "REPRESENTED_PORT",
3834 	[RTE_FLOW_ACTION_TYPE_DROP] = "DROP",
3835 	[RTE_FLOW_ACTION_TYPE_JUMP] = "JUMP",
3836 };
3837 
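/*
 * Translate a single flow action into the MAE action set specification.
 * Handlers that report errors via rte_flow_error_set() themselves set
 * 'custom_error'; for the rest, a failure is logged with the action
 * name and a generic flow error is returned.
 */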
3838 static int
3839 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
3840 			  const struct rte_flow_action *action,
3841 			  const struct sfc_flow_spec_mae *spec_mae,
3842 			  struct sfc_mae_actions_bundle *bundle,
3843 			  struct sfc_mae_aset_ctx *ctx,
3844 			  struct rte_flow_error *error)
3845 {
3846 	const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3847 	const uint64_t rx_metadata = sa->negotiated_rx_metadata;
3848 	efx_mae_actions_t *spec = ctx->spec;
3849 	bool custom_error = B_FALSE;
3850 	int rc = 0;
3851 
3852 	switch (action->type) {
3853 	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3854 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
3855 				       bundle->actions_mask);
3856 		if (outer_rule == NULL ||
3857 		    outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
3858 			rc = EINVAL;
3859 		else
3860 			rc = efx_mae_action_set_populate_decap(spec);
3861 		break;
3862 	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3863 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
3864 				       bundle->actions_mask);
3865 		rc = efx_mae_action_set_populate_vlan_pop(spec);
3866 		break;
3867 	case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3868 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_SET_MAC_DST,
3869 				       bundle->actions_mask);
3870 		rc = sfc_mae_rule_parse_action_set_mac(sa, SFC_MAE_MAC_ADDR_DST,
3871 						       action->conf, ctx,
3872 						       error);
3873 		custom_error = B_TRUE;
3874 		break;
3875 	case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3876 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_SET_MAC_SRC,
3877 				       bundle->actions_mask);
3878 		rc = sfc_mae_rule_parse_action_set_mac(sa, SFC_MAE_MAC_ADDR_SRC,
3879 						       action->conf, ctx,
3880 						       error);
3881 		custom_error = B_TRUE;
3882 		break;
3883 	case RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL:
3884 	case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3885 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL,
3886 				       bundle->actions_mask);
3887 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DEC_TTL,
3888 				       bundle->actions_mask);
3889 		rc = efx_mae_action_set_populate_decr_ip_ttl(spec);
3890 		break;
3891 	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3892 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
3893 				       bundle->actions_mask);
3894 		sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
3895 		break;
3896 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3897 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
3898 				       bundle->actions_mask);
3899 		sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
3900 		break;
3901 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3902 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
3903 				       bundle->actions_mask);
3904 		sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
3905 		break;
3906 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3907 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
3908 				       bundle->actions_mask);
3909 		rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
3910 							   action->conf,
3911 							   spec, error);
3912 		custom_error = B_TRUE;
3913 		break;
3914 	case RTE_FLOW_ACTION_TYPE_COUNT:
3915 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
3916 				       bundle->actions_mask);
3917 		rc = sfc_mae_rule_parse_action_count(sa, action->conf, spec);
3918 		break;
3919 	case RTE_FLOW_ACTION_TYPE_FLAG:
3920 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
3921 				       bundle->actions_mask);
3922 		if ((rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0) {
3923 			rc = efx_mae_action_set_populate_flag(spec);
3924 		} else {
3925 			rc = rte_flow_error_set(error, ENOTSUP,
3926 						RTE_FLOW_ERROR_TYPE_ACTION,
3927 						action,
3928 						"flag delivery has not been negotiated");
3929 			custom_error = B_TRUE;
3930 		}
3931 		break;
3932 	case RTE_FLOW_ACTION_TYPE_MARK:
3933 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
3934 				       bundle->actions_mask);
3935 		if ((rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
3936 		    spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3937 			rc = sfc_mae_rule_parse_action_mark(sa, action->conf,
3938 							    spec_mae, spec);
3939 		} else {
3940 			rc = rte_flow_error_set(error, ENOTSUP,
3941 						RTE_FLOW_ERROR_TYPE_ACTION,
3942 						action,
3943 						"mark delivery has not been negotiated");
3944 			custom_error = B_TRUE;
3945 		}
3946 		break;
3947 	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
3948 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
3949 				       bundle->actions_mask);
3950 		rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
3951 		break;
3952 	case RTE_FLOW_ACTION_TYPE_PF:
3953 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
3954 				       bundle->actions_mask);
3955 		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
3956 		break;
3957 	case RTE_FLOW_ACTION_TYPE_VF:
3958 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
3959 				       bundle->actions_mask);
3960 		rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
3961 		break;
3962 	case RTE_FLOW_ACTION_TYPE_PORT_ID:
3963 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
3964 				       bundle->actions_mask);
3965 		rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
3966 		break;
3967 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
3968 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR,
3969 				       bundle->actions_mask);
3970 		rc = sfc_mae_rule_parse_action_port_representor(sa,
3971 				action->conf, spec);
3972 		break;
3973 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3974 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
3975 				       bundle->actions_mask);
3976 		rc = sfc_mae_rule_parse_action_represented_port(sa,
3977 				action->conf, spec);
3978 		break;
3979 	case RTE_FLOW_ACTION_TYPE_DROP:
3980 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
3981 				       bundle->actions_mask);
3982 		rc = efx_mae_action_set_populate_drop(spec);
3983 		break;
3984 	case RTE_FLOW_ACTION_TYPE_JUMP:
3985 		if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3986 			/* Workaround. See sfc_flow_parse_rte_to_mae() */
3987 			break;
3988 		}
3989 		/* FALLTHROUGH */
3990 	default:
3991 		return rte_flow_error_set(error, ENOTSUP,
3992 				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3993 				"Unsupported action");
3994 	}
3995 
3996 	if (rc == 0) {
3997 		bundle->actions_mask |= (1ULL << action->type);
3998 	} else if (!custom_error) {
3999 		if (action->type < RTE_DIM(action_names)) {
4000 			const char *action_name = action_names[action->type];
4001 
4002 			if (action_name != NULL) {
4003 				sfc_err(sa, "action %s was rejected: %s",
4004 					action_name, strerror(rc));
4005 			}
4006 		}
4007 		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
4008 				NULL, "Failed to request the action");
4009 	}
4010 
4011 	return rc;
4012 }
4013 
4014 static void
4015 sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
4016 {
4017 	bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
4018 }
4019 
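/*
 * Attach to an existing encap. header resource that matches the bounce
 * buffer contents or register a new one. If the bounce buffer has not
 * been filled in (no VXLAN_ENCAP action), no encap. header is needed.
 */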
4020 static int
4021 sfc_mae_process_encap_header(struct sfc_adapter *sa,
4022 			     const struct sfc_mae_bounce_eh *bounce_eh,
4023 			     struct sfc_mae_encap_header **encap_headerp)
4024 {
4025 	if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
4026 		*encap_headerp = NULL;
4027 		return 0;
4028 	}
4029 
4030 	*encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
4031 	if (*encap_headerp != NULL)
4032 		return 0;
4033 
4034 	return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
4035 }
4036 
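/*
 * Build an MAE action set for the flow: count the COUNT actions, enforce
 * implicit decap/count for tunnel offload GROUP rules, parse the actions
 * one by one (syncing VLAN push bundles), process the encap. header and,
 * finally, attach to an existing action set resource or add a new one.
 *
 * Illustrative example (testpmd-style syntax, assumptions about the
 * test environment apply): a transfer rule such as
 *   flow create 0 transfer pattern eth / end actions count / drop / end
 * would be turned into an action set carrying COUNT and DROP.
 */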
4037 int
4038 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
4039 			   const struct rte_flow_action actions[],
4040 			   struct sfc_flow_spec_mae *spec_mae,
4041 			   struct rte_flow_error *error)
4042 {
4043 	struct sfc_mae_actions_bundle bundle = {0};
4044 	const struct rte_flow_action *action;
4045 	struct sfc_mae_aset_ctx ctx = {0};
4046 	struct sfc_mae *mae = &sa->mae;
4047 	int rc;
4048 
4049 	rte_errno = 0;
4050 
4051 	if (actions == NULL) {
4052 		return rte_flow_error_set(error, EINVAL,
4053 				RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
4054 				"NULL actions");
4055 	}
4056 
4057 	rc = efx_mae_action_set_spec_init(sa->nic, &ctx.spec);
4058 	if (rc != 0)
4059 		goto fail_action_set_spec_init;
4060 
4061 	for (action = actions;
4062 	     action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
4063 		if (action->type == RTE_FLOW_ACTION_TYPE_COUNT)
4064 			++(ctx.n_counters);
4065 	}
4066 
4067 	if (spec_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
4068 		/* JUMP rules don't decapsulate packets. GROUP rules do. */
4069 		rc = efx_mae_action_set_populate_decap(ctx.spec);
4070 		if (rc != 0)
4071 			goto fail_enforce_ft_decap;
4072 
4073 		if (ctx.n_counters == 0 &&
4074 		    sfc_mae_counter_stream_enabled(sa)) {
4075 			/*
4076 			 * The user opted not to use action COUNT in this rule,
4077 			 * but the counter should be enabled implicitly because
4078 			 * packets hitting this rule contribute to the tunnel's
4079 			 * total number of hits. See sfc_mae_counter_get().
4080 			 */
4081 			rc = efx_mae_action_set_populate_count(ctx.spec);
4082 			if (rc != 0)
4083 				goto fail_enforce_ft_count;
4084 
4085 			ctx.n_counters = 1;
4086 		}
4087 	}
4088 
4089 	/* Cleanup after previous encap. header bounce buffer usage. */
4090 	sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
4091 
4092 	for (action = actions;
4093 	     action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
4094 		rc = sfc_mae_actions_bundle_sync(action, &bundle,
4095 						 ctx.spec, error);
4096 		if (rc != 0)
4097 			goto fail_rule_parse_action;
4098 
4099 		rc = sfc_mae_rule_parse_action(sa, action, spec_mae,
4100 					       &bundle, &ctx, error);
4101 		if (rc != 0)
4102 			goto fail_rule_parse_action;
4103 	}
4104 
4105 	rc = sfc_mae_actions_bundle_sync(action, &bundle, ctx.spec, error);
4106 	if (rc != 0)
4107 		goto fail_rule_parse_action;
4108 
4109 	rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh,
4110 					  &ctx.encap_header);
4111 	if (rc != 0)
4112 		goto fail_process_encap_header;
4113 
4114 	if (ctx.n_counters > 1) {
4115 		rc = ENOTSUP;
4116 		sfc_err(sa, "too many count actions requested: %u",
4117 			ctx.n_counters);
4118 		goto fail_nb_count;
4119 	}
4120 
4121 	switch (spec_mae->ft_rule_type) {
4122 	case SFC_FT_RULE_NONE:
4123 		break;
4124 	case SFC_FT_RULE_JUMP:
4125 		/* Workaround. See sfc_flow_parse_rte_to_mae() */
4126 		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, ctx.spec);
4127 		if (rc != 0)
4128 			goto fail_workaround_jump_delivery;
4129 
4130 		ctx.counter_ft = spec_mae->ft;
4131 		break;
4132 	case SFC_FT_RULE_GROUP:
4133 		/*
4134 		 * Packets that go to the rule's AR have FT mark set (from the
4135 		 * JUMP rule OR's RECIRC_ID). Remove this mark in matching
4136 		 * packets. The user may have provided their own action
4137 		 * MARK above, so don't check the return value here.
4138 		 */
4139 		(void)efx_mae_action_set_populate_mark(ctx.spec, 0);
4140 
4141 		ctx.ft_group_hit_counter = &spec_mae->ft->group_hit_counter;
4142 		break;
4143 	default:
4144 		SFC_ASSERT(B_FALSE);
4145 	}
4146 
4147 	spec_mae->action_set = sfc_mae_action_set_attach(sa, &ctx);
4148 	if (spec_mae->action_set != NULL) {
4149 		sfc_mae_encap_header_del(sa, ctx.encap_header);
4150 		efx_mae_action_set_spec_fini(sa->nic, ctx.spec);
4151 		return 0;
4152 	}
4153 
4154 	rc = sfc_mae_action_set_add(sa, actions, &ctx, &spec_mae->action_set);
4155 	if (rc != 0)
4156 		goto fail_action_set_add;
4157 
4158 	return 0;
4159 
4160 fail_action_set_add:
4161 fail_workaround_jump_delivery:
4162 fail_nb_count:
4163 	sfc_mae_encap_header_del(sa, ctx.encap_header);
4164 
4165 fail_process_encap_header:
4166 fail_rule_parse_action:
4167 	sfc_mae_mac_addr_del(sa, ctx.src_mac);
4168 	sfc_mae_mac_addr_del(sa, ctx.dst_mac);
4169 	efx_mae_action_set_spec_fini(sa->nic, ctx.spec);
4170 
4171 fail_enforce_ft_count:
4172 fail_enforce_ft_decap:
4173 fail_action_set_spec_init:
4174 	if (rc > 0 && rte_errno == 0) {
4175 		rc = rte_flow_error_set(error, rc,
4176 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4177 			NULL, "Failed to process the action");
4178 	}
4179 	return rc;
4180 }
4181 
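/*
 * Check whether two match specifications belong to the same HW rule
 * class. If the comparison itself fails, treat the classes as different.
 */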
4182 static bool
4183 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
4184 			const efx_mae_match_spec_t *left,
4185 			const efx_mae_match_spec_t *right)
4186 {
4187 	bool have_same_class;
4188 	int rc;
4189 
4190 	rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
4191 					   &have_same_class);
4192 
4193 	return (rc == 0) ? have_same_class : false;
4194 }
4195 
4196 static int
4197 sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
4198 				struct sfc_mae_outer_rule *rule)
4199 {
4200 	struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
4201 	struct sfc_mae_outer_rule *entry;
4202 	struct sfc_mae *mae = &sa->mae;
4203 
4204 	if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
4205 		/* An active rule is reused; its class is known to be valid. */
4206 		return 0;
4207 	}
4208 
4209 	TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
4210 			      sfc_mae_outer_rules, entries) {
4211 		const efx_mae_match_spec_t *left = entry->match_spec;
4212 		const efx_mae_match_spec_t *right = rule->match_spec;
4213 
4214 		if (entry == rule)
4215 			continue;
4216 
4217 		if (sfc_mae_rules_class_cmp(sa, left, right))
4218 			return 0;
4219 	}
4220 
4221 	sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
4222 		 "support for outer frame pattern items is not guaranteed; "
4223 		 "other than that, the items are valid from the SW standpoint");
4224 	return 0;
4225 }
4226 
4227 static int
4228 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
4229 				 struct sfc_flow_spec_mae *spec)
4230 {
4231 	const struct rte_flow *entry;
4232 
4233 	if (spec->match_spec == NULL)
4234 		return 0;
4235 
4236 	TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
4237 		const struct sfc_flow_spec *entry_spec = &entry->spec;
4238 		const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
4239 		const efx_mae_match_spec_t *left = es_mae->match_spec;
4240 		const efx_mae_match_spec_t *right = spec->match_spec;
4241 
4242 		switch (entry_spec->type) {
4243 		case SFC_FLOW_SPEC_FILTER:
4244 			/* Ignore VNIC-level flows */
4245 			break;
4246 		case SFC_FLOW_SPEC_MAE:
4247 			if (sfc_mae_rules_class_cmp(sa, left, right))
4248 				return 0;
4249 			break;
4250 		default:
4251 			SFC_ASSERT(false);
4252 		}
4253 	}
4254 
4255 	sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
4256 		 "support for inner frame pattern items is not guaranteed; "
4257 		 "other than that, the items are valid from the SW standpoint");
4258 	return 0;
4259 }
4260 
4261 /**
4262  * Confirm that a given flow can be accepted by the FW.
4263  *
4264  * @param sa
4265  *   Software adapter context
4266  * @param flow
4267  *   Flow to be verified
4268  * @return
4269  *   Zero on success and non-zero in the case of error.
4270  *   A special value of EAGAIN indicates that the adapter is
4271  *   not in the started state. The started state is required
4272  *   because the class of the flow being validated can only be
4273  *   compared with the classes of the active rules, and such
4274  *   classes are known to be supported by the FW.
4275  */
4276 int
4277 sfc_mae_flow_verify(struct sfc_adapter *sa,
4278 		    struct rte_flow *flow)
4279 {
4280 	struct sfc_flow_spec *spec = &flow->spec;
4281 	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
4282 	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
4283 	int rc;
4284 
4285 	SFC_ASSERT(sfc_adapter_is_locked(sa));
4286 
4287 	if (sa->state != SFC_ETHDEV_STARTED)
4288 		return EAGAIN;
4289 
4290 	if (outer_rule != NULL) {
4291 		rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
4292 		if (rc != 0)
4293 			return rc;
4294 	}
4295 
4296 	return sfc_mae_action_rule_class_verify(sa, spec_mae);
4297 }
4298 
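/*
 * Enable the flow in HW: enable the outer rule (if any), enable the
 * action set, make sure counter support is started when counters are
 * in use and, finally, insert the action rule.
 */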
4299 int
4300 sfc_mae_flow_insert(struct sfc_adapter *sa,
4301 		    struct rte_flow *flow)
4302 {
4303 	struct sfc_flow_spec *spec = &flow->spec;
4304 	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
4305 	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
4306 	struct sfc_mae_action_set *action_set = spec_mae->action_set;
4307 	struct sfc_mae_fw_rsrc *fw_rsrc;
4308 	int rc;
4309 
4310 	SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
4311 
4312 	if (outer_rule != NULL) {
4313 		rc = sfc_mae_outer_rule_enable(sa, outer_rule,
4314 					       spec_mae->match_spec);
4315 		if (rc != 0)
4316 			goto fail_outer_rule_enable;
4317 	}
4318 
4319 	if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
4320 		spec_mae->ft->reset_jump_hit_counter =
4321 			spec_mae->ft->group_hit_counter;
4322 	}
4323 
4324 	if (action_set == NULL) {
4325 		sfc_dbg(sa, "enabled flow=%p (no AR)", flow);
4326 		return 0;
4327 	}
4328 
4329 	rc = sfc_mae_action_set_enable(sa, action_set);
4330 	if (rc != 0)
4331 		goto fail_action_set_enable;
4332 
4333 	if (action_set->n_counters > 0) {
4334 		rc = sfc_mae_counter_start(sa);
4335 		if (rc != 0) {
4336 			sfc_err(sa, "failed to start MAE counter support: %s",
4337 				rte_strerror(rc));
4338 			goto fail_mae_counter_start;
4339 		}
4340 	}
4341 
4342 	fw_rsrc = &action_set->fw_rsrc;
4343 
4344 	rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
4345 					NULL, &fw_rsrc->aset_id,
4346 					&spec_mae->rule_id);
4347 	if (rc != 0)
4348 		goto fail_action_rule_insert;
4349 
4350 	sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
4351 		flow, spec_mae->rule_id.id);
4352 
4353 	return 0;
4354 
4355 fail_action_rule_insert:
4356 fail_mae_counter_start:
4357 	sfc_mae_action_set_disable(sa, action_set);
4358 
4359 fail_action_set_enable:
4360 	if (outer_rule != NULL)
4361 		sfc_mae_outer_rule_disable(sa, outer_rule);
4362 
4363 fail_outer_rule_enable:
4364 	return rc;
4365 }
4366 
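/*
 * Disable the flow in HW in reverse order: remove the action rule,
 * disable the action set and then disable the outer rule (if any).
 */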
4367 int
4368 sfc_mae_flow_remove(struct sfc_adapter *sa,
4369 		    struct rte_flow *flow)
4370 {
4371 	struct sfc_flow_spec *spec = &flow->spec;
4372 	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
4373 	struct sfc_mae_action_set *action_set = spec_mae->action_set;
4374 	struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
4375 	int rc;
4376 
4377 	if (action_set == NULL) {
4378 		sfc_dbg(sa, "disabled flow=%p (no AR)", flow);
4379 		goto skip_action_rule;
4380 	}
4381 
4382 	SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
4383 
4384 	rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
4385 	if (rc != 0) {
4386 		sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
4387 			flow, spec_mae->rule_id.id, strerror(rc));
4388 	}
4389 	sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
4390 		flow, spec_mae->rule_id.id);
4391 	spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
4392 
4393 	sfc_mae_action_set_disable(sa, action_set);
4394 
4395 skip_action_rule:
4396 	if (outer_rule != NULL)
4397 		sfc_mae_outer_rule_disable(sa, outer_rule);
4398 
4399 	return 0;
4400 }
4401 
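/*
 * Serve a COUNT query: pick the counter with the requested ID or, if no
 * ID is given, the first counter of the flow rule, and fill in the
 * query data from the counter registry.
 */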
4402 static int
4403 sfc_mae_query_counter(struct sfc_adapter *sa,
4404 		      struct sfc_flow_spec_mae *spec,
4405 		      const struct rte_flow_action *action,
4406 		      struct rte_flow_query_count *data,
4407 		      struct rte_flow_error *error)
4408 {
4409 	struct sfc_mae_action_set *action_set = spec->action_set;
4410 	const struct rte_flow_action_count *conf = action->conf;
4411 	unsigned int i;
4412 	int rc;
4413 
4414 	if (action_set == NULL || action_set->n_counters == 0) {
4415 		return rte_flow_error_set(error, EINVAL,
4416 			RTE_FLOW_ERROR_TYPE_ACTION, action,
4417 			"Queried flow rule does not have count actions");
4418 	}
4419 
4420 	for (i = 0; i < action_set->n_counters; i++) {
4421 		/*
4422 		 * Get the first available counter of the flow rule if
4423 		 * counter ID is not specified, provided that this
4424 		 * counter is not an automatic (implicit) one.
4425 		 */
4426 		if (conf != NULL && action_set->counters[i].rte_id != conf->id)
4427 			continue;
4428 
4429 		rc = sfc_mae_counter_get(&sa->mae.counter_registry.counters,
4430 					 &action_set->counters[i], data);
4431 		if (rc != 0) {
4432 			return rte_flow_error_set(error, EINVAL,
4433 				RTE_FLOW_ERROR_TYPE_ACTION, action,
4434 				"Queried flow rule counter action is invalid");
4435 		}
4436 
4437 		return 0;
4438 	}
4439 
4440 	return rte_flow_error_set(error, ENOENT,
4441 				  RTE_FLOW_ERROR_TYPE_ACTION, action,
4442 				  "no count action with such an ID in the flow rule");
4443 }
4444 
4445 int
4446 sfc_mae_flow_query(struct rte_eth_dev *dev,
4447 		   struct rte_flow *flow,
4448 		   const struct rte_flow_action *action,
4449 		   void *data,
4450 		   struct rte_flow_error *error)
4451 {
4452 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
4453 	struct sfc_flow_spec *spec = &flow->spec;
4454 	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
4455 
4456 	switch (action->type) {
4457 	case RTE_FLOW_ACTION_TYPE_COUNT:
4458 		return sfc_mae_query_counter(sa, spec_mae, action,
4459 					     data, error);
4460 	default:
4461 		return rte_flow_error_set(error, ENOTSUP,
4462 			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4463 			"Query for action of this type is not supported");
4464 	}
4465 }
4466 
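/*
 * In switchdev mode, install a pair of lowest-priority MAE rules that
 * forward traffic between the PF m-port and the assigned physical port
 * in both directions. Requires MAE admin privilege.
 */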
4467 int
4468 sfc_mae_switchdev_init(struct sfc_adapter *sa)
4469 {
4470 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
4471 	struct sfc_mae *mae = &sa->mae;
4472 	efx_mport_sel_t pf;
4473 	efx_mport_sel_t phy;
4474 	int rc;
4475 
4476 	sfc_log_init(sa, "entry");
4477 
4478 	if (!sa->switchdev) {
4479 		sfc_log_init(sa, "switchdev is not enabled - skip");
4480 		return 0;
4481 	}
4482 
4483 	if (mae->status != SFC_MAE_STATUS_ADMIN) {
4484 		rc = ENOTSUP;
4485 		sfc_err(sa, "failed to init switchdev - no admin MAE privilege");
4486 		goto fail_no_mae;
4487 	}
4488 
4489 	rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
4490 					    &pf);
4491 	if (rc != 0) {
4492 		sfc_err(sa, "failed to get PF m-port");
4493 		goto fail_pf_get;
4494 	}
4495 
4496 	rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, &phy);
4497 	if (rc != 0) {
4498 		sfc_err(sa, "failed to get PHY m-port");
4499 		goto fail_phy_get;
4500 	}
4501 
4502 	rc = sfc_mae_rule_add_mport_match_deliver(sa, &pf, &phy,
4503 			SFC_MAE_RULE_PRIO_LOWEST,
4504 			&mae->switchdev_rule_pf_to_ext);
4505 	if (rc != 0) {
4506 		sfc_err(sa, "failed to add MAE rule to forward from PF to PHY");
4507 		goto fail_pf_add;
4508 	}
4509 
4510 	rc = sfc_mae_rule_add_mport_match_deliver(sa, &phy, &pf,
4511 			SFC_MAE_RULE_PRIO_LOWEST,
4512 			&mae->switchdev_rule_ext_to_pf);
4513 	if (rc != 0) {
4514 		sfc_err(sa, "failed to add MAE rule to forward from PHY to PF");
4515 		goto fail_phy_add;
4516 	}
4517 
4518 	sfc_log_init(sa, "done");
4519 
4520 	return 0;
4521 
4522 fail_phy_add:
4523 	sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
4524 
4525 fail_pf_add:
4526 fail_phy_get:
4527 fail_pf_get:
4528 fail_no_mae:
4529 	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
4530 	return rc;
4531 }
4532 
4533 void
4534 sfc_mae_switchdev_fini(struct sfc_adapter *sa)
4535 {
4536 	struct sfc_mae *mae = &sa->mae;
4537 
4538 	if (!sa->switchdev)
4539 		return;
4540 
4541 	sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
4542 	sfc_mae_rule_del(sa, mae->switchdev_rule_ext_to_pf);
4543 }
4544