1 /* SPDX-License-Identifier: BSD-3-Clause
2 *
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2019 Solarflare Communications Inc.
5 *
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
8 */
9
10 #include <stdbool.h>
11
12 #include <rte_bitops.h>
13 #include <rte_common.h>
14 #include <rte_vxlan.h>
15
16 #include "efx.h"
17
18 #include "sfc.h"
19 #include "sfc_flow_tunnel.h"
20 #include "sfc_mae_counter.h"
21 #include "sfc_log.h"
22 #include "sfc_switch.h"
23 #include "sfc_service.h"
24
25 static int
26 sfc_mae_assign_ethdev_mport(struct sfc_adapter *sa,
27 efx_mport_sel_t *mportp)
28 {
29 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
30
31 return efx_mae_mport_by_pcie_function(encp->enc_pf, encp->enc_vf,
32 mportp);
33 }
34
35 static int
36 sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
37 efx_mport_sel_t *mportp)
38 {
39 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
40 int rc = 0;
41
42 if (encp->enc_mae_admin) {
43 /*
44 * This ethdev sits on MAE admin PF. The represented
45 * entity is the network port assigned to that PF.
46 */
47 rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, mportp);
48 } else {
49 /*
50 * This ethdev sits on unprivileged PF / VF. The entity
51 * represented by the ethdev can change dynamically
52 * as MAE admin changes default traffic rules.
53 *
54 * For the sake of simplicity, do not fill in the m-port
55 * and assume that flow rules should not be allowed to
56 * reference the entity represented by this ethdev.
57 */
58 efx_mae_mport_invalid(mportp);
59 }
60
61 return rc;
62 }
63
64 static int
65 sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
66 uint32_t nb_counters_max)
67 {
68 	return sfc_mae_counters_init(&registry->counters, nb_counters_max);
69 }
70
71 static void
72 sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
73 {
74 	sfc_mae_counters_fini(&registry->counters);
75 }
76
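/*
 * Find an unused slot in the fixed-size table of driver-internal MAE
 * rules. A slot is free when its match spec pointer is NULL; ENOSPC is
 * returned once all SFC_MAE_NB_RULES_MAX entries are in use.
 */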
77 static int
78 sfc_mae_internal_rule_find_empty_slot(struct sfc_adapter *sa,
79 struct sfc_mae_rule **rule)
80 {
81 struct sfc_mae *mae = &sa->mae;
82 struct sfc_mae_internal_rules *internal_rules = &mae->internal_rules;
83 unsigned int entry;
84 int rc;
85
86 for (entry = 0; entry < SFC_MAE_NB_RULES_MAX; entry++) {
87 if (internal_rules->rules[entry].spec == NULL)
88 break;
89 }
90
91 if (entry == SFC_MAE_NB_RULES_MAX) {
92 rc = ENOSPC;
93 		sfc_err(sa, "failed: too many rules (%u rules used)", entry);
94 goto fail_too_many_rules;
95 }
96
97 *rule = &internal_rules->rules[entry];
98
99 return 0;
100
101 fail_too_many_rules:
102 return rc;
103 }
104
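/*
 * Install a driver-internal action rule that matches on one m-port and
 * delivers traffic to another. A negative priority is mapped to the
 * last (numerically highest) action rule priority level. Illustrative
 * usage, with hypothetical selectors mport_from / mport_to and error
 * handling omitted:
 *
 *	struct sfc_mae_rule *rule;
 *
 *	rc = sfc_mae_rule_add_mport_match_deliver(sa, &mport_from,
 *						   &mport_to, -1, &rule);
 *	...
 *	sfc_mae_rule_del(sa, rule);
 */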
105 int
106 sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
107 const efx_mport_sel_t *mport_match,
108 const efx_mport_sel_t *mport_deliver,
109 int prio, struct sfc_mae_rule **rulep)
110 {
111 struct sfc_mae *mae = &sa->mae;
112 struct sfc_mae_rule *rule;
113 int rc;
114
115 sfc_log_init(sa, "entry");
116
117 if (prio > 0 && (unsigned int)prio >= mae->nb_action_rule_prios_max) {
118 rc = EINVAL;
119 sfc_err(sa, "failed: invalid priority %d (max %u)", prio,
120 mae->nb_action_rule_prios_max);
121 goto fail_invalid_prio;
122 }
123 if (prio < 0)
124 prio = mae->nb_action_rule_prios_max - 1;
125
126 rc = sfc_mae_internal_rule_find_empty_slot(sa, &rule);
127 if (rc != 0)
128 goto fail_find_empty_slot;
129
130 sfc_log_init(sa, "init MAE match spec");
131 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
132 (uint32_t)prio, &rule->spec);
133 if (rc != 0) {
134 sfc_err(sa, "failed to init MAE match spec");
135 goto fail_match_init;
136 }
137
138 rc = efx_mae_match_spec_mport_set(rule->spec, mport_match, NULL);
139 if (rc != 0) {
140 sfc_err(sa, "failed to get MAE match mport selector");
141 goto fail_mport_set;
142 }
143
144 rc = efx_mae_action_set_spec_init(sa->nic, &rule->actions);
145 if (rc != 0) {
146 sfc_err(sa, "failed to init MAE action set");
147 goto fail_action_init;
148 }
149
150 rc = efx_mae_action_set_populate_deliver(rule->actions,
151 mport_deliver);
152 if (rc != 0) {
153 sfc_err(sa, "failed to populate deliver action");
154 goto fail_populate_deliver;
155 }
156
157 rc = efx_mae_action_set_alloc(sa->nic, rule->actions,
158 &rule->action_set);
159 if (rc != 0) {
160 sfc_err(sa, "failed to allocate action set");
161 goto fail_action_set_alloc;
162 }
163
164 rc = efx_mae_action_rule_insert(sa->nic, rule->spec, NULL,
165 &rule->action_set,
166 &rule->rule_id);
167 if (rc != 0) {
168 sfc_err(sa, "failed to insert action rule");
169 goto fail_rule_insert;
170 }
171
172 *rulep = rule;
173
174 sfc_log_init(sa, "done");
175
176 return 0;
177
178 fail_rule_insert:
179 efx_mae_action_set_free(sa->nic, &rule->action_set);
180
181 fail_action_set_alloc:
182 fail_populate_deliver:
183 efx_mae_action_set_spec_fini(sa->nic, rule->actions);
184
185 fail_action_init:
186 fail_mport_set:
187 efx_mae_match_spec_fini(sa->nic, rule->spec);
188
189 fail_match_init:
190 fail_find_empty_slot:
191 fail_invalid_prio:
192 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
193 return rc;
194 }
195
196 void
197 sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule)
198 {
199 if (rule == NULL || rule->spec == NULL)
200 return;
201
202 efx_mae_action_rule_remove(sa->nic, &rule->rule_id);
203 efx_mae_action_set_free(sa->nic, &rule->action_set);
204 efx_mae_action_set_spec_fini(sa->nic, rule->actions);
205 efx_mae_match_spec_fini(sa->nic, rule->spec);
206
207 rule->spec = NULL;
208 }
209
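/*
 * Attach-time MAE setup. Adapters without MAE support only get the
 * UNSUPPORTED status. On an MAE admin PF the MAE is initialised, its
 * limits are queried, and the counter registry and the encap. header
 * bounce buffer are prepared. In all supported cases the ethdev and
 * entity m-ports are resolved and an RTE switch domain / switch port
 * pair is assigned.
 */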
210 int
211 sfc_mae_attach(struct sfc_adapter *sa)
212 {
213 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
214 struct sfc_mae_switch_port_request switch_port_request = {0};
215 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
216 efx_mport_sel_t ethdev_mport;
217 efx_mport_sel_t entity_mport;
218 struct sfc_mae *mae = &sa->mae;
219 struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
220 efx_mae_limits_t limits;
221 int rc;
222
223 sfc_log_init(sa, "entry");
224
225 if (!encp->enc_mae_supported) {
226 mae->status = SFC_MAE_STATUS_UNSUPPORTED;
227 return 0;
228 }
229
230 if (encp->enc_mae_admin) {
231 sfc_log_init(sa, "init MAE");
232 rc = efx_mae_init(sa->nic);
233 if (rc != 0)
234 goto fail_mae_init;
235
236 sfc_log_init(sa, "get MAE limits");
237 rc = efx_mae_get_limits(sa->nic, &limits);
238 if (rc != 0)
239 goto fail_mae_get_limits;
240
241 sfc_log_init(sa, "init MAE counter registry");
242 rc = sfc_mae_counter_registry_init(&mae->counter_registry,
243 limits.eml_max_n_counters);
244 if (rc != 0) {
245 sfc_err(sa, "failed to init MAE counters registry for %u entries: %s",
246 limits.eml_max_n_counters, rte_strerror(rc));
247 goto fail_counter_registry_init;
248 }
249 }
250
251 sfc_log_init(sa, "assign ethdev MPORT");
252 	rc = sfc_mae_assign_ethdev_mport(sa, &ethdev_mport);
253 if (rc != 0)
254 goto fail_mae_assign_ethdev_mport;
255
256 sfc_log_init(sa, "assign entity MPORT");
257 rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
258 if (rc != 0)
259 goto fail_mae_assign_entity_mport;
260
261 sfc_log_init(sa, "assign RTE switch domain");
262 rc = sfc_mae_assign_switch_domain(sa, &mae->switch_domain_id);
263 if (rc != 0)
264 goto fail_mae_assign_switch_domain;
265
266 sfc_log_init(sa, "assign RTE switch port");
267 switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
268 	switch_port_request.ethdev_mportp = &ethdev_mport;
269 switch_port_request.entity_mportp = &entity_mport;
270 switch_port_request.ethdev_port_id = sas->port_id;
271 switch_port_request.port_data.indep.mae_admin =
272 encp->enc_mae_admin == B_TRUE;
273 rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
274 &switch_port_request,
275 &mae->switch_port_id);
276 if (rc != 0)
277 goto fail_mae_assign_switch_port;
278
279 if (encp->enc_mae_admin) {
280 sfc_log_init(sa, "allocate encap. header bounce buffer");
281 bounce_eh->buf_size = limits.eml_encap_header_size_limit;
282 bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
283 bounce_eh->buf_size, 0);
284 if (bounce_eh->buf == NULL)
285 goto fail_mae_alloc_bounce_eh;
286
287 mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
288 mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
289 mae->encap_types_supported = limits.eml_encap_types_supported;
290 }
291
292 TAILQ_INIT(&mae->outer_rules);
293 TAILQ_INIT(&mae->mac_addrs);
294 TAILQ_INIT(&mae->encap_headers);
295 TAILQ_INIT(&mae->action_sets);
296
297 if (encp->enc_mae_admin)
298 mae->status = SFC_MAE_STATUS_ADMIN;
299 else
300 mae->status = SFC_MAE_STATUS_SUPPORTED;
301
302 sfc_log_init(sa, "done");
303
304 return 0;
305
306 fail_mae_alloc_bounce_eh:
307 fail_mae_assign_switch_port:
308 fail_mae_assign_switch_domain:
309 fail_mae_assign_entity_mport:
310 fail_mae_assign_ethdev_mport:
311 if (encp->enc_mae_admin)
312 sfc_mae_counter_registry_fini(&mae->counter_registry);
313
314 fail_counter_registry_init:
315 fail_mae_get_limits:
316 if (encp->enc_mae_admin)
317 efx_mae_fini(sa->nic);
318
319 fail_mae_init:
320 sfc_log_init(sa, "failed %d", rc);
321
322 return rc;
323 }
324
325 void
326 sfc_mae_detach(struct sfc_adapter *sa)
327 {
328 struct sfc_mae *mae = &sa->mae;
329 enum sfc_mae_status status_prev = mae->status;
330
331 sfc_log_init(sa, "entry");
332
333 mae->nb_action_rule_prios_max = 0;
334 mae->status = SFC_MAE_STATUS_UNKNOWN;
335
336 if (status_prev != SFC_MAE_STATUS_ADMIN)
337 return;
338
339 rte_free(mae->bounce_eh.buf);
340 sfc_mae_counter_registry_fini(&mae->counter_registry);
341
342 efx_mae_fini(sa->nic);
343
344 sfc_log_init(sa, "done");
345 }
346
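/*
 * Outer rules (and, further below, MAC addresses, encap. headers and
 * action sets) follow a common lifecycle: _attach() finds an existing
 * entry and bumps its driver-level refcnt, _add() creates a new entry,
 * _del() drops the driver-level reference, while _enable()/_disable()
 * maintain the FW resource (fw_rsrc) behind a separate refcnt so that
 * the HW object exists only while at least one flow actually uses it.
 */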
347 static struct sfc_mae_outer_rule *
348 sfc_mae_outer_rule_attach(struct sfc_adapter *sa,
349 const efx_mae_match_spec_t *match_spec,
350 efx_tunnel_protocol_t encap_type)
351 {
352 struct sfc_mae_outer_rule *rule;
353 struct sfc_mae *mae = &sa->mae;
354
355 SFC_ASSERT(sfc_adapter_is_locked(sa));
356
357 TAILQ_FOREACH(rule, &mae->outer_rules, entries) {
358 if (efx_mae_match_specs_equal(rule->match_spec, match_spec) &&
359 rule->encap_type == encap_type) {
360 sfc_dbg(sa, "attaching to outer_rule=%p", rule);
361 ++(rule->refcnt);
362 return rule;
363 }
364 }
365
366 return NULL;
367 }
368
369 static int
370 sfc_mae_outer_rule_add(struct sfc_adapter *sa,
371 efx_mae_match_spec_t *match_spec,
372 efx_tunnel_protocol_t encap_type,
373 struct sfc_mae_outer_rule **rulep)
374 {
375 struct sfc_mae_outer_rule *rule;
376 struct sfc_mae *mae = &sa->mae;
377
378 SFC_ASSERT(sfc_adapter_is_locked(sa));
379
380 rule = rte_zmalloc("sfc_mae_outer_rule", sizeof(*rule), 0);
381 if (rule == NULL)
382 return ENOMEM;
383
384 rule->refcnt = 1;
385 rule->match_spec = match_spec;
386 rule->encap_type = encap_type;
387
388 rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;
389
390 TAILQ_INSERT_TAIL(&mae->outer_rules, rule, entries);
391
392 *rulep = rule;
393
394 sfc_dbg(sa, "added outer_rule=%p", rule);
395
396 return 0;
397 }
398
399 static void
400 sfc_mae_outer_rule_del(struct sfc_adapter *sa,
401 struct sfc_mae_outer_rule *rule)
402 {
403 struct sfc_mae *mae = &sa->mae;
404
405 SFC_ASSERT(sfc_adapter_is_locked(sa));
406 SFC_ASSERT(rule->refcnt != 0);
407
408 --(rule->refcnt);
409
410 if (rule->refcnt != 0)
411 return;
412
413 if (rule->fw_rsrc.rule_id.id != EFX_MAE_RSRC_ID_INVALID ||
414 rule->fw_rsrc.refcnt != 0) {
415 sfc_err(sa, "deleting outer_rule=%p abandons its FW resource: OR_ID=0x%08x, refcnt=%u",
416 rule, rule->fw_rsrc.rule_id.id, rule->fw_rsrc.refcnt);
417 }
418
419 efx_mae_match_spec_fini(sa->nic, rule->match_spec);
420
421 TAILQ_REMOVE(&mae->outer_rules, rule, entries);
422 rte_free(rule);
423
424 sfc_dbg(sa, "deleted outer_rule=%p", rule);
425 }
426
427 static int
428 sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
429 struct sfc_mae_outer_rule *rule,
430 efx_mae_match_spec_t *match_spec_action)
431 {
432 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
433 int rc;
434
435 SFC_ASSERT(sfc_adapter_is_locked(sa));
436
437 if (fw_rsrc->refcnt == 0) {
438 SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
439 SFC_ASSERT(rule->match_spec != NULL);
440
441 rc = efx_mae_outer_rule_insert(sa->nic, rule->match_spec,
442 rule->encap_type,
443 &fw_rsrc->rule_id);
444 if (rc != 0) {
445 sfc_err(sa, "failed to enable outer_rule=%p: %s",
446 rule, strerror(rc));
447 return rc;
448 }
449 }
450
451 if (match_spec_action == NULL)
452 goto skip_action_rule;
453
454 rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
455 &fw_rsrc->rule_id);
456 if (rc != 0) {
457 if (fw_rsrc->refcnt == 0) {
458 (void)efx_mae_outer_rule_remove(sa->nic,
459 &fw_rsrc->rule_id);
460 fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
461 }
462
463 sfc_err(sa, "can't match on outer rule ID: %s", strerror(rc));
464
465 return rc;
466 }
467
468 skip_action_rule:
469 if (fw_rsrc->refcnt == 0) {
470 sfc_dbg(sa, "enabled outer_rule=%p: OR_ID=0x%08x",
471 rule, fw_rsrc->rule_id.id);
472 }
473
474 ++(fw_rsrc->refcnt);
475
476 return 0;
477 }
478
479 static void
480 sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
481 struct sfc_mae_outer_rule *rule)
482 {
483 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
484 int rc;
485
486 SFC_ASSERT(sfc_adapter_is_locked(sa));
487
488 if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
489 fw_rsrc->refcnt == 0) {
490 sfc_err(sa, "failed to disable outer_rule=%p: already disabled; OR_ID=0x%08x, refcnt=%u",
491 rule, fw_rsrc->rule_id.id, fw_rsrc->refcnt);
492 return;
493 }
494
495 if (fw_rsrc->refcnt == 1) {
496 rc = efx_mae_outer_rule_remove(sa->nic, &fw_rsrc->rule_id);
497 if (rc == 0) {
498 sfc_dbg(sa, "disabled outer_rule=%p with OR_ID=0x%08x",
499 rule, fw_rsrc->rule_id.id);
500 } else {
501 sfc_err(sa, "failed to disable outer_rule=%p with OR_ID=0x%08x: %s",
502 rule, fw_rsrc->rule_id.id, strerror(rc));
503 }
504 fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
505 }
506
507 --(fw_rsrc->refcnt);
508 }
509
510 static struct sfc_mae_mac_addr *
511 sfc_mae_mac_addr_attach(struct sfc_adapter *sa,
512 const uint8_t addr_bytes[EFX_MAC_ADDR_LEN])
513 {
514 struct sfc_mae_mac_addr *mac_addr;
515 struct sfc_mae *mae = &sa->mae;
516
517 SFC_ASSERT(sfc_adapter_is_locked(sa));
518
519 TAILQ_FOREACH(mac_addr, &mae->mac_addrs, entries) {
520 if (memcmp(mac_addr->addr_bytes, addr_bytes,
521 EFX_MAC_ADDR_LEN) == 0) {
522 sfc_dbg(sa, "attaching to mac_addr=%p", mac_addr);
523 ++(mac_addr->refcnt);
524 return mac_addr;
525 }
526 }
527
528 return NULL;
529 }
530
531 static int
532 sfc_mae_mac_addr_add(struct sfc_adapter *sa,
533 const uint8_t addr_bytes[EFX_MAC_ADDR_LEN],
534 struct sfc_mae_mac_addr **mac_addrp)
535 {
536 struct sfc_mae_mac_addr *mac_addr;
537 struct sfc_mae *mae = &sa->mae;
538
539 SFC_ASSERT(sfc_adapter_is_locked(sa));
540
541 mac_addr = rte_zmalloc("sfc_mae_mac_addr", sizeof(*mac_addr), 0);
542 if (mac_addr == NULL)
543 return ENOMEM;
544
545 rte_memcpy(mac_addr->addr_bytes, addr_bytes, EFX_MAC_ADDR_LEN);
546
547 mac_addr->refcnt = 1;
548 mac_addr->fw_rsrc.mac_id.id = EFX_MAE_RSRC_ID_INVALID;
549
550 TAILQ_INSERT_TAIL(&mae->mac_addrs, mac_addr, entries);
551
552 *mac_addrp = mac_addr;
553
554 sfc_dbg(sa, "added mac_addr=%p", mac_addr);
555
556 return 0;
557 }
558
559 static void
560 sfc_mae_mac_addr_del(struct sfc_adapter *sa, struct sfc_mae_mac_addr *mac_addr)
561 {
562 struct sfc_mae *mae = &sa->mae;
563
564 if (mac_addr == NULL)
565 return;
566
567 SFC_ASSERT(sfc_adapter_is_locked(sa));
568 SFC_ASSERT(mac_addr->refcnt != 0);
569
570 --(mac_addr->refcnt);
571
572 if (mac_addr->refcnt != 0)
573 return;
574
575 if (mac_addr->fw_rsrc.mac_id.id != EFX_MAE_RSRC_ID_INVALID ||
576 mac_addr->fw_rsrc.refcnt != 0) {
577 sfc_err(sa, "deleting mac_addr=%p abandons its FW resource: MAC_ID=0x%08x, refcnt=%u",
578 mac_addr, mac_addr->fw_rsrc.mac_id.id,
579 mac_addr->fw_rsrc.refcnt);
580 }
581
582 TAILQ_REMOVE(&mae->mac_addrs, mac_addr, entries);
583 rte_free(mac_addr);
584
585 sfc_dbg(sa, "deleted mac_addr=%p", mac_addr);
586 }
587
588 enum sfc_mae_mac_addr_type {
589 SFC_MAE_MAC_ADDR_DST,
590 SFC_MAE_MAC_ADDR_SRC
591 };
592
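/*
 * Make sure the MAC address entry has a FW resource (allocating one on
 * first use) and reference it from the given action set spec as either
 * the destination or the source address, depending on 'type'.
 */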
593 static int
594 sfc_mae_mac_addr_enable(struct sfc_adapter *sa,
595 struct sfc_mae_mac_addr *mac_addr,
596 enum sfc_mae_mac_addr_type type,
597 efx_mae_actions_t *aset_spec)
598 {
599 struct sfc_mae_fw_rsrc *fw_rsrc;
600 int rc = 0;
601
602 if (mac_addr == NULL)
603 return 0;
604
605 SFC_ASSERT(sfc_adapter_is_locked(sa));
606
607 fw_rsrc = &mac_addr->fw_rsrc;
608
609 if (fw_rsrc->refcnt == 0) {
610 SFC_ASSERT(fw_rsrc->mac_id.id == EFX_MAE_RSRC_ID_INVALID);
611
612 rc = efx_mae_mac_addr_alloc(sa->nic, mac_addr->addr_bytes,
613 &fw_rsrc->mac_id);
614 if (rc != 0) {
615 sfc_err(sa, "failed to enable mac_addr=%p: %s",
616 mac_addr, strerror(rc));
617 return rc;
618 }
619 }
620
621 switch (type) {
622 case SFC_MAE_MAC_ADDR_DST:
623 rc = efx_mae_action_set_fill_in_dst_mac_id(aset_spec,
624 &fw_rsrc->mac_id);
625 break;
626 case SFC_MAE_MAC_ADDR_SRC:
627 rc = efx_mae_action_set_fill_in_src_mac_id(aset_spec,
628 &fw_rsrc->mac_id);
629 break;
630 default:
631 rc = EINVAL;
632 break;
633 }
634
635 if (rc != 0) {
636 if (fw_rsrc->refcnt == 0) {
637 (void)efx_mae_mac_addr_free(sa->nic, &fw_rsrc->mac_id);
638 fw_rsrc->mac_id.id = EFX_MAE_RSRC_ID_INVALID;
639 }
640
641 sfc_err(sa, "cannot fill in MAC address entry ID: %s",
642 strerror(rc));
643
644 return rc;
645 }
646
647 if (fw_rsrc->refcnt == 0) {
648 sfc_dbg(sa, "enabled mac_addr=%p: MAC_ID=0x%08x",
649 mac_addr, fw_rsrc->mac_id.id);
650 }
651
652 ++(fw_rsrc->refcnt);
653
654 return 0;
655 }
656
657 static void
658 sfc_mae_mac_addr_disable(struct sfc_adapter *sa,
659 struct sfc_mae_mac_addr *mac_addr)
660 {
661 struct sfc_mae_fw_rsrc *fw_rsrc;
662 int rc;
663
664 if (mac_addr == NULL)
665 return;
666
667 SFC_ASSERT(sfc_adapter_is_locked(sa));
668
669 fw_rsrc = &mac_addr->fw_rsrc;
670
671 if (fw_rsrc->mac_id.id == EFX_MAE_RSRC_ID_INVALID ||
672 fw_rsrc->refcnt == 0) {
673 sfc_err(sa, "failed to disable mac_addr=%p: already disabled; MAC_ID=0x%08x, refcnt=%u",
674 mac_addr, fw_rsrc->mac_id.id, fw_rsrc->refcnt);
675 return;
676 }
677
678 if (fw_rsrc->refcnt == 1) {
679 rc = efx_mae_mac_addr_free(sa->nic, &fw_rsrc->mac_id);
680 if (rc == 0) {
681 sfc_dbg(sa, "disabled mac_addr=%p with MAC_ID=0x%08x",
682 mac_addr, fw_rsrc->mac_id.id);
683 } else {
684 sfc_err(sa, "failed to disable mac_addr=%p with MAC_ID=0x%08x: %s",
685 mac_addr, fw_rsrc->mac_id.id, strerror(rc));
686 }
687 fw_rsrc->mac_id.id = EFX_MAE_RSRC_ID_INVALID;
688 }
689
690 --(fw_rsrc->refcnt);
691 }
692
693 static struct sfc_mae_encap_header *
694 sfc_mae_encap_header_attach(struct sfc_adapter *sa,
695 const struct sfc_mae_bounce_eh *bounce_eh)
696 {
697 struct sfc_mae_encap_header *encap_header;
698 struct sfc_mae *mae = &sa->mae;
699
700 SFC_ASSERT(sfc_adapter_is_locked(sa));
701
702 TAILQ_FOREACH(encap_header, &mae->encap_headers, entries) {
703 if (encap_header->size == bounce_eh->size &&
704 memcmp(encap_header->buf, bounce_eh->buf,
705 bounce_eh->size) == 0) {
706 sfc_dbg(sa, "attaching to encap_header=%p",
707 encap_header);
708 ++(encap_header->refcnt);
709 return encap_header;
710 }
711 }
712
713 return NULL;
714 }
715
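/*
 * Create a driver-level encap. header entry from the bounce buffer
 * contents collected while parsing the actions. The data is copied to
 * a private buffer so that the bounce buffer can be reused.
 */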
716 static int
717 sfc_mae_encap_header_add(struct sfc_adapter *sa,
718 const struct sfc_mae_bounce_eh *bounce_eh,
719 struct sfc_mae_encap_header **encap_headerp)
720 {
721 struct sfc_mae_encap_header *encap_header;
722 struct sfc_mae *mae = &sa->mae;
723
724 SFC_ASSERT(sfc_adapter_is_locked(sa));
725
726 encap_header = rte_zmalloc("sfc_mae_encap_header",
727 sizeof(*encap_header), 0);
728 if (encap_header == NULL)
729 return ENOMEM;
730
731 encap_header->size = bounce_eh->size;
732
733 encap_header->buf = rte_malloc("sfc_mae_encap_header_buf",
734 encap_header->size, 0);
735 if (encap_header->buf == NULL) {
736 rte_free(encap_header);
737 return ENOMEM;
738 }
739
740 rte_memcpy(encap_header->buf, bounce_eh->buf, bounce_eh->size);
741
742 encap_header->refcnt = 1;
743 encap_header->type = bounce_eh->type;
744 encap_header->fw_rsrc.eh_id.id = EFX_MAE_RSRC_ID_INVALID;
745
746 TAILQ_INSERT_TAIL(&mae->encap_headers, encap_header, entries);
747
748 *encap_headerp = encap_header;
749
750 sfc_dbg(sa, "added encap_header=%p", encap_header);
751
752 return 0;
753 }
754
755 static void
756 sfc_mae_encap_header_del(struct sfc_adapter *sa,
757 struct sfc_mae_encap_header *encap_header)
758 {
759 struct sfc_mae *mae = &sa->mae;
760
761 if (encap_header == NULL)
762 return;
763
764 SFC_ASSERT(sfc_adapter_is_locked(sa));
765 SFC_ASSERT(encap_header->refcnt != 0);
766
767 --(encap_header->refcnt);
768
769 if (encap_header->refcnt != 0)
770 return;
771
772 if (encap_header->fw_rsrc.eh_id.id != EFX_MAE_RSRC_ID_INVALID ||
773 encap_header->fw_rsrc.refcnt != 0) {
774 sfc_err(sa, "deleting encap_header=%p abandons its FW resource: EH_ID=0x%08x, refcnt=%u",
775 encap_header, encap_header->fw_rsrc.eh_id.id,
776 encap_header->fw_rsrc.refcnt);
777 }
778
779 TAILQ_REMOVE(&mae->encap_headers, encap_header, entries);
780 rte_free(encap_header->buf);
781 rte_free(encap_header);
782
783 sfc_dbg(sa, "deleted encap_header=%p", encap_header);
784 }
785
786 static int
787 sfc_mae_encap_header_enable(struct sfc_adapter *sa,
788 struct sfc_mae_encap_header *encap_header,
789 efx_mae_actions_t *action_set_spec)
790 {
791 struct sfc_mae_fw_rsrc *fw_rsrc;
792 int rc;
793
794 if (encap_header == NULL)
795 return 0;
796
797 SFC_ASSERT(sfc_adapter_is_locked(sa));
798
799 fw_rsrc = &encap_header->fw_rsrc;
800
801 if (fw_rsrc->refcnt == 0) {
802 SFC_ASSERT(fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID);
803 SFC_ASSERT(encap_header->buf != NULL);
804 SFC_ASSERT(encap_header->size != 0);
805
806 rc = efx_mae_encap_header_alloc(sa->nic, encap_header->type,
807 encap_header->buf,
808 encap_header->size,
809 &fw_rsrc->eh_id);
810 if (rc != 0) {
811 sfc_err(sa, "failed to enable encap_header=%p: %s",
812 encap_header, strerror(rc));
813 return rc;
814 }
815 }
816
817 rc = efx_mae_action_set_fill_in_eh_id(action_set_spec,
818 &fw_rsrc->eh_id);
819 if (rc != 0) {
820 if (fw_rsrc->refcnt == 0) {
821 (void)efx_mae_encap_header_free(sa->nic,
822 &fw_rsrc->eh_id);
823 fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
824 }
825
826 sfc_err(sa, "can't fill in encap. header ID: %s", strerror(rc));
827
828 return rc;
829 }
830
831 if (fw_rsrc->refcnt == 0) {
832 sfc_dbg(sa, "enabled encap_header=%p: EH_ID=0x%08x",
833 encap_header, fw_rsrc->eh_id.id);
834 }
835
836 ++(fw_rsrc->refcnt);
837
838 return 0;
839 }
840
841 static void
842 sfc_mae_encap_header_disable(struct sfc_adapter *sa,
843 struct sfc_mae_encap_header *encap_header)
844 {
845 struct sfc_mae_fw_rsrc *fw_rsrc;
846 int rc;
847
848 if (encap_header == NULL)
849 return;
850
851 SFC_ASSERT(sfc_adapter_is_locked(sa));
852
853 fw_rsrc = &encap_header->fw_rsrc;
854
855 if (fw_rsrc->eh_id.id == EFX_MAE_RSRC_ID_INVALID ||
856 fw_rsrc->refcnt == 0) {
857 sfc_err(sa, "failed to disable encap_header=%p: already disabled; EH_ID=0x%08x, refcnt=%u",
858 encap_header, fw_rsrc->eh_id.id, fw_rsrc->refcnt);
859 return;
860 }
861
862 if (fw_rsrc->refcnt == 1) {
863 rc = efx_mae_encap_header_free(sa->nic, &fw_rsrc->eh_id);
864 if (rc == 0) {
865 sfc_dbg(sa, "disabled encap_header=%p with EH_ID=0x%08x",
866 encap_header, fw_rsrc->eh_id.id);
867 } else {
868 sfc_err(sa, "failed to disable encap_header=%p with EH_ID=0x%08x: %s",
869 encap_header, fw_rsrc->eh_id.id, strerror(rc));
870 }
871 fw_rsrc->eh_id.id = EFX_MAE_RSRC_ID_INVALID;
872 }
873
874 --(fw_rsrc->refcnt);
875 }
876
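/*
 * Only one counter per action set is supported at the moment; the
 * assertions below enforce that. The counter is enabled in the counter
 * registry first and then referenced from the action set spec.
 */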
877 static int
878 sfc_mae_counters_enable(struct sfc_adapter *sa,
879 struct sfc_mae_counter_id *counters,
880 unsigned int n_counters,
881 efx_mae_actions_t *action_set_spec)
882 {
883 int rc;
884
885 sfc_log_init(sa, "entry");
886
887 if (n_counters == 0) {
888 sfc_log_init(sa, "no counters - skip");
889 return 0;
890 }
891
892 SFC_ASSERT(sfc_adapter_is_locked(sa));
893 SFC_ASSERT(n_counters == 1);
894
895 rc = sfc_mae_counter_enable(sa, &counters[0]);
896 if (rc != 0) {
897 sfc_err(sa, "failed to enable MAE counter %u: %s",
898 counters[0].mae_id.id, rte_strerror(rc));
899 goto fail_counter_add;
900 }
901
902 rc = efx_mae_action_set_fill_in_counter_id(action_set_spec,
903 &counters[0].mae_id);
904 if (rc != 0) {
905 sfc_err(sa, "failed to fill in MAE counter %u in action set: %s",
906 counters[0].mae_id.id, rte_strerror(rc));
907 goto fail_fill_in_id;
908 }
909
910 return 0;
911
912 fail_fill_in_id:
913 (void)sfc_mae_counter_disable(sa, &counters[0]);
914
915 fail_counter_add:
916 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
917 return rc;
918 }
919
920 static int
921 sfc_mae_counters_disable(struct sfc_adapter *sa,
922 struct sfc_mae_counter_id *counters,
923 unsigned int n_counters)
924 {
925 if (n_counters == 0)
926 return 0;
927
928 SFC_ASSERT(sfc_adapter_is_locked(sa));
929 SFC_ASSERT(n_counters == 1);
930
931 if (counters[0].mae_id.id == EFX_MAE_RSRC_ID_INVALID) {
932 sfc_err(sa, "failed to disable: already disabled");
933 return EALREADY;
934 }
935
936 return sfc_mae_counter_disable(sa, &counters[0]);
937 }
938
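/*
 * Context accumulated while parsing flow actions; it is consumed by
 * sfc_mae_action_set_attach() and sfc_mae_action_set_add().
 */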
939 struct sfc_mae_aset_ctx {
940 uint64_t *ft_group_hit_counter;
941 struct sfc_mae_encap_header *encap_header;
942 struct sfc_flow_tunnel *counter_ft;
943 unsigned int n_counters;
944 struct sfc_mae_mac_addr *dst_mac;
945 struct sfc_mae_mac_addr *src_mac;
946
947 efx_mae_actions_t *spec;
948 };
949
950 static struct sfc_mae_action_set *
951 sfc_mae_action_set_attach(struct sfc_adapter *sa,
952 const struct sfc_mae_aset_ctx *ctx)
953 {
954 struct sfc_mae_action_set *action_set;
955 struct sfc_mae *mae = &sa->mae;
956
957 SFC_ASSERT(sfc_adapter_is_locked(sa));
958
959 /*
960 * Shared counters are not supported, hence, action
961 * sets with counters are not attachable.
962 */
963 if (ctx->n_counters != 0)
964 return NULL;
965
966 TAILQ_FOREACH(action_set, &mae->action_sets, entries) {
967 if (action_set->encap_header == ctx->encap_header &&
968 action_set->dst_mac_addr == ctx->dst_mac &&
969 action_set->src_mac_addr == ctx->src_mac &&
970 efx_mae_action_set_specs_equal(action_set->spec,
971 ctx->spec)) {
972 sfc_dbg(sa, "attaching to action_set=%p", action_set);
973 ++(action_set->refcnt);
974 return action_set;
975 }
976 }
977
978 return NULL;
979 }
980
981 static int
982 sfc_mae_action_set_add(struct sfc_adapter *sa,
983 const struct rte_flow_action actions[],
984 const struct sfc_mae_aset_ctx *ctx,
985 struct sfc_mae_action_set **action_setp)
986 {
987 struct sfc_mae_action_set *action_set;
988 struct sfc_mae *mae = &sa->mae;
989 unsigned int i;
990
991 SFC_ASSERT(sfc_adapter_is_locked(sa));
992
993 action_set = rte_zmalloc("sfc_mae_action_set", sizeof(*action_set), 0);
994 if (action_set == NULL) {
995 sfc_err(sa, "failed to alloc action set");
996 return ENOMEM;
997 }
998
999 if (ctx->n_counters > 0) {
1000 const struct rte_flow_action *action;
1001
1002 action_set->counters = rte_malloc("sfc_mae_counter_ids",
1003 sizeof(action_set->counters[0]) * ctx->n_counters, 0);
1004 if (action_set->counters == NULL) {
1005 rte_free(action_set);
1006 sfc_err(sa, "failed to alloc counters");
1007 return ENOMEM;
1008 }
1009
1010 for (i = 0; i < ctx->n_counters; ++i) {
1011 action_set->counters[i].rte_id_valid = B_FALSE;
1012 action_set->counters[i].mae_id.id =
1013 EFX_MAE_RSRC_ID_INVALID;
1014
1015 action_set->counters[i].ft_group_hit_counter =
1016 ctx->ft_group_hit_counter;
1017 action_set->counters[i].ft = ctx->counter_ft;
1018 }
1019
1020 for (action = actions, i = 0;
1021 action->type != RTE_FLOW_ACTION_TYPE_END &&
1022 i < ctx->n_counters; ++action) {
1023 const struct rte_flow_action_count *conf;
1024
1025 if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
1026 continue;
1027
1028 conf = action->conf;
1029
1030 action_set->counters[i].rte_id_valid = B_TRUE;
1031 action_set->counters[i].rte_id = conf->id;
1032 i++;
1033 }
1034 action_set->n_counters = ctx->n_counters;
1035 }
1036
1037 action_set->refcnt = 1;
1038 action_set->spec = ctx->spec;
1039 action_set->encap_header = ctx->encap_header;
1040 action_set->dst_mac_addr = ctx->dst_mac;
1041 action_set->src_mac_addr = ctx->src_mac;
1042
1043 action_set->fw_rsrc.aset_id.id = EFX_MAE_RSRC_ID_INVALID;
1044
1045 TAILQ_INSERT_TAIL(&mae->action_sets, action_set, entries);
1046
1047 *action_setp = action_set;
1048
1049 sfc_dbg(sa, "added action_set=%p", action_set);
1050
1051 return 0;
1052 }
1053
1054 static void
1055 sfc_mae_action_set_del(struct sfc_adapter *sa,
1056 struct sfc_mae_action_set *action_set)
1057 {
1058 struct sfc_mae *mae = &sa->mae;
1059
1060 SFC_ASSERT(sfc_adapter_is_locked(sa));
1061 SFC_ASSERT(action_set->refcnt != 0);
1062
1063 --(action_set->refcnt);
1064
1065 if (action_set->refcnt != 0)
1066 return;
1067
1068 if (action_set->fw_rsrc.aset_id.id != EFX_MAE_RSRC_ID_INVALID ||
1069 action_set->fw_rsrc.refcnt != 0) {
1070 sfc_err(sa, "deleting action_set=%p abandons its FW resource: AS_ID=0x%08x, refcnt=%u",
1071 action_set, action_set->fw_rsrc.aset_id.id,
1072 action_set->fw_rsrc.refcnt);
1073 }
1074
1075 efx_mae_action_set_spec_fini(sa->nic, action_set->spec);
1076 sfc_mae_encap_header_del(sa, action_set->encap_header);
1077 sfc_mae_mac_addr_del(sa, action_set->dst_mac_addr);
1078 sfc_mae_mac_addr_del(sa, action_set->src_mac_addr);
1079 if (action_set->n_counters > 0) {
1080 SFC_ASSERT(action_set->n_counters == 1);
1081 SFC_ASSERT(action_set->counters[0].mae_id.id ==
1082 EFX_MAE_RSRC_ID_INVALID);
1083 rte_free(action_set->counters);
1084 }
1085 TAILQ_REMOVE(&mae->action_sets, action_set, entries);
1086 rte_free(action_set);
1087
1088 sfc_dbg(sa, "deleted action_set=%p", action_set);
1089 }
1090
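/*
 * Enable the FW resources behind an action set on first use: the MAC
 * address entries, the encap. header and the counters are enabled
 * first, then the action set itself is allocated. On failure, the
 * already enabled resources are rolled back in reverse order.
 */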
1091 static int
1092 sfc_mae_action_set_enable(struct sfc_adapter *sa,
1093 struct sfc_mae_action_set *action_set)
1094 {
1095 struct sfc_mae_encap_header *encap_header = action_set->encap_header;
1096 struct sfc_mae_mac_addr *dst_mac_addr = action_set->dst_mac_addr;
1097 struct sfc_mae_mac_addr *src_mac_addr = action_set->src_mac_addr;
1098 struct sfc_mae_counter_id *counters = action_set->counters;
1099 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
1100 int rc;
1101
1102 SFC_ASSERT(sfc_adapter_is_locked(sa));
1103
1104 if (fw_rsrc->refcnt == 0) {
1105 SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
1106 SFC_ASSERT(action_set->spec != NULL);
1107
1108 rc = sfc_mae_mac_addr_enable(sa, dst_mac_addr,
1109 SFC_MAE_MAC_ADDR_DST,
1110 action_set->spec);
1111 if (rc != 0)
1112 return rc;
1113
1114 rc = sfc_mae_mac_addr_enable(sa, src_mac_addr,
1115 SFC_MAE_MAC_ADDR_SRC,
1116 action_set->spec);
1117 if (rc != 0) {
1118 sfc_mae_mac_addr_disable(sa, dst_mac_addr);
1119 return rc;
1120 }
1121
1122 rc = sfc_mae_encap_header_enable(sa, encap_header,
1123 action_set->spec);
1124 if (rc != 0) {
1125 sfc_mae_mac_addr_disable(sa, src_mac_addr);
1126 sfc_mae_mac_addr_disable(sa, dst_mac_addr);
1127 return rc;
1128 }
1129
1130 rc = sfc_mae_counters_enable(sa, counters,
1131 action_set->n_counters,
1132 action_set->spec);
1133 if (rc != 0) {
1134 sfc_err(sa, "failed to enable %u MAE counters: %s",
1135 action_set->n_counters, rte_strerror(rc));
1136
1137 sfc_mae_encap_header_disable(sa, encap_header);
1138 sfc_mae_mac_addr_disable(sa, src_mac_addr);
1139 sfc_mae_mac_addr_disable(sa, dst_mac_addr);
1140 return rc;
1141 }
1142
1143 rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
1144 &fw_rsrc->aset_id);
1145 if (rc != 0) {
1146 sfc_err(sa, "failed to enable action_set=%p: %s",
1147 action_set, strerror(rc));
1148
1149 (void)sfc_mae_counters_disable(sa, counters,
1150 action_set->n_counters);
1151 sfc_mae_encap_header_disable(sa, encap_header);
1152 sfc_mae_mac_addr_disable(sa, src_mac_addr);
1153 sfc_mae_mac_addr_disable(sa, dst_mac_addr);
1154 return rc;
1155 }
1156
1157 sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
1158 action_set, fw_rsrc->aset_id.id);
1159 }
1160
1161 ++(fw_rsrc->refcnt);
1162
1163 return 0;
1164 }
1165
1166 static void
1167 sfc_mae_action_set_disable(struct sfc_adapter *sa,
1168 struct sfc_mae_action_set *action_set)
1169 {
1170 struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
1171 int rc;
1172
1173 SFC_ASSERT(sfc_adapter_is_locked(sa));
1174
1175 if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
1176 fw_rsrc->refcnt == 0) {
1177 sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
1178 action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
1179 return;
1180 }
1181
1182 if (fw_rsrc->refcnt == 1) {
1183 rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
1184 if (rc == 0) {
1185 sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
1186 action_set, fw_rsrc->aset_id.id);
1187 } else {
1188 sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
1189 action_set, fw_rsrc->aset_id.id, strerror(rc));
1190 }
1191 fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
1192
1193 rc = sfc_mae_counters_disable(sa, action_set->counters,
1194 action_set->n_counters);
1195 if (rc != 0) {
1196 sfc_err(sa, "failed to disable %u MAE counters: %s",
1197 action_set->n_counters, rte_strerror(rc));
1198 }
1199
1200 sfc_mae_encap_header_disable(sa, action_set->encap_header);
1201 sfc_mae_mac_addr_disable(sa, action_set->src_mac_addr);
1202 sfc_mae_mac_addr_disable(sa, action_set->dst_mac_addr);
1203 }
1204
1205 --(fw_rsrc->refcnt);
1206 }
1207
1208 void
1209 sfc_mae_flow_cleanup(struct sfc_adapter *sa,
1210 struct rte_flow *flow)
1211 {
1212 struct sfc_flow_spec_mae *spec_mae;
1213
1214 if (flow == NULL)
1215 return;
1216
1217 spec_mae = &flow->spec.mae;
1218
1219 if (spec_mae->ft != NULL) {
1220 if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
1221 spec_mae->ft->jump_rule_is_set = B_FALSE;
1222
1223 SFC_ASSERT(spec_mae->ft->refcnt != 0);
1224 --(spec_mae->ft->refcnt);
1225 }
1226
1227 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
1228
1229 if (spec_mae->outer_rule != NULL)
1230 sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);
1231
1232 if (spec_mae->action_set != NULL)
1233 sfc_mae_action_set_del(sa, spec_mae->action_set);
1234
1235 if (spec_mae->match_spec != NULL)
1236 efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
1237 }
1238
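/*
 * Program match fields ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO from the
 * EtherTypes stashed in the pattern data once the number of VLAN tags
 * is known.
 */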
1239 static int
1240 sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
1241 {
1242 struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
1243 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1244 const efx_mae_field_id_t field_ids[] = {
1245 EFX_MAE_FIELD_VLAN0_PROTO_BE,
1246 EFX_MAE_FIELD_VLAN1_PROTO_BE,
1247 };
1248 const struct sfc_mae_ethertype *et;
1249 unsigned int i;
1250 int rc;
1251
1252 /*
1253 * In accordance with RTE flow API convention, the innermost L2
1254 * item's "type" ("inner_type") is a L3 EtherType. If there is
1255 * no L3 item, it's 0x0000/0x0000.
1256 */
1257 et = &pdata->ethertypes[pdata->nb_vlan_tags];
1258 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1259 fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
1260 sizeof(et->value),
1261 (const uint8_t *)&et->value,
1262 sizeof(et->mask),
1263 (const uint8_t *)&et->mask);
1264 if (rc != 0)
1265 return rc;
1266
1267 /*
1268 * sfc_mae_rule_parse_item_vlan() has already made sure
1269 * that pdata->nb_vlan_tags does not exceed this figure.
1270 */
1271 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1272
1273 for (i = 0; i < pdata->nb_vlan_tags; ++i) {
1274 et = &pdata->ethertypes[i];
1275
1276 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1277 fremap[field_ids[i]],
1278 sizeof(et->value),
1279 (const uint8_t *)&et->value,
1280 sizeof(et->mask),
1281 (const uint8_t *)&et->mask);
1282 if (rc != 0)
1283 return rc;
1284 }
1285
1286 return 0;
1287 }
1288
1289 static int
1290 sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
1291 struct rte_flow_error *error)
1292 {
1293 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1294 struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
1295 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1296 const rte_be16_t supported_tpids[] = {
1297 /* VLAN standard TPID (always the first element) */
1298 RTE_BE16(RTE_ETHER_TYPE_VLAN),
1299
1300 /* Double-tagging TPIDs */
1301 RTE_BE16(RTE_ETHER_TYPE_QINQ),
1302 RTE_BE16(RTE_ETHER_TYPE_QINQ1),
1303 RTE_BE16(RTE_ETHER_TYPE_QINQ2),
1304 RTE_BE16(RTE_ETHER_TYPE_QINQ3),
1305 };
1306 bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
1307 unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
1308 unsigned int ethertype_idx;
1309 const uint8_t *valuep;
1310 const uint8_t *maskp;
1311 int rc;
1312
1313 if (pdata->innermost_ethertype_restriction.mask != 0 &&
1314 pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
1315 /*
1316 * If a single item VLAN is followed by a L3 item, value
1317 * of "type" in item ETH can't be a double-tagging TPID.
1318 */
1319 nb_supported_tpids = 1;
1320 }
1321
1322 /*
1323 * sfc_mae_rule_parse_item_vlan() has already made sure
1324 * that pdata->nb_vlan_tags does not exceed this figure.
1325 */
1326 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
1327
1328 for (ethertype_idx = 0;
1329 ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
1330 rte_be16_t tpid_v = ethertypes[ethertype_idx].value;
1331 rte_be16_t tpid_m = ethertypes[ethertype_idx].mask;
1332 unsigned int tpid_idx;
1333
1334 /*
1335 * This loop can have only two iterations. On the second one,
1336 * drop outer tag presence enforcement bit because the inner
1337 * tag presence automatically assumes that for the outer tag.
1338 */
1339 enforce_tag_presence[0] = B_FALSE;
1340
1341 if (tpid_m == RTE_BE16(0)) {
1342 if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
1343 enforce_tag_presence[ethertype_idx] = B_TRUE;
1344
1345 /* No match on this field, and no value check. */
1346 nb_supported_tpids = 1;
1347 continue;
1348 }
1349
1350 /* Exact match is supported only. */
1351 if (tpid_m != RTE_BE16(0xffff)) {
1352 sfc_err(ctx->sa, "TPID mask must be 0x0 or 0xffff; got 0x%04x",
1353 rte_be_to_cpu_16(tpid_m));
1354 rc = EINVAL;
1355 goto fail;
1356 }
1357
1358 for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
1359 tpid_idx < nb_supported_tpids; ++tpid_idx) {
1360 if (tpid_v == supported_tpids[tpid_idx])
1361 break;
1362 }
1363
1364 if (tpid_idx == nb_supported_tpids) {
1365 sfc_err(ctx->sa, "TPID 0x%04x is unsupported",
1366 rte_be_to_cpu_16(tpid_v));
1367 rc = EINVAL;
1368 goto fail;
1369 }
1370
1371 nb_supported_tpids = 1;
1372 }
1373
1374 if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
1375 		struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
1376 rte_be16_t enforced_et;
1377
1378 enforced_et = pdata->innermost_ethertype_restriction.value;
1379
1380 if (et->mask == 0) {
1381 et->mask = RTE_BE16(0xffff);
1382 et->value = enforced_et;
1383 } else if (et->mask != RTE_BE16(0xffff) ||
1384 et->value != enforced_et) {
1385 sfc_err(ctx->sa, "L3 EtherType must be 0x0/0x0 or 0x%04x/0xffff; got 0x%04x/0x%04x",
1386 rte_be_to_cpu_16(enforced_et),
1387 rte_be_to_cpu_16(et->value),
1388 rte_be_to_cpu_16(et->mask));
1389 rc = EINVAL;
1390 goto fail;
1391 }
1392 }
1393
1394 /*
1395 * Now, when the number of VLAN tags is known, set fields
1396 * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
1397 * one is either a valid L3 EtherType (or 0x0000/0x0000),
1398 * and the last two are valid TPIDs (or 0x0000/0x0000).
1399 */
1400 rc = sfc_mae_set_ethertypes(ctx);
1401 if (rc != 0)
1402 goto fail;
1403
1404 if (pdata->l3_next_proto_restriction_mask == 0xff) {
1405 if (pdata->l3_next_proto_mask == 0) {
1406 pdata->l3_next_proto_mask = 0xff;
1407 pdata->l3_next_proto_value =
1408 pdata->l3_next_proto_restriction_value;
1409 } else if (pdata->l3_next_proto_mask != 0xff ||
1410 pdata->l3_next_proto_value !=
1411 pdata->l3_next_proto_restriction_value) {
1412 sfc_err(ctx->sa, "L3 next protocol must be 0x0/0x0 or 0x%02x/0xff; got 0x%02x/0x%02x",
1413 pdata->l3_next_proto_restriction_value,
1414 pdata->l3_next_proto_value,
1415 pdata->l3_next_proto_mask);
1416 rc = EINVAL;
1417 goto fail;
1418 }
1419 }
1420
1421 if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
1422 rc = efx_mae_match_spec_bit_set(ctx->match_spec,
1423 fremap[EFX_MAE_FIELD_HAS_OVLAN],
1424 enforce_tag_presence[0] ||
1425 pdata->has_ovlan_value);
1426 if (rc != 0)
1427 goto fail;
1428 }
1429
1430 if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
1431 rc = efx_mae_match_spec_bit_set(ctx->match_spec,
1432 fremap[EFX_MAE_FIELD_HAS_IVLAN],
1433 enforce_tag_presence[1] ||
1434 pdata->has_ivlan_value);
1435 if (rc != 0)
1436 goto fail;
1437 }
1438
1439 valuep = (const uint8_t *)&pdata->l3_next_proto_value;
1440 maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
1441 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1442 fremap[EFX_MAE_FIELD_IP_PROTO],
1443 sizeof(pdata->l3_next_proto_value),
1444 valuep,
1445 sizeof(pdata->l3_next_proto_mask),
1446 maskp);
1447 if (rc != 0)
1448 goto fail;
1449
1450 return 0;
1451
1452 fail:
1453 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1454 "Failed to process pattern data");
1455 }
1456
1457 static int
1458 sfc_mae_rule_parse_item_mark(const struct rte_flow_item *item,
1459 struct sfc_flow_parse_ctx *ctx,
1460 struct rte_flow_error *error)
1461 {
1462 const struct rte_flow_item_mark *spec = item->spec;
1463 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1464
1465 if (spec == NULL) {
1466 return rte_flow_error_set(error, EINVAL,
1467 RTE_FLOW_ERROR_TYPE_ITEM, item,
1468 "NULL spec in item MARK");
1469 }
1470
1471 /*
1472 * This item is used in tunnel offload support only.
1473 * It must go before any network header items. This
1474 * way, sfc_mae_rule_preparse_item_mark() must have
1475 * already parsed it. Only one item MARK is allowed.
1476 */
1477 if (ctx_mae->ft_rule_type != SFC_FT_RULE_GROUP ||
1478 spec->id != (uint32_t)SFC_FT_ID_TO_MARK(ctx_mae->ft->id)) {
1479 return rte_flow_error_set(error, EINVAL,
1480 RTE_FLOW_ERROR_TYPE_ITEM,
1481 item, "invalid item MARK");
1482 }
1483
1484 return 0;
1485 }
1486
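/*
 * Traffic source items (PORT_ID, PORT_REPRESENTOR, REPRESENTED_PORT,
 * PHY_PORT, PF, VF) are mutually exclusive: each parser below converts
 * its item to an m-port selector and rejects the rule if a source
 * m-port has already been set.
 */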
1487 static int
1488 sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
1489 struct sfc_flow_parse_ctx *ctx,
1490 struct rte_flow_error *error)
1491 {
1492 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1493 const struct rte_flow_item_port_id supp_mask = {
1494 .id = 0xffffffff,
1495 };
1496 const void *def_mask = &rte_flow_item_port_id_mask;
1497 const struct rte_flow_item_port_id *spec = NULL;
1498 const struct rte_flow_item_port_id *mask = NULL;
1499 efx_mport_sel_t mport_sel;
1500 int rc;
1501
1502 if (ctx_mae->match_mport_set) {
1503 return rte_flow_error_set(error, ENOTSUP,
1504 RTE_FLOW_ERROR_TYPE_ITEM, item,
1505 "Can't handle multiple traffic source items");
1506 }
1507
1508 rc = sfc_flow_parse_init(item,
1509 (const void **)&spec, (const void **)&mask,
1510 (const void *)&supp_mask, def_mask,
1511 sizeof(struct rte_flow_item_port_id), error);
1512 if (rc != 0)
1513 return rc;
1514
1515 if (mask->id != supp_mask.id) {
1516 return rte_flow_error_set(error, EINVAL,
1517 RTE_FLOW_ERROR_TYPE_ITEM, item,
1518 "Bad mask in the PORT_ID pattern item");
1519 }
1520
1521 /* If "spec" is not set, could be any port ID */
1522 if (spec == NULL)
1523 return 0;
1524
1525 if (spec->id > UINT16_MAX) {
1526 return rte_flow_error_set(error, EOVERFLOW,
1527 RTE_FLOW_ERROR_TYPE_ITEM, item,
1528 "The port ID is too large");
1529 }
1530
1531 rc = sfc_mae_switch_get_ethdev_mport(ctx_mae->sa->mae.switch_domain_id,
1532 spec->id, &mport_sel);
1533 if (rc != 0) {
1534 return rte_flow_error_set(error, rc,
1535 RTE_FLOW_ERROR_TYPE_ITEM, item,
1536 "Can't get m-port for the given ethdev");
1537 }
1538
1539 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
1540 &mport_sel, NULL);
1541 if (rc != 0) {
1542 return rte_flow_error_set(error, rc,
1543 RTE_FLOW_ERROR_TYPE_ITEM, item,
1544 "Failed to set MPORT for the port ID");
1545 }
1546
1547 ctx_mae->match_mport_set = B_TRUE;
1548
1549 return 0;
1550 }
1551
1552 static int
1553 sfc_mae_rule_parse_item_ethdev_based(const struct rte_flow_item *item,
1554 struct sfc_flow_parse_ctx *ctx,
1555 struct rte_flow_error *error)
1556 {
1557 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1558 const struct rte_flow_item_ethdev supp_mask = {
1559 .port_id = 0xffff,
1560 };
1561 const void *def_mask = &rte_flow_item_ethdev_mask;
1562 const struct rte_flow_item_ethdev *spec = NULL;
1563 const struct rte_flow_item_ethdev *mask = NULL;
1564 efx_mport_sel_t mport_sel;
1565 int rc;
1566
1567 if (ctx_mae->match_mport_set) {
1568 return rte_flow_error_set(error, ENOTSUP,
1569 RTE_FLOW_ERROR_TYPE_ITEM, item,
1570 "Can't handle multiple traffic source items");
1571 }
1572
1573 rc = sfc_flow_parse_init(item,
1574 (const void **)&spec, (const void **)&mask,
1575 (const void *)&supp_mask, def_mask,
1576 sizeof(struct rte_flow_item_ethdev), error);
1577 if (rc != 0)
1578 return rc;
1579
1580 if (mask->port_id != supp_mask.port_id) {
1581 return rte_flow_error_set(error, EINVAL,
1582 RTE_FLOW_ERROR_TYPE_ITEM, item,
1583 "Bad mask in the ethdev-based pattern item");
1584 }
1585
1586 /* If "spec" is not set, could be any port ID */
1587 if (spec == NULL)
1588 return 0;
1589
1590 switch (item->type) {
1591 case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
1592 rc = sfc_mae_switch_get_ethdev_mport(
1593 ctx_mae->sa->mae.switch_domain_id,
1594 spec->port_id, &mport_sel);
1595 if (rc != 0) {
1596 return rte_flow_error_set(error, rc,
1597 RTE_FLOW_ERROR_TYPE_ITEM, item,
1598 "Can't get m-port for the given ethdev");
1599 }
1600 break;
1601 case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
1602 rc = sfc_mae_switch_get_entity_mport(
1603 ctx_mae->sa->mae.switch_domain_id,
1604 spec->port_id, &mport_sel);
1605 if (rc != 0) {
1606 return rte_flow_error_set(error, rc,
1607 RTE_FLOW_ERROR_TYPE_ITEM, item,
1608 "Can't get m-port for the given ethdev");
1609 }
1610 break;
1611 default:
1612 return rte_flow_error_set(error, EINVAL,
1613 RTE_FLOW_ERROR_TYPE_ITEM, item,
1614 "Unsupported ethdev-based flow item");
1615 }
1616
1617 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
1618 &mport_sel, NULL);
1619 if (rc != 0) {
1620 return rte_flow_error_set(error, rc,
1621 RTE_FLOW_ERROR_TYPE_ITEM, item,
1622 "Failed to set MPORT for the port ID");
1623 }
1624
1625 ctx_mae->match_mport_set = B_TRUE;
1626
1627 return 0;
1628 }
1629
1630 static int
1631 sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
1632 struct sfc_flow_parse_ctx *ctx,
1633 struct rte_flow_error *error)
1634 {
1635 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1636 const struct rte_flow_item_phy_port supp_mask = {
1637 .index = 0xffffffff,
1638 };
1639 const void *def_mask = &rte_flow_item_phy_port_mask;
1640 const struct rte_flow_item_phy_port *spec = NULL;
1641 const struct rte_flow_item_phy_port *mask = NULL;
1642 efx_mport_sel_t mport_v;
1643 int rc;
1644
1645 if (ctx_mae->match_mport_set) {
1646 return rte_flow_error_set(error, ENOTSUP,
1647 RTE_FLOW_ERROR_TYPE_ITEM, item,
1648 "Can't handle multiple traffic source items");
1649 }
1650
1651 rc = sfc_flow_parse_init(item,
1652 (const void **)&spec, (const void **)&mask,
1653 (const void *)&supp_mask, def_mask,
1654 sizeof(struct rte_flow_item_phy_port), error);
1655 if (rc != 0)
1656 return rc;
1657
1658 if (mask->index != supp_mask.index) {
1659 return rte_flow_error_set(error, EINVAL,
1660 RTE_FLOW_ERROR_TYPE_ITEM, item,
1661 "Bad mask in the PHY_PORT pattern item");
1662 }
1663
1664 /* If "spec" is not set, could be any physical port */
1665 if (spec == NULL)
1666 return 0;
1667
1668 rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
1669 if (rc != 0) {
1670 return rte_flow_error_set(error, rc,
1671 RTE_FLOW_ERROR_TYPE_ITEM, item,
1672 "Failed to convert the PHY_PORT index");
1673 }
1674
1675 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1676 if (rc != 0) {
1677 return rte_flow_error_set(error, rc,
1678 RTE_FLOW_ERROR_TYPE_ITEM, item,
1679 "Failed to set MPORT for the PHY_PORT");
1680 }
1681
1682 ctx_mae->match_mport_set = B_TRUE;
1683
1684 return 0;
1685 }
1686
1687 static int
1688 sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
1689 struct sfc_flow_parse_ctx *ctx,
1690 struct rte_flow_error *error)
1691 {
1692 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1693 const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
1694 efx_mport_sel_t mport_v;
1695 int rc;
1696
1697 if (ctx_mae->match_mport_set) {
1698 return rte_flow_error_set(error, ENOTSUP,
1699 RTE_FLOW_ERROR_TYPE_ITEM, item,
1700 "Can't handle multiple traffic source items");
1701 }
1702
1703 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
1704 &mport_v);
1705 if (rc != 0) {
1706 return rte_flow_error_set(error, rc,
1707 RTE_FLOW_ERROR_TYPE_ITEM, item,
1708 "Failed to convert the PF ID");
1709 }
1710
1711 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1712 if (rc != 0) {
1713 return rte_flow_error_set(error, rc,
1714 RTE_FLOW_ERROR_TYPE_ITEM, item,
1715 "Failed to set MPORT for the PF");
1716 }
1717
1718 ctx_mae->match_mport_set = B_TRUE;
1719
1720 return 0;
1721 }
1722
1723 static int
1724 sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
1725 struct sfc_flow_parse_ctx *ctx,
1726 struct rte_flow_error *error)
1727 {
1728 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1729 const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
1730 const struct rte_flow_item_vf supp_mask = {
1731 .id = 0xffffffff,
1732 };
1733 const void *def_mask = &rte_flow_item_vf_mask;
1734 const struct rte_flow_item_vf *spec = NULL;
1735 const struct rte_flow_item_vf *mask = NULL;
1736 efx_mport_sel_t mport_v;
1737 int rc;
1738
1739 if (ctx_mae->match_mport_set) {
1740 return rte_flow_error_set(error, ENOTSUP,
1741 RTE_FLOW_ERROR_TYPE_ITEM, item,
1742 "Can't handle multiple traffic source items");
1743 }
1744
1745 rc = sfc_flow_parse_init(item,
1746 (const void **)&spec, (const void **)&mask,
1747 (const void *)&supp_mask, def_mask,
1748 sizeof(struct rte_flow_item_vf), error);
1749 if (rc != 0)
1750 return rc;
1751
1752 if (mask->id != supp_mask.id) {
1753 return rte_flow_error_set(error, EINVAL,
1754 RTE_FLOW_ERROR_TYPE_ITEM, item,
1755 "Bad mask in the VF pattern item");
1756 }
1757
1758 /*
1759 * If "spec" is not set, the item requests any VF related to the
1760 * PF of the current DPDK port (but not the PF itself).
1761 * Reject this match criterion as unsupported.
1762 */
1763 if (spec == NULL) {
1764 return rte_flow_error_set(error, EINVAL,
1765 RTE_FLOW_ERROR_TYPE_ITEM, item,
1766 "Bad spec in the VF pattern item");
1767 }
1768
1769 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
1770 if (rc != 0) {
1771 return rte_flow_error_set(error, rc,
1772 RTE_FLOW_ERROR_TYPE_ITEM, item,
1773 "Failed to convert the PF + VF IDs");
1774 }
1775
1776 rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
1777 if (rc != 0) {
1778 return rte_flow_error_set(error, rc,
1779 RTE_FLOW_ERROR_TYPE_ITEM, item,
1780 "Failed to set MPORT for the PF + VF");
1781 }
1782
1783 ctx_mae->match_mport_set = B_TRUE;
1784
1785 return 0;
1786 }
1787
1788 /*
1789 * Having this field ID in a field locator means that this
1790 * locator cannot be used to actually set the field at the
1791 * time when the corresponding item gets encountered. Such
1792 * fields get stashed in the parsing context instead. This
1793 * is required to resolve dependencies between the stashed
1794 * fields. See sfc_mae_rule_process_pattern_data().
1795 */
1796 #define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS
1797
1798 struct sfc_mae_field_locator {
1799 efx_mae_field_id_t field_id;
1800 size_t size;
1801 /* Field offset in the corresponding rte_flow_item_ struct */
1802 size_t ofst;
1803 };
1804
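/*
 * Build the supported-fields mask for an item from its field locator
 * table: every byte covered by a locator is set to 0xff.
 */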
1805 static void
1806 sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
1807 unsigned int nb_field_locators, void *mask_ptr,
1808 size_t mask_size)
1809 {
1810 unsigned int i;
1811
1812 memset(mask_ptr, 0, mask_size);
1813
1814 for (i = 0; i < nb_field_locators; ++i) {
1815 const struct sfc_mae_field_locator *fl = &field_locators[i];
1816
1817 SFC_ASSERT(fl->ofst + fl->size <= mask_size);
1818 memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
1819 }
1820 }
1821
1822 static int
1823 sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
1824 unsigned int nb_field_locators, const uint8_t *spec,
1825 const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
1826 struct rte_flow_error *error)
1827 {
1828 const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
1829 unsigned int i;
1830 int rc = 0;
1831
1832 for (i = 0; i < nb_field_locators; ++i) {
1833 const struct sfc_mae_field_locator *fl = &field_locators[i];
1834
1835 if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
1836 continue;
1837
1838 rc = efx_mae_match_spec_field_set(ctx->match_spec,
1839 fremap[fl->field_id],
1840 fl->size, spec + fl->ofst,
1841 fl->size, mask + fl->ofst);
1842 if (rc != 0)
1843 break;
1844 }
1845
1846 if (rc != 0) {
1847 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
1848 NULL, "Failed to process item fields");
1849 }
1850
1851 return rc;
1852 }
1853
1854 static const struct sfc_mae_field_locator flocs_eth[] = {
1855 {
1856 /*
1857 * This locator is used only for building supported fields mask.
1858 * The field is handled by sfc_mae_rule_process_pattern_data().
1859 */
1860 SFC_MAE_FIELD_HANDLING_DEFERRED,
1861 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
1862 offsetof(struct rte_flow_item_eth, type),
1863 },
1864 {
1865 EFX_MAE_FIELD_ETH_DADDR_BE,
1866 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
1867 offsetof(struct rte_flow_item_eth, dst),
1868 },
1869 {
1870 EFX_MAE_FIELD_ETH_SADDR_BE,
1871 RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
1872 offsetof(struct rte_flow_item_eth, src),
1873 },
1874 };
1875
1876 static int
1877 sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
1878 struct sfc_flow_parse_ctx *ctx,
1879 struct rte_flow_error *error)
1880 {
1881 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1882 struct rte_flow_item_eth override_mask;
1883 struct rte_flow_item_eth supp_mask;
1884 const uint8_t *spec = NULL;
1885 const uint8_t *mask = NULL;
1886 int rc;
1887
1888 sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
1889 &supp_mask, sizeof(supp_mask));
1890 supp_mask.has_vlan = 1;
1891
1892 rc = sfc_flow_parse_init(item,
1893 (const void **)&spec, (const void **)&mask,
1894 (const void *)&supp_mask,
1895 &rte_flow_item_eth_mask,
1896 sizeof(struct rte_flow_item_eth), error);
1897 if (rc != 0)
1898 return rc;
1899
1900 if (ctx_mae->ft_rule_type == SFC_FT_RULE_JUMP && mask != NULL) {
1901 /*
1902 * The HW/FW hasn't got support for match on MAC addresses in
1903 * outer rules yet (this will change). Match on VLAN presence
1904 * isn't supported either. Ignore these match criteria.
1905 */
1906 memcpy(&override_mask, mask, sizeof(override_mask));
1907 memset(&override_mask.hdr.dst_addr, 0,
1908 sizeof(override_mask.hdr.dst_addr));
1909 memset(&override_mask.hdr.src_addr, 0,
1910 sizeof(override_mask.hdr.src_addr));
1911 override_mask.has_vlan = 0;
1912
1913 mask = (const uint8_t *)&override_mask;
1914 }
1915
1916 if (spec != NULL) {
1917 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1918 struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
1919 const struct rte_flow_item_eth *item_spec;
1920 const struct rte_flow_item_eth *item_mask;
1921
1922 item_spec = (const struct rte_flow_item_eth *)spec;
1923 item_mask = (const struct rte_flow_item_eth *)mask;
1924
1925 /*
1926 * Remember various match criteria in the parsing context.
1927 * sfc_mae_rule_process_pattern_data() will consider them
1928 * altogether when the rest of the items have been parsed.
1929 */
1930 ethertypes[0].value = item_spec->type;
1931 ethertypes[0].mask = item_mask->type;
1932 if (item_mask->has_vlan) {
1933 pdata->has_ovlan_mask = B_TRUE;
1934 if (item_spec->has_vlan)
1935 pdata->has_ovlan_value = B_TRUE;
1936 }
1937 } else {
1938 /*
1939 * The specification is empty. The overall pattern
1940 * validity will be enforced at the end of parsing.
1941 * See sfc_mae_rule_process_pattern_data().
1942 */
1943 return 0;
1944 }
1945
1946 return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
1947 ctx_mae, error);
1948 }
1949
1950 static const struct sfc_mae_field_locator flocs_vlan[] = {
1951 /* Outermost tag */
1952 {
1953 EFX_MAE_FIELD_VLAN0_TCI_BE,
1954 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1955 offsetof(struct rte_flow_item_vlan, tci),
1956 },
1957 {
1958 /*
1959 * This locator is used only for building supported fields mask.
1960 * The field is handled by sfc_mae_rule_process_pattern_data().
1961 */
1962 SFC_MAE_FIELD_HANDLING_DEFERRED,
1963 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1964 offsetof(struct rte_flow_item_vlan, inner_type),
1965 },
1966
1967 /* Innermost tag */
1968 {
1969 EFX_MAE_FIELD_VLAN1_TCI_BE,
1970 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
1971 offsetof(struct rte_flow_item_vlan, tci),
1972 },
1973 {
1974 /*
1975 * This locator is used only for building supported fields mask.
1976 * The field is handled by sfc_mae_rule_process_pattern_data().
1977 */
1978 SFC_MAE_FIELD_HANDLING_DEFERRED,
1979 RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
1980 offsetof(struct rte_flow_item_vlan, inner_type),
1981 },
1982 };
1983
1984 static int
1985 sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
1986 struct sfc_flow_parse_ctx *ctx,
1987 struct rte_flow_error *error)
1988 {
1989 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
1990 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
1991 boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1992 &pdata->has_ovlan_mask,
1993 &pdata->has_ivlan_mask,
1994 };
1995 boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
1996 &pdata->has_ovlan_value,
1997 &pdata->has_ivlan_value,
1998 };
1999 boolean_t *cur_tag_presence_bit_mp;
2000 boolean_t *cur_tag_presence_bit_vp;
2001 const struct sfc_mae_field_locator *flocs;
2002 struct rte_flow_item_vlan supp_mask;
2003 const uint8_t *spec = NULL;
2004 const uint8_t *mask = NULL;
2005 unsigned int nb_flocs;
2006 int rc;
2007
2008 RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
2009
2010 if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
2011 return rte_flow_error_set(error, ENOTSUP,
2012 RTE_FLOW_ERROR_TYPE_ITEM, item,
2013 "Can't match that many VLAN tags");
2014 }
2015
2016 cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
2017 cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];
2018
2019 if (*cur_tag_presence_bit_mp == B_TRUE &&
2020 *cur_tag_presence_bit_vp == B_FALSE) {
2021 return rte_flow_error_set(error, EINVAL,
2022 RTE_FLOW_ERROR_TYPE_ITEM, item,
2023 "The previous item enforces no (more) VLAN, "
2024 "so the current item (VLAN) must not exist");
2025 }
2026
2027 nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
2028 flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;
2029
2030 sfc_mae_item_build_supp_mask(flocs, nb_flocs,
2031 &supp_mask, sizeof(supp_mask));
2032 /*
2033 * This only means that the field is supported by the driver and libefx.
2034 * Support on NIC level will be checked when all items have been parsed.
2035 */
2036 supp_mask.has_more_vlan = 1;
2037
2038 rc = sfc_flow_parse_init(item,
2039 (const void **)&spec, (const void **)&mask,
2040 (const void *)&supp_mask,
2041 &rte_flow_item_vlan_mask,
2042 sizeof(struct rte_flow_item_vlan), error);
2043 if (rc != 0)
2044 return rc;
2045
2046 if (spec != NULL) {
2047 struct sfc_mae_ethertype *et = pdata->ethertypes;
2048 const struct rte_flow_item_vlan *item_spec;
2049 const struct rte_flow_item_vlan *item_mask;
2050
2051 item_spec = (const struct rte_flow_item_vlan *)spec;
2052 item_mask = (const struct rte_flow_item_vlan *)mask;
2053
2054 /*
2055 * Remember various match criteria in the parsing context.
2056 * sfc_mae_rule_process_pattern_data() will consider them
2057 * altogether when the rest of the items have been parsed.
2058 */
2059 et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
2060 et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
2061 pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
2062 if (item_mask->has_more_vlan) {
2063 if (pdata->nb_vlan_tags ==
2064 SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
2065 return rte_flow_error_set(error, ENOTSUP,
2066 RTE_FLOW_ERROR_TYPE_ITEM, item,
2067 "Can't use 'has_more_vlan' in "
2068 						"the second VLAN item");
2069 }
2070 pdata->has_ivlan_mask = B_TRUE;
2071 if (item_spec->has_more_vlan)
2072 pdata->has_ivlan_value = B_TRUE;
2073 }
2074
2075 /* Convert TCI to MAE representation right now. */
2076 rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
2077 ctx_mae, error);
2078 if (rc != 0)
2079 return rc;
2080 }
2081
2082 ++(pdata->nb_vlan_tags);
2083
2084 return 0;
2085 }
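/*
 * Editorial sketch of the stashing scheme used above (derived from the
 * assignments in the ETH and VLAN parsers): pattern_data.ethertypes[] is
 * indexed by the number of VLAN tags seen so far, i.e.
 *
 *	ethertypes[0] <- item ETH "type" (outermost EtherType)
 *	ethertypes[1] <- first item VLAN "inner_type"
 *	ethertypes[2] <- second item VLAN "inner_type"
 *
 * sfc_mae_rule_process_pattern_data() considers these together with the
 * innermost EtherType restriction recorded by the L3 item parsers.
 */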
2086
2087 static const struct sfc_mae_field_locator flocs_ipv4[] = {
2088 {
2089 EFX_MAE_FIELD_SRC_IP4_BE,
2090 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
2091 offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
2092 },
2093 {
2094 EFX_MAE_FIELD_DST_IP4_BE,
2095 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
2096 offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
2097 },
2098 {
2099 /*
2100 * This locator is used only for building supported fields mask.
2101 * The field is handled by sfc_mae_rule_process_pattern_data().
2102 */
2103 SFC_MAE_FIELD_HANDLING_DEFERRED,
2104 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
2105 offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
2106 },
2107 {
2108 EFX_MAE_FIELD_IP_TOS,
2109 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
2110 hdr.type_of_service),
2111 offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
2112 },
2113 {
2114 EFX_MAE_FIELD_IP_TTL,
2115 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
2116 offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
2117 },
2118 };
2119
2120 static int
2121 sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
2122 struct sfc_flow_parse_ctx *ctx,
2123 struct rte_flow_error *error)
2124 {
2125 rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2126 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2127 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
2128 struct rte_flow_item_ipv4 supp_mask;
2129 const uint8_t *spec = NULL;
2130 const uint8_t *mask = NULL;
2131 int rc;
2132
2133 sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
2134 &supp_mask, sizeof(supp_mask));
2135
2136 rc = sfc_flow_parse_init(item,
2137 (const void **)&spec, (const void **)&mask,
2138 (const void *)&supp_mask,
2139 &rte_flow_item_ipv4_mask,
2140 sizeof(struct rte_flow_item_ipv4), error);
2141 if (rc != 0)
2142 return rc;
2143
2144 pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
2145 pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
2146
2147 if (spec != NULL) {
2148 const struct rte_flow_item_ipv4 *item_spec;
2149 const struct rte_flow_item_ipv4 *item_mask;
2150
2151 item_spec = (const struct rte_flow_item_ipv4 *)spec;
2152 item_mask = (const struct rte_flow_item_ipv4 *)mask;
2153
2154 pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
2155 pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
2156 } else {
2157 return 0;
2158 }
2159
2160 return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
2161 ctx_mae, error);
2162 }
2163
2164 static const struct sfc_mae_field_locator flocs_ipv6[] = {
2165 {
2166 EFX_MAE_FIELD_SRC_IP6_BE,
2167 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
2168 offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
2169 },
2170 {
2171 EFX_MAE_FIELD_DST_IP6_BE,
2172 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
2173 offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
2174 },
2175 {
2176 /*
2177 * This locator is used only for building supported fields mask.
2178 * The field is handled by sfc_mae_rule_process_pattern_data().
2179 */
2180 SFC_MAE_FIELD_HANDLING_DEFERRED,
2181 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
2182 offsetof(struct rte_flow_item_ipv6, hdr.proto),
2183 },
2184 {
2185 EFX_MAE_FIELD_IP_TTL,
2186 RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
2187 offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
2188 },
2189 };
2190
2191 static int
2192 sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
2193 struct sfc_flow_parse_ctx *ctx,
2194 struct rte_flow_error *error)
2195 {
2196 rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2197 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2198 const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
2199 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
2200 struct rte_flow_item_ipv6 supp_mask;
2201 const uint8_t *spec = NULL;
2202 const uint8_t *mask = NULL;
2203 rte_be32_t vtc_flow_be;
2204 uint32_t vtc_flow;
2205 uint8_t tc_value;
2206 uint8_t tc_mask;
2207 int rc;
2208
2209 sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
2210 &supp_mask, sizeof(supp_mask));
2211
2212 vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
2213 memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));
2214
2215 rc = sfc_flow_parse_init(item,
2216 (const void **)&spec, (const void **)&mask,
2217 (const void *)&supp_mask,
2218 &rte_flow_item_ipv6_mask,
2219 sizeof(struct rte_flow_item_ipv6), error);
2220 if (rc != 0)
2221 return rc;
2222
2223 pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
2224 pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
2225
2226 if (spec != NULL) {
2227 const struct rte_flow_item_ipv6 *item_spec;
2228 const struct rte_flow_item_ipv6 *item_mask;
2229
2230 item_spec = (const struct rte_flow_item_ipv6 *)spec;
2231 item_mask = (const struct rte_flow_item_ipv6 *)mask;
2232
2233 pdata->l3_next_proto_value = item_spec->hdr.proto;
2234 pdata->l3_next_proto_mask = item_mask->hdr.proto;
2235 } else {
2236 return 0;
2237 }
2238
2239 rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
2240 ctx_mae, error);
2241 if (rc != 0)
2242 return rc;
2243
2244 memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
2245 vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
2246 tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
2247
2248 memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
2249 vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
2250 tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
2251
2252 rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
2253 fremap[EFX_MAE_FIELD_IP_TOS],
2254 sizeof(tc_value), &tc_value,
2255 sizeof(tc_mask), &tc_mask);
2256 if (rc != 0) {
2257 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
2258 NULL, "Failed to process item fields");
2259 }
2260
2261 return 0;
2262 }
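/*
 * Editorial worked example for the Traffic Class extraction above (the
 * vtc_flow value is illustrative): with RTE_IPV6_HDR_TC_MASK == 0x0ff00000
 * and RTE_IPV6_HDR_TC_SHIFT == 20 (as defined in rte_ip.h),
 *
 *	(0x6ab12345 & 0x0ff00000) >> 20 == 0xab
 *
 * which is the 8-bit value written to EFX_MAE_FIELD_IP_TOS.
 */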
2263
2264 static const struct sfc_mae_field_locator flocs_tcp[] = {
2265 {
2266 EFX_MAE_FIELD_L4_SPORT_BE,
2267 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
2268 offsetof(struct rte_flow_item_tcp, hdr.src_port),
2269 },
2270 {
2271 EFX_MAE_FIELD_L4_DPORT_BE,
2272 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
2273 offsetof(struct rte_flow_item_tcp, hdr.dst_port),
2274 },
2275 {
2276 EFX_MAE_FIELD_TCP_FLAGS_BE,
2277 		/*
2278 		 * The size and offset are picked deliberately: the target MAE field
2279 		 * is wider (16 bits) than the TCP flags field, and the mapping relies
2280 		 * on the MAE field being big-endian. A layout sketch follows the table.
2281 		 */
2282 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
2283 RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
2284 offsetof(struct rte_flow_item_tcp, hdr.data_off),
2285 },
2286 };
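/*
 * Editorial layout sketch for the TCP flags entry above (an illustration,
 * not new driver logic): hdr.data_off and hdr.tcp_flags are adjacent bytes
 * in struct rte_tcp_hdr, so treating them as one 2-byte field yields the
 * big-endian 16-bit value expected by EFX_MAE_FIELD_TCP_FLAGS_BE:
 *
 *	byte 0: data offset nibble plus reserved bits
 *	byte 1: TCP flags (CWR..FIN)
 *
 * Only the bits covered by the user-supplied mask take part in the match.
 */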
2287
2288 static int
2289 sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
2290 struct sfc_flow_parse_ctx *ctx,
2291 struct rte_flow_error *error)
2292 {
2293 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2294 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
2295 struct rte_flow_item_tcp supp_mask;
2296 const uint8_t *spec = NULL;
2297 const uint8_t *mask = NULL;
2298 int rc;
2299
2300 /*
2301 * When encountered among outermost items, item TCP is invalid.
2302 * Check which match specification is being constructed now.
2303 */
2304 if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
2305 return rte_flow_error_set(error, EINVAL,
2306 RTE_FLOW_ERROR_TYPE_ITEM, item,
2307 "TCP in outer frame is invalid");
2308 }
2309
2310 sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
2311 &supp_mask, sizeof(supp_mask));
2312
2313 rc = sfc_flow_parse_init(item,
2314 (const void **)&spec, (const void **)&mask,
2315 (const void *)&supp_mask,
2316 &rte_flow_item_tcp_mask,
2317 sizeof(struct rte_flow_item_tcp), error);
2318 if (rc != 0)
2319 return rc;
2320
2321 pdata->l3_next_proto_restriction_value = IPPROTO_TCP;
2322 pdata->l3_next_proto_restriction_mask = 0xff;
2323
2324 if (spec == NULL)
2325 return 0;
2326
2327 return sfc_mae_parse_item(flocs_tcp, RTE_DIM(flocs_tcp), spec, mask,
2328 ctx_mae, error);
2329 }
2330
2331 static const struct sfc_mae_field_locator flocs_udp[] = {
2332 {
2333 EFX_MAE_FIELD_L4_SPORT_BE,
2334 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.src_port),
2335 offsetof(struct rte_flow_item_udp, hdr.src_port),
2336 },
2337 {
2338 EFX_MAE_FIELD_L4_DPORT_BE,
2339 RTE_SIZEOF_FIELD(struct rte_flow_item_udp, hdr.dst_port),
2340 offsetof(struct rte_flow_item_udp, hdr.dst_port),
2341 },
2342 };
2343
2344 static int
2345 sfc_mae_rule_parse_item_udp(const struct rte_flow_item *item,
2346 struct sfc_flow_parse_ctx *ctx,
2347 struct rte_flow_error *error)
2348 {
2349 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2350 struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
2351 struct rte_flow_item_udp supp_mask;
2352 const uint8_t *spec = NULL;
2353 const uint8_t *mask = NULL;
2354 int rc;
2355
2356 sfc_mae_item_build_supp_mask(flocs_udp, RTE_DIM(flocs_udp),
2357 &supp_mask, sizeof(supp_mask));
2358
2359 rc = sfc_flow_parse_init(item,
2360 (const void **)&spec, (const void **)&mask,
2361 (const void *)&supp_mask,
2362 &rte_flow_item_udp_mask,
2363 sizeof(struct rte_flow_item_udp), error);
2364 if (rc != 0)
2365 return rc;
2366
2367 pdata->l3_next_proto_restriction_value = IPPROTO_UDP;
2368 pdata->l3_next_proto_restriction_mask = 0xff;
2369
2370 if (spec == NULL)
2371 return 0;
2372
2373 return sfc_mae_parse_item(flocs_udp, RTE_DIM(flocs_udp), spec, mask,
2374 ctx_mae, error);
2375 }
2376
2377 static const struct sfc_mae_field_locator flocs_tunnel[] = {
2378 {
2379 /*
2380 * The size and offset values are relevant
2381 * for Geneve and NVGRE, too.
2382 */
2383 .size = RTE_SIZEOF_FIELD(struct rte_flow_item_vxlan, vni),
2384 .ofst = offsetof(struct rte_flow_item_vxlan, vni),
2385 },
2386 };
2387
2388 /*
2389 * An auxiliary registry which allows using non-encap. field IDs
2390 * directly when building a match specification of type ACTION.
2391 *
2392 * See sfc_mae_rule_parse_pattern() and sfc_mae_rule_parse_item_tunnel().
2393 */
2394 static const efx_mae_field_id_t field_ids_no_remap[] = {
2395 #define FIELD_ID_NO_REMAP(_field) \
2396 [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_##_field
2397
2398 FIELD_ID_NO_REMAP(ETHER_TYPE_BE),
2399 FIELD_ID_NO_REMAP(ETH_SADDR_BE),
2400 FIELD_ID_NO_REMAP(ETH_DADDR_BE),
2401 FIELD_ID_NO_REMAP(VLAN0_TCI_BE),
2402 FIELD_ID_NO_REMAP(VLAN0_PROTO_BE),
2403 FIELD_ID_NO_REMAP(VLAN1_TCI_BE),
2404 FIELD_ID_NO_REMAP(VLAN1_PROTO_BE),
2405 FIELD_ID_NO_REMAP(SRC_IP4_BE),
2406 FIELD_ID_NO_REMAP(DST_IP4_BE),
2407 FIELD_ID_NO_REMAP(IP_PROTO),
2408 FIELD_ID_NO_REMAP(IP_TOS),
2409 FIELD_ID_NO_REMAP(IP_TTL),
2410 FIELD_ID_NO_REMAP(SRC_IP6_BE),
2411 FIELD_ID_NO_REMAP(DST_IP6_BE),
2412 FIELD_ID_NO_REMAP(L4_SPORT_BE),
2413 FIELD_ID_NO_REMAP(L4_DPORT_BE),
2414 FIELD_ID_NO_REMAP(TCP_FLAGS_BE),
2415 FIELD_ID_NO_REMAP(HAS_OVLAN),
2416 FIELD_ID_NO_REMAP(HAS_IVLAN),
2417
2418 #undef FIELD_ID_NO_REMAP
2419 };
2420
2421 /*
2422 * An auxiliary registry which allows using "ENC" field IDs
2423 * when building a match specification of type OUTER.
2424 *
2425 * See sfc_mae_rule_encap_parse_init().
2426 */
2427 static const efx_mae_field_id_t field_ids_remap_to_encap[] = {
2428 #define FIELD_ID_REMAP_TO_ENCAP(_field) \
2429 [EFX_MAE_FIELD_##_field] = EFX_MAE_FIELD_ENC_##_field
2430
2431 FIELD_ID_REMAP_TO_ENCAP(ETHER_TYPE_BE),
2432 FIELD_ID_REMAP_TO_ENCAP(ETH_SADDR_BE),
2433 FIELD_ID_REMAP_TO_ENCAP(ETH_DADDR_BE),
2434 FIELD_ID_REMAP_TO_ENCAP(VLAN0_TCI_BE),
2435 FIELD_ID_REMAP_TO_ENCAP(VLAN0_PROTO_BE),
2436 FIELD_ID_REMAP_TO_ENCAP(VLAN1_TCI_BE),
2437 FIELD_ID_REMAP_TO_ENCAP(VLAN1_PROTO_BE),
2438 FIELD_ID_REMAP_TO_ENCAP(SRC_IP4_BE),
2439 FIELD_ID_REMAP_TO_ENCAP(DST_IP4_BE),
2440 FIELD_ID_REMAP_TO_ENCAP(IP_PROTO),
2441 FIELD_ID_REMAP_TO_ENCAP(IP_TOS),
2442 FIELD_ID_REMAP_TO_ENCAP(IP_TTL),
2443 FIELD_ID_REMAP_TO_ENCAP(SRC_IP6_BE),
2444 FIELD_ID_REMAP_TO_ENCAP(DST_IP6_BE),
2445 FIELD_ID_REMAP_TO_ENCAP(L4_SPORT_BE),
2446 FIELD_ID_REMAP_TO_ENCAP(L4_DPORT_BE),
2447 FIELD_ID_REMAP_TO_ENCAP(HAS_OVLAN),
2448 FIELD_ID_REMAP_TO_ENCAP(HAS_IVLAN),
2449
2450 #undef FIELD_ID_REMAP_TO_ENCAP
2451 };
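/*
 * Editorial usage sketch for the two registries above (an assumption drawn
 * from how fremap[] is dereferenced in sfc_mae_parse_item()): parsers always
 * address fields by their non-encap. IDs and go through the remap table
 * selected for the current specification, e.g.
 *
 *	fremap[EFX_MAE_FIELD_SRC_IP4_BE]
 *		== EFX_MAE_FIELD_SRC_IP4_BE      with field_ids_no_remap (ACTION)
 *		== EFX_MAE_FIELD_ENC_SRC_IP4_BE  with field_ids_remap_to_encap (OUTER)
 */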
2452
2453 static int
2454 sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
2455 struct sfc_flow_parse_ctx *ctx,
2456 struct rte_flow_error *error)
2457 {
2458 struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
2459 uint8_t vnet_id_v[sizeof(uint32_t)] = {0};
2460 uint8_t vnet_id_m[sizeof(uint32_t)] = {0};
2461 const struct rte_flow_item_vxlan *vxp;
2462 uint8_t supp_mask[sizeof(uint64_t)];
2463 const uint8_t *spec = NULL;
2464 const uint8_t *mask = NULL;
2465 int rc;
2466
2467 if (ctx_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
2468 /*
2469 * As a workaround, pattern processing has started from
2470 * this (tunnel) item. No pattern data to process yet.
2471 */
2472 } else {
2473 /*
2474 * We're about to start processing inner frame items.
2475 * Process pattern data that has been deferred so far
2476 * and reset pattern data storage.
2477 */
2478 rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
2479 if (rc != 0)
2480 return rc;
2481 }
2482
2483 memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));
2484
2485 sfc_mae_item_build_supp_mask(flocs_tunnel, RTE_DIM(flocs_tunnel),
2486 &supp_mask, sizeof(supp_mask));
2487
2488 /*
2489 * This tunnel item was preliminarily detected by
2490 * sfc_mae_rule_encap_parse_init(). Default mask
2491 * was also picked by that helper. Use it here.
2492 */
2493 rc = sfc_flow_parse_init(item,
2494 (const void **)&spec, (const void **)&mask,
2495 (const void *)&supp_mask,
2496 ctx_mae->tunnel_def_mask,
2497 ctx_mae->tunnel_def_mask_size, error);
2498 if (rc != 0)
2499 return rc;
2500
2501 /*
2502 * This item and later ones comprise a
2503 * match specification of type ACTION.
2504 */
2505 ctx_mae->match_spec = ctx_mae->match_spec_action;
2506
2507 /* This item and later ones use non-encap. EFX MAE field IDs. */
2508 ctx_mae->field_ids_remap = field_ids_no_remap;
2509
2510 if (spec == NULL)
2511 return 0;
2512
2513 /*
2514 	 * Field EFX_MAE_FIELD_ENC_VNET_ID_BE is 32 bits wide. Copy the
2515 	 * 24-bit big-endian VNI into it at offset 1; the leading byte
2516 	 * stays 0 in both the mask and the value.
2517 */
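	/*
	 * Editorial byte-layout sketch for the copies below:
	 *
	 *	vnet_id_v[0] = 0x00,  vnet_id_v[1..3] = VNI (big-endian)
	 *	vnet_id_m[0] = 0x00,  vnet_id_m[1..3] = VNI mask (big-endian)
	 */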
2518 vxp = (const struct rte_flow_item_vxlan *)spec;
2519 memcpy(vnet_id_v + 1, &vxp->vni, sizeof(vxp->vni));
2520
2521 vxp = (const struct rte_flow_item_vxlan *)mask;
2522 memcpy(vnet_id_m + 1, &vxp->vni, sizeof(vxp->vni));
2523
2524 rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
2525 EFX_MAE_FIELD_ENC_VNET_ID_BE,
2526 sizeof(vnet_id_v), vnet_id_v,
2527 sizeof(vnet_id_m), vnet_id_m);
2528 if (rc != 0) {
2529 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
2530 item, "Failed to set VXLAN VNI");
2531 }
2532
2533 return rc;
2534 }
2535
2536 static const struct sfc_flow_item sfc_flow_items[] = {
2537 {
2538 .type = RTE_FLOW_ITEM_TYPE_MARK,
2539 .name = "MARK",
2540 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2541 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2542 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2543 .parse = sfc_mae_rule_parse_item_mark,
2544 },
2545 {
2546 .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
2547 .name = "PORT_ID",
2548 /*
2549 * In terms of RTE flow, this item is a META one,
2550 * and its position in the pattern is don't care.
2551 */
2552 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2553 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2554 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2555 .parse = sfc_mae_rule_parse_item_port_id,
2556 },
2557 {
2558 .type = RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR,
2559 .name = "PORT_REPRESENTOR",
2560 /*
2561 * In terms of RTE flow, this item is a META one,
2562 * and its position in the pattern is don't care.
2563 */
2564 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2565 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2566 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2567 .parse = sfc_mae_rule_parse_item_ethdev_based,
2568 },
2569 {
2570 .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
2571 .name = "REPRESENTED_PORT",
2572 /*
2573 * In terms of RTE flow, this item is a META one,
2574 * and its position in the pattern is don't care.
2575 */
2576 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2577 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2578 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2579 .parse = sfc_mae_rule_parse_item_ethdev_based,
2580 },
2581 {
2582 .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
2583 .name = "PHY_PORT",
2584 /*
2585 * In terms of RTE flow, this item is a META one,
2586 * and its position in the pattern is don't care.
2587 */
2588 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2589 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2590 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2591 .parse = sfc_mae_rule_parse_item_phy_port,
2592 },
2593 {
2594 .type = RTE_FLOW_ITEM_TYPE_PF,
2595 .name = "PF",
2596 /*
2597 * In terms of RTE flow, this item is a META one,
2598 * and its position in the pattern is don't care.
2599 */
2600 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2601 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2602 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2603 .parse = sfc_mae_rule_parse_item_pf,
2604 },
2605 {
2606 .type = RTE_FLOW_ITEM_TYPE_VF,
2607 .name = "VF",
2608 /*
2609 * In terms of RTE flow, this item is a META one,
2610 * and its position in the pattern is don't care.
2611 */
2612 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
2613 .layer = SFC_FLOW_ITEM_ANY_LAYER,
2614 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2615 .parse = sfc_mae_rule_parse_item_vf,
2616 },
2617 {
2618 .type = RTE_FLOW_ITEM_TYPE_ETH,
2619 .name = "ETH",
2620 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
2621 .layer = SFC_FLOW_ITEM_L2,
2622 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2623 .parse = sfc_mae_rule_parse_item_eth,
2624 },
2625 {
2626 .type = RTE_FLOW_ITEM_TYPE_VLAN,
2627 .name = "VLAN",
2628 .prev_layer = SFC_FLOW_ITEM_L2,
2629 .layer = SFC_FLOW_ITEM_L2,
2630 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2631 .parse = sfc_mae_rule_parse_item_vlan,
2632 },
2633 {
2634 .type = RTE_FLOW_ITEM_TYPE_IPV4,
2635 .name = "IPV4",
2636 .prev_layer = SFC_FLOW_ITEM_L2,
2637 .layer = SFC_FLOW_ITEM_L3,
2638 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2639 .parse = sfc_mae_rule_parse_item_ipv4,
2640 },
2641 {
2642 .type = RTE_FLOW_ITEM_TYPE_IPV6,
2643 .name = "IPV6",
2644 .prev_layer = SFC_FLOW_ITEM_L2,
2645 .layer = SFC_FLOW_ITEM_L3,
2646 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2647 .parse = sfc_mae_rule_parse_item_ipv6,
2648 },
2649 {
2650 .type = RTE_FLOW_ITEM_TYPE_TCP,
2651 .name = "TCP",
2652 .prev_layer = SFC_FLOW_ITEM_L3,
2653 .layer = SFC_FLOW_ITEM_L4,
2654 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2655 .parse = sfc_mae_rule_parse_item_tcp,
2656 },
2657 {
2658 .type = RTE_FLOW_ITEM_TYPE_UDP,
2659 .name = "UDP",
2660 .prev_layer = SFC_FLOW_ITEM_L3,
2661 .layer = SFC_FLOW_ITEM_L4,
2662 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2663 .parse = sfc_mae_rule_parse_item_udp,
2664 },
2665 {
2666 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
2667 .name = "VXLAN",
2668 .prev_layer = SFC_FLOW_ITEM_L4,
2669 .layer = SFC_FLOW_ITEM_START_LAYER,
2670 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2671 .parse = sfc_mae_rule_parse_item_tunnel,
2672 },
2673 {
2674 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
2675 .name = "GENEVE",
2676 .prev_layer = SFC_FLOW_ITEM_L4,
2677 .layer = SFC_FLOW_ITEM_START_LAYER,
2678 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2679 .parse = sfc_mae_rule_parse_item_tunnel,
2680 },
2681 {
2682 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
2683 .name = "NVGRE",
2684 .prev_layer = SFC_FLOW_ITEM_L3,
2685 .layer = SFC_FLOW_ITEM_START_LAYER,
2686 .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
2687 .parse = sfc_mae_rule_parse_item_tunnel,
2688 },
2689 };
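/*
 * Editorial reading aid for the table above (no new behaviour implied):
 * prev_layer/layer encode the permitted item ordering. Tunnel items (VXLAN,
 * GENEVE, NVGRE) reset the layer to SFC_FLOW_ITEM_START_LAYER, which is what
 * allows a second ETH / IPV4|IPV6 / TCP|UDP sequence describing the inner
 * frame to follow them in the same pattern.
 */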
2690
2691 static int
2692 sfc_mae_rule_process_outer(struct sfc_adapter *sa,
2693 struct sfc_mae_parse_ctx *ctx,
2694 struct sfc_mae_outer_rule **rulep,
2695 struct rte_flow_error *error)
2696 {
2697 efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
2698 int rc;
2699
2700 if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE) {
2701 *rulep = NULL;
2702 goto no_or_id;
2703 }
2704
2705 SFC_ASSERT(ctx->match_spec_outer != NULL);
2706
2707 if (!efx_mae_match_spec_is_valid(sa->nic, ctx->match_spec_outer)) {
2708 return rte_flow_error_set(error, ENOTSUP,
2709 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2710 "Inconsistent pattern (outer)");
2711 }
2712
2713 *rulep = sfc_mae_outer_rule_attach(sa, ctx->match_spec_outer,
2714 ctx->encap_type);
2715 if (*rulep != NULL) {
2716 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2717 } else {
2718 rc = sfc_mae_outer_rule_add(sa, ctx->match_spec_outer,
2719 ctx->encap_type, rulep);
2720 if (rc != 0) {
2721 return rte_flow_error_set(error, rc,
2722 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2723 "Failed to process the pattern");
2724 }
2725 }
2726
2727 /* The spec has now been tracked by the outer rule entry. */
2728 ctx->match_spec_outer = NULL;
2729
2730 no_or_id:
2731 switch (ctx->ft_rule_type) {
2732 case SFC_FT_RULE_NONE:
2733 break;
2734 case SFC_FT_RULE_JUMP:
2735 /* No action rule */
2736 return 0;
2737 case SFC_FT_RULE_GROUP:
2738 /*
2739 * Match on recirculation ID rather than
2740 * on the outer rule allocation handle.
2741 */
2742 rc = efx_mae_match_spec_recirc_id_set(ctx->match_spec_action,
2743 SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id));
2744 if (rc != 0) {
2745 return rte_flow_error_set(error, rc,
2746 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2747 "tunnel offload: GROUP: AR: failed to request match on RECIRC_ID");
2748 }
2749 return 0;
2750 default:
2751 SFC_ASSERT(B_FALSE);
2752 }
2753
2754 /*
2755 * In MAE, lookup sequence comprises outer parse, outer rule lookup,
2756 * inner parse (when some outer rule is hit) and action rule lookup.
2757 * If the currently processed flow does not come with an outer rule,
2758 * its action rule must be available only for packets which miss in
2759 * outer rule table. Set OR_ID match field to 0xffffffff/0xffffffff
2760 * in the action rule specification; this ensures correct behaviour.
2761 *
2762 * If, on the other hand, this flow does have an outer rule, its ID
2763 * may be unknown at the moment (not yet allocated), but OR_ID mask
2764 * has to be set to 0xffffffff anyway for correct class comparisons.
2765 * When the outer rule has been allocated, this match field will be
2766 * overridden by sfc_mae_outer_rule_enable() to use the right value.
2767 */
2768 rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
2769 &invalid_rule_id);
2770 if (rc != 0) {
2771 if (*rulep != NULL)
2772 sfc_mae_outer_rule_del(sa, *rulep);
2773
2774 *rulep = NULL;
2775
2776 return rte_flow_error_set(error, rc,
2777 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2778 "Failed to process the pattern");
2779 }
2780
2781 return 0;
2782 }
2783
2784 static int
2785 sfc_mae_rule_preparse_item_mark(const struct rte_flow_item_mark *spec,
2786 struct sfc_mae_parse_ctx *ctx)
2787 {
2788 struct sfc_flow_tunnel *ft;
2789 uint32_t user_mark;
2790
2791 if (spec == NULL) {
2792 sfc_err(ctx->sa, "tunnel offload: GROUP: NULL spec in item MARK");
2793 return EINVAL;
2794 }
2795
2796 ft = sfc_flow_tunnel_pick(ctx->sa, spec->id);
2797 if (ft == NULL) {
2798 sfc_err(ctx->sa, "tunnel offload: GROUP: invalid tunnel");
2799 return EINVAL;
2800 }
2801
2802 if (ft->refcnt == 0) {
2803 sfc_err(ctx->sa, "tunnel offload: GROUP: tunnel=%u does not exist",
2804 ft->id);
2805 return ENOENT;
2806 }
2807
2808 user_mark = SFC_FT_GET_USER_MARK(spec->id);
2809 if (user_mark != 0) {
2810 sfc_err(ctx->sa, "tunnel offload: GROUP: invalid item MARK");
2811 return EINVAL;
2812 }
2813
2814 sfc_dbg(ctx->sa, "tunnel offload: GROUP: detected");
2815
2816 ctx->ft_rule_type = SFC_FT_RULE_GROUP;
2817 ctx->ft = ft;
2818
2819 return 0;
2820 }
2821
2822 static int
2823 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
2824 struct sfc_mae_parse_ctx *ctx,
2825 struct rte_flow_error *error)
2826 {
2827 const struct rte_flow_item *pattern = ctx->pattern;
2828 struct sfc_mae *mae = &sa->mae;
2829 uint8_t recirc_id = 0;
2830 int rc;
2831
2832 if (pattern == NULL) {
2833 rte_flow_error_set(error, EINVAL,
2834 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
2835 "NULL pattern");
2836 return -rte_errno;
2837 }
2838
2839 for (;;) {
2840 switch (pattern->type) {
2841 case RTE_FLOW_ITEM_TYPE_MARK:
2842 rc = sfc_mae_rule_preparse_item_mark(pattern->spec,
2843 ctx);
2844 if (rc != 0) {
2845 return rte_flow_error_set(error, rc,
2846 RTE_FLOW_ERROR_TYPE_ITEM,
2847 pattern, "tunnel offload: GROUP: invalid item MARK");
2848 }
2849 ++pattern;
2850 continue;
2851 case RTE_FLOW_ITEM_TYPE_VXLAN:
2852 ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
2853 ctx->tunnel_def_mask = &rte_flow_item_vxlan_mask;
2854 ctx->tunnel_def_mask_size =
2855 sizeof(rte_flow_item_vxlan_mask);
2856 break;
2857 case RTE_FLOW_ITEM_TYPE_GENEVE:
2858 ctx->encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
2859 ctx->tunnel_def_mask = &rte_flow_item_geneve_mask;
2860 ctx->tunnel_def_mask_size =
2861 sizeof(rte_flow_item_geneve_mask);
2862 break;
2863 case RTE_FLOW_ITEM_TYPE_NVGRE:
2864 ctx->encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
2865 ctx->tunnel_def_mask = &rte_flow_item_nvgre_mask;
2866 ctx->tunnel_def_mask_size =
2867 sizeof(rte_flow_item_nvgre_mask);
2868 break;
2869 case RTE_FLOW_ITEM_TYPE_END:
2870 break;
2871 default:
2872 ++pattern;
2873 continue;
2874 		}
2875
2876 break;
2877 }
2878
2879 switch (ctx->ft_rule_type) {
2880 case SFC_FT_RULE_NONE:
2881 if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
2882 return 0;
2883 break;
2884 case SFC_FT_RULE_JUMP:
2885 if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
2886 return rte_flow_error_set(error, ENOTSUP,
2887 RTE_FLOW_ERROR_TYPE_ITEM,
2888 pattern, "tunnel offload: JUMP: invalid item");
2889 }
2890 ctx->encap_type = ctx->ft->encap_type;
2891 break;
2892 case SFC_FT_RULE_GROUP:
2893 if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
2894 return rte_flow_error_set(error, EINVAL,
2895 RTE_FLOW_ERROR_TYPE_ITEM,
2896 NULL, "tunnel offload: GROUP: missing tunnel item");
2897 } else if (ctx->encap_type != ctx->ft->encap_type) {
2898 return rte_flow_error_set(error, EINVAL,
2899 RTE_FLOW_ERROR_TYPE_ITEM,
2900 pattern, "tunnel offload: GROUP: tunnel type mismatch");
2901 }
2902
2903 /*
2904 * The HW/FW hasn't got support for the use of "ENC" fields in
2905 * action rules (except the VNET_ID one) yet. As a workaround,
2906 * start parsing the pattern from the tunnel item.
2907 */
2908 ctx->pattern = pattern;
2909 break;
2910 default:
2911 SFC_ASSERT(B_FALSE);
2912 break;
2913 }
2914
2915 if ((mae->encap_types_supported & (1U << ctx->encap_type)) == 0) {
2916 return rte_flow_error_set(error, ENOTSUP,
2917 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2918 "OR: unsupported tunnel type");
2919 }
2920
2921 switch (ctx->ft_rule_type) {
2922 case SFC_FT_RULE_JUMP:
2923 recirc_id = SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id);
2924 /* FALLTHROUGH */
2925 case SFC_FT_RULE_NONE:
2926 if (ctx->priority >= mae->nb_outer_rule_prios_max) {
2927 return rte_flow_error_set(error, ENOTSUP,
2928 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2929 NULL, "OR: unsupported priority level");
2930 }
2931
2932 rc = efx_mae_match_spec_init(sa->nic,
2933 EFX_MAE_RULE_OUTER, ctx->priority,
2934 &ctx->match_spec_outer);
2935 if (rc != 0) {
2936 return rte_flow_error_set(error, rc,
2937 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2938 "OR: failed to initialise the match specification");
2939 }
2940
2941 /*
2942 * Outermost items comprise a match
2943 * specification of type OUTER.
2944 */
2945 ctx->match_spec = ctx->match_spec_outer;
2946
2947 /* Outermost items use "ENC" EFX MAE field IDs. */
2948 ctx->field_ids_remap = field_ids_remap_to_encap;
2949
2950 rc = efx_mae_outer_rule_recirc_id_set(ctx->match_spec,
2951 recirc_id);
2952 if (rc != 0) {
2953 return rte_flow_error_set(error, rc,
2954 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2955 "OR: failed to initialise RECIRC_ID");
2956 }
2957 break;
2958 case SFC_FT_RULE_GROUP:
2959 /* Outermost items -> "ENC" match fields in the action rule. */
2960 ctx->field_ids_remap = field_ids_remap_to_encap;
2961 ctx->match_spec = ctx->match_spec_action;
2962
2963 /* No own outer rule; match on JUMP OR's RECIRC_ID is used. */
2964 ctx->encap_type = EFX_TUNNEL_PROTOCOL_NONE;
2965 break;
2966 default:
2967 SFC_ASSERT(B_FALSE);
2968 break;
2969 }
2970
2971 return 0;
2972 }
2973
2974 static void
2975 sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
2976 struct sfc_mae_parse_ctx *ctx)
2977 {
2978 if (ctx->encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2979 return;
2980
2981 if (ctx->match_spec_outer != NULL)
2982 efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
2983 }
2984
2985 int
2986 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
2987 const struct rte_flow_item pattern[],
2988 struct sfc_flow_spec_mae *spec,
2989 struct rte_flow_error *error)
2990 {
2991 struct sfc_mae_parse_ctx ctx_mae;
2992 unsigned int priority_shift = 0;
2993 struct sfc_flow_parse_ctx ctx;
2994 int rc;
2995
2996 memset(&ctx_mae, 0, sizeof(ctx_mae));
2997 ctx_mae.ft_rule_type = spec->ft_rule_type;
2998 ctx_mae.priority = spec->priority;
2999 ctx_mae.ft = spec->ft;
3000 ctx_mae.sa = sa;
3001
3002 switch (ctx_mae.ft_rule_type) {
3003 case SFC_FT_RULE_JUMP:
3004 /*
3005 * By design, this flow should be represented solely by the
3006 * outer rule. But the HW/FW hasn't got support for setting
3007 * Rx mark from RECIRC_ID on outer rule lookup yet. Neither
3008 * does it support outer rule counters. As a workaround, an
3009 * action rule of lower priority is used to do the job.
3010 */
3011 priority_shift = 1;
3012
3013 /* FALLTHROUGH */
3014 case SFC_FT_RULE_GROUP:
3015 if (ctx_mae.priority != 0) {
3016 /*
3017 * Because of the above workaround, deny the
3018 * use of priorities to JUMP and GROUP rules.
3019 */
3020 rc = rte_flow_error_set(error, ENOTSUP,
3021 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
3022 "tunnel offload: priorities are not supported");
3023 goto fail_priority_check;
3024 }
3025
3026 /* FALLTHROUGH */
3027 case SFC_FT_RULE_NONE:
3028 rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
3029 spec->priority + priority_shift,
3030 &ctx_mae.match_spec_action);
3031 if (rc != 0) {
3032 rc = rte_flow_error_set(error, rc,
3033 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3034 "AR: failed to initialise the match specification");
3035 goto fail_init_match_spec_action;
3036 }
3037 break;
3038 default:
3039 SFC_ASSERT(B_FALSE);
3040 break;
3041 }
3042
3043 /*
3044 * As a preliminary setting, assume that there is no encapsulation
3045 * in the pattern. That is, pattern items are about to comprise a
3046 * match specification of type ACTION and use non-encap. field IDs.
3047 *
3048 * sfc_mae_rule_encap_parse_init() below may override this.
3049 */
3050 ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
3051 ctx_mae.match_spec = ctx_mae.match_spec_action;
3052 ctx_mae.field_ids_remap = field_ids_no_remap;
3053 ctx_mae.pattern = pattern;
3054
3055 ctx.type = SFC_FLOW_PARSE_CTX_MAE;
3056 ctx.mae = &ctx_mae;
3057
3058 rc = sfc_mae_rule_encap_parse_init(sa, &ctx_mae, error);
3059 if (rc != 0)
3060 goto fail_encap_parse_init;
3061
3062 /*
3063 * sfc_mae_rule_encap_parse_init() may have detected tunnel offload
3064 * GROUP rule. Remember its properties for later use.
3065 */
3066 spec->ft_rule_type = ctx_mae.ft_rule_type;
3067 spec->ft = ctx_mae.ft;
3068
3069 rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
3070 ctx_mae.pattern, &ctx, error);
3071 if (rc != 0)
3072 goto fail_parse_pattern;
3073
3074 rc = sfc_mae_rule_process_pattern_data(&ctx_mae, error);
3075 if (rc != 0)
3076 goto fail_process_pattern_data;
3077
3078 rc = sfc_mae_rule_process_outer(sa, &ctx_mae, &spec->outer_rule, error);
3079 if (rc != 0)
3080 goto fail_process_outer;
3081
3082 if (ctx_mae.match_spec_action != NULL &&
3083 !efx_mae_match_spec_is_valid(sa->nic, ctx_mae.match_spec_action)) {
3084 rc = rte_flow_error_set(error, ENOTSUP,
3085 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
3086 "Inconsistent pattern");
3087 goto fail_validate_match_spec_action;
3088 }
3089
3090 spec->match_spec = ctx_mae.match_spec_action;
3091
3092 return 0;
3093
3094 fail_validate_match_spec_action:
3095 fail_process_outer:
3096 fail_process_pattern_data:
3097 fail_parse_pattern:
3098 sfc_mae_rule_encap_parse_fini(sa, &ctx_mae);
3099
3100 fail_encap_parse_init:
3101 if (ctx_mae.match_spec_action != NULL)
3102 efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
3103
3104 fail_init_match_spec_action:
3105 fail_priority_check:
3106 return rc;
3107 }
3108
3109 static int
3110 sfc_mae_rule_parse_action_set_mac(struct sfc_adapter *sa,
3111 enum sfc_mae_mac_addr_type type,
3112 const struct rte_flow_action_set_mac *conf,
3113 struct sfc_mae_aset_ctx *ctx,
3114 struct rte_flow_error *error)
3115 {
3116 struct sfc_mae_mac_addr **mac_addrp;
3117 int rc;
3118
3119 if (conf == NULL) {
3120 return rte_flow_error_set(error, EINVAL,
3121 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3122 "the MAC address entry definition is NULL");
3123 }
3124
3125 switch (type) {
3126 case SFC_MAE_MAC_ADDR_DST:
3127 rc = efx_mae_action_set_populate_set_dst_mac(ctx->spec);
3128 mac_addrp = &ctx->dst_mac;
3129 break;
3130 case SFC_MAE_MAC_ADDR_SRC:
3131 rc = efx_mae_action_set_populate_set_src_mac(ctx->spec);
3132 mac_addrp = &ctx->src_mac;
3133 break;
3134 default:
3135 rc = EINVAL;
3136 break;
3137 }
3138
3139 if (rc != 0)
3140 goto error;
3141
3142 *mac_addrp = sfc_mae_mac_addr_attach(sa, conf->mac_addr);
3143 if (*mac_addrp != NULL)
3144 return 0;
3145
3146 rc = sfc_mae_mac_addr_add(sa, conf->mac_addr, mac_addrp);
3147 if (rc != 0)
3148 goto error;
3149
3150 return 0;
3151
3152 error:
3153 return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
3154 NULL, "failed to request set MAC action");
3155 }
3156
3157 /*
3158 * An action supported by MAE may correspond to a bundle of RTE flow actions,
3159  * for example, VLAN_PUSH = OF_PUSH_VLAN + OF_VLAN_SET_VID + OF_VLAN_SET_PCP.
3160 * That is, related RTE flow actions need to be tracked as parts of a whole
3161  * so that they can be combined into a single action and submitted to the
3162  * MAE representation of a given rule's action set.
3163 *
3164 * Each RTE flow action provided by an application gets classified as
3165 * one belonging to some bundle type. If an action is not supposed to
3166 * belong to any bundle, or if this action is END, it is described as
3167 * one belonging to a dummy bundle of type EMPTY.
3168 *
3169 * A currently tracked bundle will be submitted if a repeating
3170  * action or an action of a different bundle type follows.
3171 */
3172
3173 enum sfc_mae_actions_bundle_type {
3174 SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
3175 SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
3176 };
3177
3178 struct sfc_mae_actions_bundle {
3179 enum sfc_mae_actions_bundle_type type;
3180
3181 /* Indicates actions already tracked by the current bundle */
3182 uint64_t actions_mask;
3183
3184 /* Parameters used by SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH */
3185 rte_be16_t vlan_push_tpid;
3186 rte_be16_t vlan_push_tci;
3187 };
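/*
 * Editorial example of the VLAN_PUSH bundle at work (field values are
 * illustrative only): the three OF_* actions accumulate into one MAE VLAN
 * push submitted by sfc_mae_actions_bundle_submit():
 *
 *	OF_PUSH_VLAN    (ethertype 0x8100) -> vlan_push_tpid = ethertype
 *	OF_SET_VLAN_VID (vid 100)          -> vlan_push_tci |= vid & 0x0fff
 *	OF_SET_VLAN_PCP (pcp 3)            -> vlan_push_tci |= pcp << 13
 *
 * Both TCI contributions are stored big-endian, as in the helpers below.
 */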
3188
3189 /*
3190 * Combine configuration of RTE flow actions tracked by the bundle into a
3191 * single action and submit the result to MAE action set specification.
3192 * Do nothing in the case of dummy action bundle.
3193 */
3194 static int
3195 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
3196 efx_mae_actions_t *spec)
3197 {
3198 int rc = 0;
3199
3200 switch (bundle->type) {
3201 case SFC_MAE_ACTIONS_BUNDLE_EMPTY:
3202 break;
3203 case SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH:
3204 rc = efx_mae_action_set_populate_vlan_push(
3205 spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
3206 break;
3207 default:
3208 SFC_ASSERT(B_FALSE);
3209 break;
3210 }
3211
3212 return rc;
3213 }
3214
3215 /*
3216 * Given the type of the next RTE flow action in the line, decide
3217 * whether a new bundle is about to start, and, if this is the case,
3218 * submit and reset the current bundle.
3219 */
3220 static int
3221 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
3222 struct sfc_mae_actions_bundle *bundle,
3223 efx_mae_actions_t *spec,
3224 struct rte_flow_error *error)
3225 {
3226 enum sfc_mae_actions_bundle_type bundle_type_new;
3227 int rc;
3228
3229 switch (action->type) {
3230 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3231 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3232 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3233 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH;
3234 break;
3235 default:
3236 /*
3237 * Self-sufficient actions, including END, are handled in this
3238 * case. No checks for unsupported actions are needed here
3239 * because parsing doesn't occur at this point.
3240 */
3241 bundle_type_new = SFC_MAE_ACTIONS_BUNDLE_EMPTY;
3242 break;
3243 }
3244
3245 if (bundle_type_new != bundle->type ||
3246 (bundle->actions_mask & (1ULL << action->type)) != 0) {
3247 rc = sfc_mae_actions_bundle_submit(bundle, spec);
3248 if (rc != 0)
3249 goto fail_submit;
3250
3251 memset(bundle, 0, sizeof(*bundle));
3252 }
3253
3254 bundle->type = bundle_type_new;
3255
3256 return 0;
3257
3258 fail_submit:
3259 return rte_flow_error_set(error, rc,
3260 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3261 "Failed to request the (group of) action(s)");
3262 }
3263
3264 static void
3265 sfc_mae_rule_parse_action_of_push_vlan(
3266 const struct rte_flow_action_of_push_vlan *conf,
3267 struct sfc_mae_actions_bundle *bundle)
3268 {
3269 bundle->vlan_push_tpid = conf->ethertype;
3270 }
3271
3272 static void
3273 sfc_mae_rule_parse_action_of_set_vlan_vid(
3274 const struct rte_flow_action_of_set_vlan_vid *conf,
3275 struct sfc_mae_actions_bundle *bundle)
3276 {
3277 bundle->vlan_push_tci |= (conf->vlan_vid &
3278 rte_cpu_to_be_16(RTE_LEN2MASK(12, uint16_t)));
3279 }
3280
3281 static void
3282 sfc_mae_rule_parse_action_of_set_vlan_pcp(
3283 const struct rte_flow_action_of_set_vlan_pcp *conf,
3284 struct sfc_mae_actions_bundle *bundle)
3285 {
3286 uint16_t vlan_tci_pcp = (uint16_t)(conf->vlan_pcp &
3287 RTE_LEN2MASK(3, uint8_t)) << 13;
3288
3289 bundle->vlan_push_tci |= rte_cpu_to_be_16(vlan_tci_pcp);
3290 }
3291
3292 struct sfc_mae_parsed_item {
3293 const struct rte_flow_item *item;
3294 size_t proto_header_ofst;
3295 size_t proto_header_size;
3296 };
3297
3298 /*
3299 * For each 16-bit word of the given header, override
3300 * bits enforced by the corresponding 16-bit mask.
3301 */
3302 static void
3303 sfc_mae_header_force_item_masks(uint8_t *header_buf,
3304 const struct sfc_mae_parsed_item *parsed_items,
3305 unsigned int nb_parsed_items)
3306 {
3307 unsigned int item_idx;
3308
3309 for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
3310 const struct sfc_mae_parsed_item *parsed_item;
3311 const struct rte_flow_item *item;
3312 size_t proto_header_size;
3313 size_t ofst;
3314
3315 parsed_item = &parsed_items[item_idx];
3316 proto_header_size = parsed_item->proto_header_size;
3317 item = parsed_item->item;
3318
3319 for (ofst = 0; ofst < proto_header_size;
3320 ofst += sizeof(rte_be16_t)) {
3321 rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
3322 const rte_be16_t *w_maskp;
3323 const rte_be16_t *w_specp;
3324
3325 w_maskp = RTE_PTR_ADD(item->mask, ofst);
3326 w_specp = RTE_PTR_ADD(item->spec, ofst);
3327
3328 *wp &= ~(*w_maskp);
3329 *wp |= (*w_specp & *w_maskp);
3330 }
3331
3332 header_buf += proto_header_size;
3333 }
3334 }
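/*
 * Editorial worked example for the per-word update above (values are
 * illustrative): with a header word of 0x1140, an item spec word of 0x1234
 * and an item mask word of 0x00ff,
 *
 *	(0x1140 & ~0x00ff) | (0x1234 & 0x00ff) == 0x1100 | 0x0034 == 0x1134
 *
 * i.e. only the bits covered by the mask are taken from the item spec.
 */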
3335
3336 #define SFC_IPV4_TTL_DEF 0x40
3337 #define SFC_IPV6_VTC_FLOW_DEF 0x60000000
3338 #define SFC_IPV6_HOP_LIMITS_DEF 0xff
3339 #define SFC_VXLAN_FLAGS_DEF 0x08000000
3340
3341 static int
3342 sfc_mae_rule_parse_action_vxlan_encap(
3343 struct sfc_mae *mae,
3344 const struct rte_flow_action_vxlan_encap *conf,
3345 efx_mae_actions_t *spec,
3346 struct rte_flow_error *error)
3347 {
3348 struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
3349 struct rte_flow_item *pattern = conf->definition;
3350 uint8_t *buf = bounce_eh->buf;
3351
3352 /* This array will keep track of non-VOID pattern items. */
3353 struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
3354 2 /* VLAN tags */ +
3355 1 /* IPv4 or IPv6 */ +
3356 1 /* UDP */ +
3357 1 /* VXLAN */];
3358 unsigned int nb_parsed_items = 0;
3359
3360 size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
3361 uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
3362 sizeof(struct rte_ipv6_hdr))];
3363 struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
3364 struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
3365 struct rte_vxlan_hdr *vxlan = NULL;
3366 struct rte_udp_hdr *udp = NULL;
3367 unsigned int nb_vlan_tags = 0;
3368 size_t next_proto_ofst = 0;
3369 size_t ethertype_ofst = 0;
3370 uint64_t exp_items;
3371 int rc;
3372
3373 if (pattern == NULL) {
3374 return rte_flow_error_set(error, EINVAL,
3375 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3376 "The encap. header definition is NULL");
3377 }
3378
3379 bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
3380 bounce_eh->size = 0;
3381
3382 /*
3383 * Process pattern items and remember non-VOID ones.
3384 * Defer applying masks until after the complete header
3385 * has been built from the pattern items.
3386 */
3387 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);
3388
3389 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
3390 struct sfc_mae_parsed_item *parsed_item;
3391 const uint64_t exp_items_extra_vlan[] = {
3392 RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
3393 };
3394 size_t proto_header_size;
3395 rte_be16_t *ethertypep;
3396 uint8_t *next_protop;
3397 uint8_t *buf_cur;
3398
3399 if (pattern->spec == NULL) {
3400 return rte_flow_error_set(error, EINVAL,
3401 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3402 "NULL item spec in the encap. header");
3403 }
3404
3405 if (pattern->mask == NULL) {
3406 return rte_flow_error_set(error, EINVAL,
3407 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3408 "NULL item mask in the encap. header");
3409 }
3410
3411 if (pattern->last != NULL) {
3412 /* This is not a match pattern, so disallow range. */
3413 return rte_flow_error_set(error, EINVAL,
3414 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3415 "Range item in the encap. header");
3416 }
3417
3418 if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
3419 /* Handle VOID separately, for clarity. */
3420 continue;
3421 }
3422
3423 if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
3424 return rte_flow_error_set(error, ENOTSUP,
3425 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3426 "Unexpected item in the encap. header");
3427 }
3428
3429 parsed_item = &parsed_items[nb_parsed_items];
3430 buf_cur = buf + bounce_eh->size;
3431
3432 switch (pattern->type) {
3433 case RTE_FLOW_ITEM_TYPE_ETH:
3434 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
3435 exp_items);
3436 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
3437 hdr) != 0);
3438
3439 proto_header_size = sizeof(struct rte_ether_hdr);
3440
3441 ethertype_ofst = eth_ethertype_ofst;
3442
3443 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
3444 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
3445 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
3446 break;
3447 case RTE_FLOW_ITEM_TYPE_VLAN:
3448 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
3449 exp_items);
3450 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
3451 hdr) != 0);
3452
3453 proto_header_size = sizeof(struct rte_vlan_hdr);
3454
3455 ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
3456 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);
3457
3458 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3459 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3460
3461 ethertype_ofst =
3462 bounce_eh->size +
3463 offsetof(struct rte_vlan_hdr, eth_proto);
3464
3465 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
3466 RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
3467 exp_items |= exp_items_extra_vlan[nb_vlan_tags];
3468
3469 ++nb_vlan_tags;
3470 break;
3471 case RTE_FLOW_ITEM_TYPE_IPV4:
3472 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
3473 exp_items);
3474 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
3475 hdr) != 0);
3476
3477 proto_header_size = sizeof(struct rte_ipv4_hdr);
3478
3479 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3480 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3481
3482 next_proto_ofst =
3483 bounce_eh->size +
3484 offsetof(struct rte_ipv4_hdr, next_proto_id);
3485
3486 ipv4 = (struct rte_ipv4_hdr *)buf_cur;
3487
3488 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
3489 break;
3490 case RTE_FLOW_ITEM_TYPE_IPV6:
3491 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
3492 exp_items);
3493 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
3494 hdr) != 0);
3495
3496 proto_header_size = sizeof(struct rte_ipv6_hdr);
3497
3498 ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
3499 *ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3500
3501 next_proto_ofst = bounce_eh->size +
3502 offsetof(struct rte_ipv6_hdr, proto);
3503
3504 ipv6 = (struct rte_ipv6_hdr *)buf_cur;
3505
3506 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
3507 break;
3508 case RTE_FLOW_ITEM_TYPE_UDP:
3509 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
3510 exp_items);
3511 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
3512 hdr) != 0);
3513
3514 proto_header_size = sizeof(struct rte_udp_hdr);
3515
3516 next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
3517 *next_protop = IPPROTO_UDP;
3518
3519 udp = (struct rte_udp_hdr *)buf_cur;
3520
3521 exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
3522 break;
3523 case RTE_FLOW_ITEM_TYPE_VXLAN:
3524 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
3525 exp_items);
3526 RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
3527 hdr) != 0);
3528
3529 proto_header_size = sizeof(struct rte_vxlan_hdr);
3530
3531 vxlan = (struct rte_vxlan_hdr *)buf_cur;
3532
3533 udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
3534 udp->dgram_len = RTE_BE16(sizeof(*udp) +
3535 sizeof(*vxlan));
3536 udp->dgram_cksum = 0;
3537
3538 exp_items = 0;
3539 break;
3540 default:
3541 return rte_flow_error_set(error, ENOTSUP,
3542 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3543 "Unknown item in the encap. header");
3544 }
3545
3546 if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
3547 return rte_flow_error_set(error, E2BIG,
3548 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3549 "The encap. header is too big");
3550 }
3551
3552 if ((proto_header_size & 1) != 0) {
3553 return rte_flow_error_set(error, EINVAL,
3554 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3555 "Odd layer size in the encap. header");
3556 }
3557
3558 rte_memcpy(buf_cur, pattern->spec, proto_header_size);
3559 bounce_eh->size += proto_header_size;
3560
3561 parsed_item->item = pattern;
3562 parsed_item->proto_header_size = proto_header_size;
3563 ++nb_parsed_items;
3564 }
3565
3566 if (exp_items != 0) {
3567 /* Parsing item VXLAN would have reset exp_items to 0. */
3568 return rte_flow_error_set(error, ENOTSUP,
3569 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
3570 "No item VXLAN in the encap. header");
3571 }
3572
3573 /* One of the pointers (ipv4, ipv6) refers to a dummy area. */
3574 ipv4->version_ihl = RTE_IPV4_VHL_DEF;
3575 ipv4->time_to_live = SFC_IPV4_TTL_DEF;
3576 ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
3577 sizeof(*vxlan));
3578 /* The HW cannot compute this checksum. */
3579 ipv4->hdr_checksum = 0;
3580 ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);
3581
3582 ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
3583 ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
3584 ipv6->payload_len = udp->dgram_len;
3585
3586 vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);
3587
3588 /* Take care of the masks. */
3589 sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);
3590
3591 rc = efx_mae_action_set_populate_encap(spec);
3592 if (rc != 0) {
3593 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
3594 NULL, "failed to request action ENCAP");
3595 }
3596
3597 return rc;
3598 }
3599
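/*
 * Illustrative sketch (not part of the driver): one way an application
 * could describe the encapsulation header that the parser above walks.
 * The item order mirrors the exp_items transitions: ETH, optional VLAN
 * tags, IPV4 or IPV6, UDP and, finally, VXLAN. The addresses and the VNI
 * below are placeholder values chosen purely for illustration; the parser
 * itself fills in EtherType, IP next-protocol and the UDP VXLAN port.
 *
 *	static int
 *	build_vxlan_encap_action(struct rte_flow_action *action)
 *	{
 *		static struct rte_flow_item_eth eth;
 *		static struct rte_flow_item_ipv4 ipv4 = {
 *			.hdr = {
 *				.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *				.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
 *			},
 *		};
 *		static struct rte_flow_item_udp udp;
 *		static struct rte_flow_item_vxlan vxlan = {
 *			.hdr = { .vx_vni = RTE_BE32(42 << 8) },
 *		};
 *		static struct rte_flow_item items[] = {
 *			{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *			{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
 *			{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *			{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *			{ .type = RTE_FLOW_ITEM_TYPE_END },
 *		};
 *		static const struct rte_flow_action_vxlan_encap conf = {
 *			.definition = items,
 *		};
 *
 *		action->type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
 *		action->conf = &conf;
 *		return 0;
 *	}
 */
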
3600 static int
3601 sfc_mae_rule_parse_action_mark(struct sfc_adapter *sa,
3602 const struct rte_flow_action_mark *conf,
3603 const struct sfc_flow_spec_mae *spec_mae,
3604 efx_mae_actions_t *spec)
3605 {
3606 int rc;
3607
3608 if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3609 /* Workaround. See sfc_flow_parse_rte_to_mae() */
3610 } else if (conf->id > SFC_FT_USER_MARK_MASK) {
3611 sfc_err(sa, "the mark value is too large");
3612 return EINVAL;
3613 }
3614
3615 rc = efx_mae_action_set_populate_mark(spec, conf->id);
3616 if (rc != 0)
3617 sfc_err(sa, "failed to request action MARK: %s", strerror(rc));
3618
3619 return rc;
3620 }
3621
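/*
 * Illustrative sketch (not part of the driver): requesting action MARK
 * from an application. The mark value must fit SFC_FT_USER_MARK_MASK
 * (checked above), and mark delivery has to be negotiated beforehand via
 * rte_eth_rx_metadata_negotiate() with RTE_ETH_RX_METADATA_USER_MARK
 * (see the check in sfc_mae_rule_parse_action() below). The value 0x2a
 * is a placeholder.
 *
 *	static const struct rte_flow_action_mark mark_conf = { .id = 0x2a };
 *	static const struct rte_flow_action mark_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_MARK,
 *		.conf = &mark_conf,
 *	};
 */
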
3622 static int
3623 sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
3624 const struct rte_flow_action_count *conf
3625 __rte_unused,
3626 efx_mae_actions_t *spec)
3627 {
3628 int rc;
3629
3630 if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
3631 sfc_err(sa,
3632 "counter queue is not configured for COUNT action");
3633 rc = EINVAL;
3634 goto fail_counter_queue_uninit;
3635 }
3636
3637 if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE) {
3638 rc = EINVAL;
3639 goto fail_no_service_core;
3640 }
3641
3642 rc = efx_mae_action_set_populate_count(spec);
3643 if (rc != 0) {
3644 sfc_err(sa,
3645 "failed to populate counters in MAE action set: %s",
3646 rte_strerror(rc));
3647 goto fail_populate_count;
3648 }
3649
3650 return 0;
3651
3652 fail_populate_count:
3653 fail_no_service_core:
3654 fail_counter_queue_uninit:
3655
3656 return rc;
3657 }
3658
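/*
 * Illustrative sketch (not part of the driver): requesting a counter.
 * Per the checks above, the driver's counter Rx queue must have been set
 * up and a service lcore must be available (for instance, by starting the
 * application with a service coremask); otherwise parsing fails with
 * EINVAL. The counter ID below is a placeholder.
 *
 *	static const struct rte_flow_action_count count_conf = { .id = 0 };
 *	static const struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *		.conf = &count_conf,
 *	};
 */
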
3659 static int
3660 sfc_mae_rule_parse_action_phy_port(struct sfc_adapter *sa,
3661 const struct rte_flow_action_phy_port *conf,
3662 efx_mae_actions_t *spec)
3663 {
3664 efx_mport_sel_t mport;
3665 uint32_t phy_port;
3666 int rc;
3667
3668 if (conf->original != 0)
3669 phy_port = efx_nic_cfg_get(sa->nic)->enc_assigned_port;
3670 else
3671 phy_port = conf->index;
3672
3673 rc = efx_mae_mport_by_phy_port(phy_port, &mport);
3674 if (rc != 0) {
3675 sfc_err(sa, "failed to convert phys. port ID %u to m-port selector: %s",
3676 phy_port, strerror(rc));
3677 return rc;
3678 }
3679
3680 rc = efx_mae_action_set_populate_deliver(spec, &mport);
3681 if (rc != 0) {
3682 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3683 mport.sel, strerror(rc));
3684 }
3685
3686 return rc;
3687 }
3688
3689 static int
3690 sfc_mae_rule_parse_action_pf_vf(struct sfc_adapter *sa,
3691 const struct rte_flow_action_vf *vf_conf,
3692 efx_mae_actions_t *spec)
3693 {
3694 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
3695 efx_mport_sel_t mport;
3696 uint32_t vf;
3697 int rc;
3698
3699 if (vf_conf == NULL)
3700 vf = EFX_PCI_VF_INVALID;
3701 else if (vf_conf->original != 0)
3702 vf = encp->enc_vf;
3703 else
3704 vf = vf_conf->id;
3705
3706 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, vf, &mport);
3707 if (rc != 0) {
3708 sfc_err(sa, "failed to convert PF %u VF %d to m-port: %s",
3709 encp->enc_pf, (vf != EFX_PCI_VF_INVALID) ? (int)vf : -1,
3710 strerror(rc));
3711 return rc;
3712 }
3713
3714 rc = efx_mae_action_set_populate_deliver(spec, &mport);
3715 if (rc != 0) {
3716 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3717 mport.sel, strerror(rc));
3718 }
3719
3720 return rc;
3721 }
3722
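/*
 * Illustrative sketch (not part of the driver): delivering matched traffic
 * to a given VF of the PF backing this ethdev. With "original" left at 0,
 * the "id" field selects the VF; the index used here is a placeholder.
 *
 *	static const struct rte_flow_action_vf vf_conf = { .id = 3 };
 *	static const struct rte_flow_action vf_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_VF,
 *		.conf = &vf_conf,
 *	};
 */
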
3723 static int
3724 sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
3725 const struct rte_flow_action_port_id *conf,
3726 efx_mae_actions_t *spec)
3727 {
3728 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
3729 struct sfc_mae *mae = &sa->mae;
3730 efx_mport_sel_t mport;
3731 uint16_t port_id;
3732 int rc;
3733
3734 if (conf->id > UINT16_MAX)
3735 return EOVERFLOW;
3736
3737 port_id = (conf->original != 0) ? sas->port_id : conf->id;
3738
3739 rc = sfc_mae_switch_get_ethdev_mport(mae->switch_domain_id,
3740 port_id, &mport);
3741 if (rc != 0) {
3742 sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
3743 port_id, strerror(rc));
3744 return rc;
3745 }
3746
3747 rc = efx_mae_action_set_populate_deliver(spec, &mport);
3748 if (rc != 0) {
3749 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3750 mport.sel, strerror(rc));
3751 }
3752
3753 return rc;
3754 }
3755
3756 static int
3757 sfc_mae_rule_parse_action_port_representor(struct sfc_adapter *sa,
3758 const struct rte_flow_action_ethdev *conf,
3759 efx_mae_actions_t *spec)
3760 {
3761 struct sfc_mae *mae = &sa->mae;
3762 efx_mport_sel_t mport;
3763 int rc;
3764
3765 rc = sfc_mae_switch_get_ethdev_mport(mae->switch_domain_id,
3766 conf->port_id, &mport);
3767 if (rc != 0) {
3768 sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
3769 conf->port_id, strerror(rc));
3770 return rc;
3771 }
3772
3773 rc = efx_mae_action_set_populate_deliver(spec, &mport);
3774 if (rc != 0) {
3775 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3776 mport.sel, strerror(rc));
3777 }
3778
3779 return rc;
3780 }
3781
3782 static int
3783 sfc_mae_rule_parse_action_represented_port(struct sfc_adapter *sa,
3784 const struct rte_flow_action_ethdev *conf,
3785 efx_mae_actions_t *spec)
3786 {
3787 struct sfc_mae *mae = &sa->mae;
3788 efx_mport_sel_t mport;
3789 int rc;
3790
3791 rc = sfc_mae_switch_get_entity_mport(mae->switch_domain_id,
3792 conf->port_id, &mport);
3793 if (rc != 0) {
3794 sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
3795 conf->port_id, strerror(rc));
3796 return rc;
3797 }
3798
3799 rc = efx_mae_action_set_populate_deliver(spec, &mport);
3800 if (rc != 0) {
3801 sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
3802 mport.sel, strerror(rc));
3803 }
3804
3805 return rc;
3806 }
3807
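/*
 * Illustrative sketch (not part of the driver): the two ethdev-based
 * delivery actions. PORT_REPRESENTOR resolves the ethdev's own m-port,
 * whereas REPRESENTED_PORT resolves the m-port of the entity behind that
 * ethdev (e.g. the VF behind its representor). The port ID is a
 * placeholder.
 *
 *	static const struct rte_flow_action_ethdev ethdev_conf = {
 *		.port_id = 1,
 *	};
 *	static const struct rte_flow_action deliver_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
 *		.conf = &ethdev_conf,
 *	};
 */
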
3808 static const char * const action_names[] = {
3809 [RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = "VXLAN_DECAP",
3810 [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = "OF_POP_VLAN",
3811 [RTE_FLOW_ACTION_TYPE_SET_MAC_DST] = "SET_MAC_DST",
3812 [RTE_FLOW_ACTION_TYPE_SET_MAC_SRC] = "SET_MAC_SRC",
3813 [RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL] = "OF_DEC_NW_TTL",
3814 [RTE_FLOW_ACTION_TYPE_DEC_TTL] = "DEC_TTL",
3815 [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = "OF_PUSH_VLAN",
3816 [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = "OF_SET_VLAN_VID",
3817 [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = "OF_SET_VLAN_PCP",
3818 [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = "VXLAN_ENCAP",
3819 [RTE_FLOW_ACTION_TYPE_COUNT] = "COUNT",
3820 [RTE_FLOW_ACTION_TYPE_FLAG] = "FLAG",
3821 [RTE_FLOW_ACTION_TYPE_MARK] = "MARK",
3822 [RTE_FLOW_ACTION_TYPE_PHY_PORT] = "PHY_PORT",
3823 [RTE_FLOW_ACTION_TYPE_PF] = "PF",
3824 [RTE_FLOW_ACTION_TYPE_VF] = "VF",
3825 [RTE_FLOW_ACTION_TYPE_PORT_ID] = "PORT_ID",
3826 [RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR] = "PORT_REPRESENTOR",
3827 [RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = "REPRESENTED_PORT",
3828 [RTE_FLOW_ACTION_TYPE_DROP] = "DROP",
3829 [RTE_FLOW_ACTION_TYPE_JUMP] = "JUMP",
3830 };
3831
3832 static int
3833 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
3834 const struct rte_flow_action *action,
3835 const struct sfc_flow_spec_mae *spec_mae,
3836 struct sfc_mae_actions_bundle *bundle,
3837 struct sfc_mae_aset_ctx *ctx,
3838 struct rte_flow_error *error)
3839 {
3840 const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
3841 const uint64_t rx_metadata = sa->negotiated_rx_metadata;
3842 efx_mae_actions_t *spec = ctx->spec;
3843 bool custom_error = B_FALSE;
3844 int rc = 0;
3845
3846 switch (action->type) {
3847 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3848 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
3849 bundle->actions_mask);
3850 if (outer_rule == NULL ||
3851 outer_rule->encap_type != EFX_TUNNEL_PROTOCOL_VXLAN)
3852 rc = EINVAL;
3853 else
3854 rc = efx_mae_action_set_populate_decap(spec);
3855 break;
3856 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3857 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
3858 bundle->actions_mask);
3859 rc = efx_mae_action_set_populate_vlan_pop(spec);
3860 break;
3861 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3862 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_SET_MAC_DST,
3863 bundle->actions_mask);
3864 rc = sfc_mae_rule_parse_action_set_mac(sa, SFC_MAE_MAC_ADDR_DST,
3865 action->conf, ctx,
3866 error);
3867 custom_error = B_TRUE;
3868 break;
3869 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3870 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_SET_MAC_SRC,
3871 bundle->actions_mask);
3872 rc = sfc_mae_rule_parse_action_set_mac(sa, SFC_MAE_MAC_ADDR_SRC,
3873 action->conf, ctx,
3874 error);
3875 custom_error = B_TRUE;
3876 break;
3877 case RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL:
3878 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3879 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL,
3880 bundle->actions_mask);
3881 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DEC_TTL,
3882 bundle->actions_mask);
3883 rc = efx_mae_action_set_populate_decr_ip_ttl(spec);
3884 break;
3885 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3886 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
3887 bundle->actions_mask);
3888 sfc_mae_rule_parse_action_of_push_vlan(action->conf, bundle);
3889 break;
3890 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3891 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
3892 bundle->actions_mask);
3893 sfc_mae_rule_parse_action_of_set_vlan_vid(action->conf, bundle);
3894 break;
3895 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3896 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
3897 bundle->actions_mask);
3898 sfc_mae_rule_parse_action_of_set_vlan_pcp(action->conf, bundle);
3899 break;
3900 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3901 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
3902 bundle->actions_mask);
3903 rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
3904 action->conf,
3905 spec, error);
3906 custom_error = B_TRUE;
3907 break;
3908 case RTE_FLOW_ACTION_TYPE_COUNT:
3909 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
3910 bundle->actions_mask);
3911 rc = sfc_mae_rule_parse_action_count(sa, action->conf, spec);
3912 break;
3913 case RTE_FLOW_ACTION_TYPE_FLAG:
3914 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
3915 bundle->actions_mask);
3916 if ((rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0) {
3917 rc = efx_mae_action_set_populate_flag(spec);
3918 } else {
3919 rc = rte_flow_error_set(error, ENOTSUP,
3920 RTE_FLOW_ERROR_TYPE_ACTION,
3921 action,
3922 "flag delivery has not been negotiated");
3923 custom_error = B_TRUE;
3924 }
3925 break;
3926 case RTE_FLOW_ACTION_TYPE_MARK:
3927 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
3928 bundle->actions_mask);
3929 if ((rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
3930 spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3931 rc = sfc_mae_rule_parse_action_mark(sa, action->conf,
3932 spec_mae, spec);
3933 } else {
3934 rc = rte_flow_error_set(error, ENOTSUP,
3935 RTE_FLOW_ERROR_TYPE_ACTION,
3936 action,
3937 "mark delivery has not been negotiated");
3938 custom_error = B_TRUE;
3939 }
3940 break;
3941 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
3942 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,
3943 bundle->actions_mask);
3944 rc = sfc_mae_rule_parse_action_phy_port(sa, action->conf, spec);
3945 break;
3946 case RTE_FLOW_ACTION_TYPE_PF:
3947 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
3948 bundle->actions_mask);
3949 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
3950 break;
3951 case RTE_FLOW_ACTION_TYPE_VF:
3952 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
3953 bundle->actions_mask);
3954 rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
3955 break;
3956 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3957 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
3958 bundle->actions_mask);
3959 rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
3960 break;
3961 case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
3962 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR,
3963 bundle->actions_mask);
3964 rc = sfc_mae_rule_parse_action_port_representor(sa,
3965 action->conf, spec);
3966 break;
3967 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3968 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
3969 bundle->actions_mask);
3970 rc = sfc_mae_rule_parse_action_represented_port(sa,
3971 action->conf, spec);
3972 break;
3973 case RTE_FLOW_ACTION_TYPE_DROP:
3974 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
3975 bundle->actions_mask);
3976 rc = efx_mae_action_set_populate_drop(spec);
3977 break;
3978 case RTE_FLOW_ACTION_TYPE_JUMP:
3979 if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
3980 /* Workaround. See sfc_flow_parse_rte_to_mae() */
3981 break;
3982 }
3983 /* FALLTHROUGH */
3984 default:
3985 return rte_flow_error_set(error, ENOTSUP,
3986 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3987 "Unsupported action");
3988 }
3989
3990 if (rc == 0) {
3991 bundle->actions_mask |= (1ULL << action->type);
3992 } else if (!custom_error) {
3993 if (action->type < RTE_DIM(action_names)) {
3994 const char *action_name = action_names[action->type];
3995
3996 if (action_name != NULL) {
3997 sfc_err(sa, "action %s was rejected: %s",
3998 action_name, strerror(rc));
3999 }
4000 }
4001 rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
4002 NULL, "Failed to request the action");
4003 }
4004
4005 return rc;
4006 }
4007
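/*
 * Illustrative sketch (not part of the driver): a complete transfer-rule
 * action list as the parser above consumes it, one entry at a time, until
 * the END terminator. Note that VXLAN_DECAP is accepted only when the
 * pattern produced an outer rule with VXLAN encapsulation. The port ID
 * and counter ID are placeholders.
 *
 *	static const struct rte_flow_action_count count_conf = { .id = 0 };
 *	static const struct rte_flow_action_ethdev to_port = { .port_id = 1 };
 *	static const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
 *		{
 *			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
 *			.conf = &to_port,
 *		},
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
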
4008 static void
4009 sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh)
4010 {
4011 bounce_eh->type = EFX_TUNNEL_PROTOCOL_NONE;
4012 }
4013
4014 static int
4015 sfc_mae_process_encap_header(struct sfc_adapter *sa,
4016 const struct sfc_mae_bounce_eh *bounce_eh,
4017 struct sfc_mae_encap_header **encap_headerp)
4018 {
4019 if (bounce_eh->type == EFX_TUNNEL_PROTOCOL_NONE) {
4020 *encap_headerp = NULL;
4021 return 0;
4022 }
4023
4024 *encap_headerp = sfc_mae_encap_header_attach(sa, bounce_eh);
4025 if (*encap_headerp != NULL)
4026 return 0;
4027
4028 return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
4029 }
4030
4031 int
4032 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
4033 const struct rte_flow_action actions[],
4034 struct sfc_flow_spec_mae *spec_mae,
4035 struct rte_flow_error *error)
4036 {
4037 struct sfc_mae_actions_bundle bundle = {0};
4038 const struct rte_flow_action *action;
4039 struct sfc_mae_aset_ctx ctx = {0};
4040 struct sfc_mae *mae = &sa->mae;
4041 int rc;
4042
4043 rte_errno = 0;
4044
4045 if (actions == NULL) {
4046 return rte_flow_error_set(error, EINVAL,
4047 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
4048 "NULL actions");
4049 }
4050
4051 rc = efx_mae_action_set_spec_init(sa->nic, &ctx.spec);
4052 if (rc != 0)
4053 goto fail_action_set_spec_init;
4054
4055 for (action = actions;
4056 action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
4057 if (action->type == RTE_FLOW_ACTION_TYPE_COUNT)
4058 ++(ctx.n_counters);
4059 }
4060
4061 if (spec_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
4062 /* JUMP rules don't decapsulate packets. GROUP rules do. */
4063 rc = efx_mae_action_set_populate_decap(ctx.spec);
4064 if (rc != 0)
4065 goto fail_enforce_ft_decap;
4066
4067 if (ctx.n_counters == 0 &&
4068 sfc_mae_counter_stream_enabled(sa)) {
4069 /*
4070 * The user opted not to use action COUNT in this rule,
4071 * but the counter should be enabled implicitly because
4072 * packets hitting this rule contribute to the tunnel's
4073 * total number of hits. See sfc_mae_counter_get().
4074 */
4075 rc = efx_mae_action_set_populate_count(ctx.spec);
4076 if (rc != 0)
4077 goto fail_enforce_ft_count;
4078
4079 ctx.n_counters = 1;
4080 }
4081 }
4082
4083 /* Cleanup after previous encap. header bounce buffer usage. */
4084 sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
4085
4086 for (action = actions;
4087 action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
4088 rc = sfc_mae_actions_bundle_sync(action, &bundle,
4089 ctx.spec, error);
4090 if (rc != 0)
4091 goto fail_rule_parse_action;
4092
4093 rc = sfc_mae_rule_parse_action(sa, action, spec_mae,
4094 &bundle, &ctx, error);
4095 if (rc != 0)
4096 goto fail_rule_parse_action;
4097 }
4098
4099 rc = sfc_mae_actions_bundle_sync(action, &bundle, ctx.spec, error);
4100 if (rc != 0)
4101 goto fail_rule_parse_action;
4102
4103 rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh,
4104 &ctx.encap_header);
4105 if (rc != 0)
4106 goto fail_process_encap_header;
4107
4108 if (ctx.n_counters > 1) {
4109 rc = ENOTSUP;
4110 sfc_err(sa, "too many count actions requested: %u",
4111 ctx.n_counters);
4112 goto fail_nb_count;
4113 }
4114
4115 switch (spec_mae->ft_rule_type) {
4116 case SFC_FT_RULE_NONE:
4117 break;
4118 case SFC_FT_RULE_JUMP:
4119 /* Workaround. See sfc_flow_parse_rte_to_mae() */
4120 rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, ctx.spec);
4121 if (rc != 0)
4122 goto fail_workaround_jump_delivery;
4123
4124 ctx.counter_ft = spec_mae->ft;
4125 break;
4126 case SFC_FT_RULE_GROUP:
4127 /*
4128 * Packets that go to the rule's AR have FT mark set (from the
4129 * JUMP rule OR's RECIRC_ID). Remove this mark in matching
4130 * packets. The user may have provided their own action
4131 * MARK above, so don't check the return value here.
4132 */
4133 (void)efx_mae_action_set_populate_mark(ctx.spec, 0);
4134
4135 ctx.ft_group_hit_counter = &spec_mae->ft->group_hit_counter;
4136 break;
4137 default:
4138 SFC_ASSERT(B_FALSE);
4139 }
4140
4141 spec_mae->action_set = sfc_mae_action_set_attach(sa, &ctx);
4142 if (spec_mae->action_set != NULL) {
4143 sfc_mae_encap_header_del(sa, ctx.encap_header);
4144 efx_mae_action_set_spec_fini(sa->nic, ctx.spec);
4145 return 0;
4146 }
4147
4148 rc = sfc_mae_action_set_add(sa, actions, &ctx, &spec_mae->action_set);
4149 if (rc != 0)
4150 goto fail_action_set_add;
4151
4152 return 0;
4153
4154 fail_action_set_add:
4155 fail_workaround_jump_delivery:
4156 fail_nb_count:
4157 sfc_mae_encap_header_del(sa, ctx.encap_header);
4158
4159 fail_process_encap_header:
4160 fail_rule_parse_action:
4161 sfc_mae_mac_addr_del(sa, ctx.src_mac);
4162 sfc_mae_mac_addr_del(sa, ctx.dst_mac);
4163 efx_mae_action_set_spec_fini(sa->nic, ctx.spec);
4164
4165 fail_enforce_ft_count:
4166 fail_enforce_ft_decap:
4167 fail_action_set_spec_init:
4168 if (rc > 0 && rte_errno == 0) {
4169 rc = rte_flow_error_set(error, rc,
4170 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4171 NULL, "Failed to process the action");
4172 }
4173 return rc;
4174 }
4175
4176 static bool
4177 sfc_mae_rules_class_cmp(struct sfc_adapter *sa,
4178 const efx_mae_match_spec_t *left,
4179 const efx_mae_match_spec_t *right)
4180 {
4181 bool have_same_class;
4182 int rc;
4183
4184 rc = efx_mae_match_specs_class_cmp(sa->nic, left, right,
4185 &have_same_class);
4186
4187 return (rc == 0) ? have_same_class : false;
4188 }
4189
4190 static int
4191 sfc_mae_outer_rule_class_verify(struct sfc_adapter *sa,
4192 struct sfc_mae_outer_rule *rule)
4193 {
4194 struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
4195 struct sfc_mae_outer_rule *entry;
4196 struct sfc_mae *mae = &sa->mae;
4197
4198 if (fw_rsrc->rule_id.id != EFX_MAE_RSRC_ID_INVALID) {
4199 /* An active rule is being reused. Its class is known to be valid. */
4200 return 0;
4201 }
4202
4203 TAILQ_FOREACH_REVERSE(entry, &mae->outer_rules,
4204 sfc_mae_outer_rules, entries) {
4205 const efx_mae_match_spec_t *left = entry->match_spec;
4206 const efx_mae_match_spec_t *right = rule->match_spec;
4207
4208 if (entry == rule)
4209 continue;
4210
4211 if (sfc_mae_rules_class_cmp(sa, left, right))
4212 return 0;
4213 }
4214
4215 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
4216 "support for outer frame pattern items is not guaranteed; "
4217 "other than that, the items are valid from SW standpoint");
4218 return 0;
4219 }
4220
4221 static int
4222 sfc_mae_action_rule_class_verify(struct sfc_adapter *sa,
4223 struct sfc_flow_spec_mae *spec)
4224 {
4225 const struct rte_flow *entry;
4226
4227 if (spec->match_spec == NULL)
4228 return 0;
4229
4230 TAILQ_FOREACH_REVERSE(entry, &sa->flow_list, sfc_flow_list, entries) {
4231 const struct sfc_flow_spec *entry_spec = &entry->spec;
4232 const struct sfc_flow_spec_mae *es_mae = &entry_spec->mae;
4233 const efx_mae_match_spec_t *left = es_mae->match_spec;
4234 const efx_mae_match_spec_t *right = spec->match_spec;
4235
4236 switch (entry_spec->type) {
4237 case SFC_FLOW_SPEC_FILTER:
4238 /* Ignore VNIC-level flows */
4239 break;
4240 case SFC_FLOW_SPEC_MAE:
4241 if (sfc_mae_rules_class_cmp(sa, left, right))
4242 return 0;
4243 break;
4244 default:
4245 SFC_ASSERT(false);
4246 }
4247 }
4248
4249 sfc_info(sa, "for now, the HW doesn't support rule validation, and HW "
4250 "support for inner frame pattern items is not guaranteed; "
4251 "other than that, the items are valid from SW standpoint");
4252 return 0;
4253 }
4254
4255 /**
4256 * Confirm that a given flow can be accepted by the FW.
4257 *
4258 * @param sa
4259 * Software adapter context
4260 * @param flow
4261 * Flow to be verified
4262 * @return
4263 * Zero on success and non-zero in the case of error.
4264 * A special value of EAGAIN indicates that the adapter is
4265 * not in started state. This state is compulsory because
4266 * it only makes sense to compare the rule class of the flow
4267 * being validated with classes of the active rules.
4268 * Such classes are wittingly supported by the FW.
4269 */
4270 int
4271 sfc_mae_flow_verify(struct sfc_adapter *sa,
4272 struct rte_flow *flow)
4273 {
4274 struct sfc_flow_spec *spec = &flow->spec;
4275 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
4276 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
4277 int rc;
4278
4279 SFC_ASSERT(sfc_adapter_is_locked(sa));
4280
4281 if (sa->state != SFC_ETHDEV_STARTED)
4282 return EAGAIN;
4283
4284 if (outer_rule != NULL) {
4285 rc = sfc_mae_outer_rule_class_verify(sa, outer_rule);
4286 if (rc != 0)
4287 return rc;
4288 }
4289
4290 return sfc_mae_action_rule_class_verify(sa, spec_mae);
4291 }
4292
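/*
 * Illustrative sketch (not part of the driver): the verification entry
 * point above is reached through rte_flow_validate(). The port has to be
 * started already; otherwise EAGAIN is returned, as documented above.
 * Function and variable names here are placeholders.
 *
 *	static int
 *	validate_transfer_flow(uint16_t port_id,
 *			       const struct rte_flow_item pattern[],
 *			       const struct rte_flow_action actions[])
 *	{
 *		static const struct rte_flow_attr attr = { .transfer = 1 };
 *		struct rte_flow_error err;
 *		int rc;
 *
 *		rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 *		if (rc != 0)
 *			printf("flow not accepted: %s\n",
 *			       err.message != NULL ? err.message : "(none)");
 *		return rc;
 *	}
 */
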
4293 int
4294 sfc_mae_flow_insert(struct sfc_adapter *sa,
4295 struct rte_flow *flow)
4296 {
4297 struct sfc_flow_spec *spec = &flow->spec;
4298 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
4299 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
4300 struct sfc_mae_action_set *action_set = spec_mae->action_set;
4301 struct sfc_mae_fw_rsrc *fw_rsrc;
4302 int rc;
4303
4304 SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
4305
4306 if (outer_rule != NULL) {
4307 rc = sfc_mae_outer_rule_enable(sa, outer_rule,
4308 spec_mae->match_spec);
4309 if (rc != 0)
4310 goto fail_outer_rule_enable;
4311 }
4312
4313 if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
4314 spec_mae->ft->reset_jump_hit_counter =
4315 spec_mae->ft->group_hit_counter;
4316 }
4317
4318 if (action_set == NULL) {
4319 sfc_dbg(sa, "enabled flow=%p (no AR)", flow);
4320 return 0;
4321 }
4322
4323 rc = sfc_mae_action_set_enable(sa, action_set);
4324 if (rc != 0)
4325 goto fail_action_set_enable;
4326
4327 if (action_set->n_counters > 0) {
4328 rc = sfc_mae_counter_start(sa);
4329 if (rc != 0) {
4330 sfc_err(sa, "failed to start MAE counters support: %s",
4331 rte_strerror(rc));
4332 goto fail_mae_counter_start;
4333 }
4334 }
4335
4336 fw_rsrc = &action_set->fw_rsrc;
4337
4338 rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
4339 NULL, &fw_rsrc->aset_id,
4340 &spec_mae->rule_id);
4341 if (rc != 0)
4342 goto fail_action_rule_insert;
4343
4344 sfc_dbg(sa, "enabled flow=%p: AR_ID=0x%08x",
4345 flow, spec_mae->rule_id.id);
4346
4347 return 0;
4348
4349 fail_action_rule_insert:
4350 fail_mae_counter_start:
4351 sfc_mae_action_set_disable(sa, action_set);
4352
4353 fail_action_set_enable:
4354 if (outer_rule != NULL)
4355 sfc_mae_outer_rule_disable(sa, outer_rule);
4356
4357 fail_outer_rule_enable:
4358 return rc;
4359 }
4360
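/*
 * Illustrative sketch (not part of the driver): the insertion path above
 * is triggered by rte_flow_create() on a transfer rule; the pattern and
 * action arrays follow the earlier examples. Function and variable names
 * are placeholders.
 *
 *	static struct rte_flow *
 *	create_transfer_flow(uint16_t port_id,
 *			     const struct rte_flow_item pattern[],
 *			     const struct rte_flow_action actions[],
 *			     struct rte_flow_error *err)
 *	{
 *		static const struct rte_flow_attr attr = { .transfer = 1 };
 *
 *		return rte_flow_create(port_id, &attr, pattern, actions, err);
 *	}
 */
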
4361 int
4362 sfc_mae_flow_remove(struct sfc_adapter *sa,
4363 struct rte_flow *flow)
4364 {
4365 struct sfc_flow_spec *spec = &flow->spec;
4366 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
4367 struct sfc_mae_action_set *action_set = spec_mae->action_set;
4368 struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
4369 int rc;
4370
4371 if (action_set == NULL) {
4372 sfc_dbg(sa, "disabled flow=%p (no AR)", flow);
4373 goto skip_action_rule;
4374 }
4375
4376 SFC_ASSERT(spec_mae->rule_id.id != EFX_MAE_RSRC_ID_INVALID);
4377
4378 rc = efx_mae_action_rule_remove(sa->nic, &spec_mae->rule_id);
4379 if (rc != 0) {
4380 sfc_err(sa, "failed to disable flow=%p with AR_ID=0x%08x: %s",
4381 flow, spec_mae->rule_id.id, strerror(rc));
4382 }
4383 sfc_dbg(sa, "disabled flow=%p with AR_ID=0x%08x",
4384 flow, spec_mae->rule_id.id);
4385 spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
4386
4387 sfc_mae_action_set_disable(sa, action_set);
4388
4389 skip_action_rule:
4390 if (outer_rule != NULL)
4391 sfc_mae_outer_rule_disable(sa, outer_rule);
4392
4393 return 0;
4394 }
4395
4396 static int
4397 sfc_mae_query_counter(struct sfc_adapter *sa,
4398 struct sfc_flow_spec_mae *spec,
4399 const struct rte_flow_action *action,
4400 struct rte_flow_query_count *data,
4401 struct rte_flow_error *error)
4402 {
4403 struct sfc_mae_action_set *action_set = spec->action_set;
4404 const struct rte_flow_action_count *conf = action->conf;
4405 unsigned int i;
4406 int rc;
4407
4408 if (action_set == NULL || action_set->n_counters == 0) {
4409 return rte_flow_error_set(error, EINVAL,
4410 RTE_FLOW_ERROR_TYPE_ACTION, action,
4411 "Queried flow rule does not have count actions");
4412 }
4413
4414 for (i = 0; i < action_set->n_counters; i++) {
4415 /*
4416 * Get the first available counter of the flow rule if
4417 * counter ID is not specified, provided that this
4418 * counter is not an automatic (implicit) one.
4419 */
4420 if (conf != NULL && action_set->counters[i].rte_id != conf->id)
4421 continue;
4422
4423 rc = sfc_mae_counter_get(&sa->mae.counter_registry.counters,
4424 &action_set->counters[i], data);
4425 if (rc != 0) {
4426 return rte_flow_error_set(error, EINVAL,
4427 RTE_FLOW_ERROR_TYPE_ACTION, action,
4428 "Queried flow rule counter action is invalid");
4429 }
4430
4431 return 0;
4432 }
4433
4434 return rte_flow_error_set(error, ENOENT,
4435 RTE_FLOW_ERROR_TYPE_ACTION, action,
4436 "no such flow rule action or such count ID");
4437 }
4438
4439 int
4440 sfc_mae_flow_query(struct rte_eth_dev *dev,
4441 struct rte_flow *flow,
4442 const struct rte_flow_action *action,
4443 void *data,
4444 struct rte_flow_error *error)
4445 {
4446 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
4447 struct sfc_flow_spec *spec = &flow->spec;
4448 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
4449
4450 switch (action->type) {
4451 case RTE_FLOW_ACTION_TYPE_COUNT:
4452 return sfc_mae_query_counter(sa, spec_mae, action,
4453 data, error);
4454 default:
4455 return rte_flow_error_set(error, ENOTSUP,
4456 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4457 "Query for action of this type is not supported");
4458 }
4459 }
4460
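/*
 * Illustrative sketch (not part of the driver): reading the counter of an
 * existing flow through rte_flow_query(). Passing a COUNT action with no
 * configuration selects the first counter of the rule, matching the
 * behaviour of sfc_mae_query_counter() above. Function and variable names
 * are placeholders.
 *
 *	static int
 *	read_flow_hits(uint16_t port_id, struct rte_flow *flow, uint64_t *hits)
 *	{
 *		static const struct rte_flow_action count_action = {
 *			.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *		};
 *		struct rte_flow_query_count data = { 0 };
 *		struct rte_flow_error err;
 *		int rc;
 *
 *		rc = rte_flow_query(port_id, flow, &count_action, &data, &err);
 *		if (rc == 0 && data.hits_set)
 *			*hits = data.hits;
 *		return rc;
 *	}
 */
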
4461 int
4462 sfc_mae_switchdev_init(struct sfc_adapter *sa)
4463 {
4464 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
4465 struct sfc_mae *mae = &sa->mae;
4466 efx_mport_sel_t pf;
4467 efx_mport_sel_t phy;
4468 int rc;
4469
4470 sfc_log_init(sa, "entry");
4471
4472 if (!sa->switchdev) {
4473 sfc_log_init(sa, "switchdev is not enabled - skip");
4474 return 0;
4475 }
4476
4477 if (mae->status != SFC_MAE_STATUS_ADMIN) {
4478 rc = ENOTSUP;
4479 sfc_err(sa, "failed to init switchdev - no admin MAE privilege");
4480 goto fail_no_mae;
4481 }
4482
4483 rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
4484 &pf);
4485 if (rc != 0) {
4486 sfc_err(sa, "failed get PF mport");
4487 goto fail_pf_get;
4488 }
4489
4490 rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, &phy);
4491 if (rc != 0) {
4492 sfc_err(sa, "failed get PHY mport");
4493 goto fail_phy_get;
4494 }
4495
4496 rc = sfc_mae_rule_add_mport_match_deliver(sa, &pf, &phy,
4497 SFC_MAE_RULE_PRIO_LOWEST,
4498 &mae->switchdev_rule_pf_to_ext);
4499 if (rc != 0) {
4500 sfc_err(sa, "failed add MAE rule to forward from PF to PHY");
4501 goto fail_pf_add;
4502 }
4503
4504 rc = sfc_mae_rule_add_mport_match_deliver(sa, &phy, &pf,
4505 SFC_MAE_RULE_PRIO_LOWEST,
4506 &mae->switchdev_rule_ext_to_pf);
4507 if (rc != 0) {
4508 sfc_err(sa, "failed add MAE rule to forward from PHY to PF");
4509 goto fail_phy_add;
4510 }
4511
4512 sfc_log_init(sa, "done");
4513
4514 return 0;
4515
4516 fail_phy_add:
4517 sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
4518
4519 fail_pf_add:
4520 fail_phy_get:
4521 fail_pf_get:
4522 fail_no_mae:
4523 sfc_log_init(sa, "failed: %s", rte_strerror(rc));
4524 return rc;
4525 }
4526
4527 void
4528 sfc_mae_switchdev_fini(struct sfc_adapter *sa)
4529 {
4530 struct sfc_mae *mae = &sa->mae;
4531
4532 if (!sa->switchdev)
4533 return;
4534
4535 sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext);
4536 sfc_mae_rule_del(sa, mae->switchdev_rule_ext_to_pf);
4537 }
4538