/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_common.h>
#include <rte_spinlock.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_switch.h"

/**
 * Switch port registry entry.
 *
 * Drivers aware of RTE switch domains also have to maintain RTE switch
 * port IDs for the RTE ethdev instances they operate. These IDs are
 * supposed to stand for physical interconnect entities, for example,
 * PCIe functions.
 *
 * In terms of MAE, a physical interconnect entity can be referred to using
 * an MPORT selector, that is, a 32-bit value. RTE switch port IDs, in turn,
 * are 16-bit values, so an indirect mapping has to be maintained:
 *
 * +--------------------+        +---------------------------------------+
 * | RTE switch port ID | ------ |         MAE switch port entry         |
 * +--------------------+        |         ---------------------         |
 *                               |                                       |
 *                               | Entity (PCIe function) MPORT selector |
 *                               |                   +                   |
 *                               |  Port type (independent/representor)  |
 *                               +---------------------------------------+
 *
 * This mapping includes a port type to ensure that the RTE switch port ID
 * of a represented entity and that of its representor are different in
 * the case when the entity gets plugged into DPDK and not into a guest.
 *
 * Entry data also includes the RTE ethdev's own MPORT. This value
 * coincides with the entity MPORT in the case of independent ports.
 * In the case of representors, this ID is not a selector and refers
 * to an allocatable object (that is, it is likely to change on RTE
 * ethdev replug). The flow API backend must use this value rather
 * than entity_mport to support the flow rule action PORT_ID.
 */
struct sfc_mae_switch_port {
	TAILQ_ENTRY(sfc_mae_switch_port) switch_domain_ports;

	/** RTE ethdev MPORT */
	efx_mport_sel_t ethdev_mport;
	/** RTE ethdev port ID */
	uint16_t ethdev_port_id;

	/** Entity (PCIe function) MPORT selector */
	efx_mport_sel_t entity_mport;
	/** Port type (independent/representor) */
	enum sfc_mae_switch_port_type type;
	/** RTE switch port ID */
	uint16_t id;
};
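
/*
 * Illustration (not part of the driver logic): if the same PCIe function is
 * plugged into DPDK both directly and via a representor, the two registry
 * entries share the entity MPORT selector but differ in port type, so they
 * get distinct RTE switch port IDs, for instance:
 *
 *   entity_mport     type         id
 *   ---------------  -----------  --
 *   PF 0 selector    independent   0
 *   PF 0 selector    representor   1
 */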

TAILQ_HEAD(sfc_mae_switch_ports, sfc_mae_switch_port);

/**
 * Switch domain registry entry.
 *
 * Even if an RTE ethdev instance gets unplugged, the corresponding
 * entry in the switch port registry will not be removed because the
 * entity (PCIe function) MPORT is static and cannot change. If this
 * RTE ethdev gets plugged back, the entry will be reused, and the
 * RTE switch port ID will be the same.
 */
struct sfc_mae_switch_domain {
	TAILQ_ENTRY(sfc_mae_switch_domain) entries;

	/** HW switch ID */
	struct sfc_hw_switch_id *hw_switch_id;
	/** The number of ports in the switch port registry */
	unsigned int nb_ports;
	/** Switch port registry */
	struct sfc_mae_switch_ports ports;
	/** RTE switch domain ID allocated for a group of devices */
	uint16_t id;
};

TAILQ_HEAD(sfc_mae_switch_domains, sfc_mae_switch_domain);

/**
 * MAE representation of RTE switch infrastructure.
 *
 * It is possible that an RTE flow API client tries to insert a rule
 * referencing an RTE ethdev deployed on top of a different physical
 * device (it may belong to the same vendor or not). This particular
 * driver/engine cannot support this and has to turn down such rules.
 *
 * Technically, it is the HW switch identifier which, when queried for
 * each RTE ethdev instance, indicates the relationship between the
 * instances. In the meantime, RTE flow API clients also need to somehow
 * figure out the relationship between RTE ethdev instances in advance.
 *
 * The concept of RTE switch domains resolves this issue. The driver
 * maintains a static list of switch domains which is easy to browse,
 * and each RTE ethdev fills in the RTE switch parameters in the device
 * information structure which is made available to clients.
 *
 * Even if all RTE ethdev instances belonging to a switch domain get
 * unplugged, the corresponding entry in the switch domain registry
 * will not be removed because the corresponding HW switch exists
 * regardless of its ports being plugged to DPDK or kept aside.
 * If a port gets plugged back to DPDK, the corresponding
 * RTE ethdev will indicate the same RTE switch domain ID.
 */
struct sfc_mae_switch {
	/** A lock to protect the whole structure */
	rte_spinlock_t lock;
	/** Switch domain registry */
	struct sfc_mae_switch_domains domains;
};

static struct sfc_mae_switch sfc_mae_switch = {
	.lock = RTE_SPINLOCK_INITIALIZER,
	.domains = TAILQ_HEAD_INITIALIZER(sfc_mae_switch.domains),
};


/* This function expects to be called only when the lock is held */
static struct sfc_mae_switch_domain *
sfc_mae_find_switch_domain_by_id(uint16_t switch_domain_id)
{
	struct sfc_mae_switch_domain *domain;

	SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));

	TAILQ_FOREACH(domain, &sfc_mae_switch.domains, entries) {
		if (domain->id == switch_domain_id)
			return domain;
	}

	return NULL;
}

/* This function expects to be called only when the lock is held */
static struct sfc_mae_switch_domain *
sfc_mae_find_switch_domain_by_hw_switch_id(const struct sfc_hw_switch_id *id)
{
	struct sfc_mae_switch_domain *domain;

	SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));

	TAILQ_FOREACH(domain, &sfc_mae_switch.domains, entries) {
		if (sfc_hw_switch_ids_equal(domain->hw_switch_id, id))
			return domain;
	}

	return NULL;
}

int
sfc_mae_assign_switch_domain(struct sfc_adapter *sa,
			     uint16_t *switch_domain_id)
{
	struct sfc_hw_switch_id *hw_switch_id;
	struct sfc_mae_switch_domain *domain;
	int rc;

	rte_spinlock_lock(&sfc_mae_switch.lock);

	rc = sfc_hw_switch_id_init(sa, &hw_switch_id);
	if (rc != 0)
		goto fail_hw_switch_id_init;

	domain = sfc_mae_find_switch_domain_by_hw_switch_id(hw_switch_id);
	if (domain != NULL) {
		sfc_hw_switch_id_fini(sa, hw_switch_id);
		goto done;
	}

	domain = rte_zmalloc("sfc_mae_switch_domain", sizeof(*domain), 0);
	if (domain == NULL) {
		rc = ENOMEM;
		goto fail_mem_alloc;
	}

	/*
	 * This code belongs to the driver init path, that is, negation of
	 * error codes is done at the end of the path by sfc_eth_dev_init().
	 * RTE APIs return negated error codes, so drop the negation here.
	 */
	rc = -rte_eth_switch_domain_alloc(&domain->id);
	if (rc != 0)
		goto fail_domain_alloc;

	domain->hw_switch_id = hw_switch_id;

	TAILQ_INIT(&domain->ports);

	TAILQ_INSERT_TAIL(&sfc_mae_switch.domains, domain, entries);

done:
	*switch_domain_id = domain->id;

	rte_spinlock_unlock(&sfc_mae_switch.lock);

	return 0;

fail_domain_alloc:
	rte_free(domain);

fail_mem_alloc:
	sfc_hw_switch_id_fini(sa, hw_switch_id);
	rte_spinlock_unlock(&sfc_mae_switch.lock);

fail_hw_switch_id_init:
	return rc;
}
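
/*
 * Illustrative caller sketch (not part of this file): on the adapter attach
 * path, an RTE ethdev would typically obtain its switch domain ID as shown
 * below; the local variable and label names are assumptions made only for
 * the example. The return value follows the positive errno convention.
 *
 *	uint16_t switch_domain_id;
 *	int rc;
 *
 *	rc = sfc_mae_assign_switch_domain(sa, &switch_domain_id);
 *	if (rc != 0)
 *		goto fail_mae_assign_switch_domain;
 */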

/* This function expects to be called only when the lock is held */
static struct sfc_mae_switch_port *
sfc_mae_find_switch_port_by_entity(const struct sfc_mae_switch_domain *domain,
				   const efx_mport_sel_t *entity_mportp,
				   enum sfc_mae_switch_port_type type)
{
	struct sfc_mae_switch_port *port;

	SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));

	TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
		if (port->entity_mport.sel == entity_mportp->sel &&
		    port->type == type)
			return port;
	}

	return NULL;
}

int
sfc_mae_assign_switch_port(uint16_t switch_domain_id,
			   const struct sfc_mae_switch_port_request *req,
			   uint16_t *switch_port_id)
{
	struct sfc_mae_switch_domain *domain;
	struct sfc_mae_switch_port *port;
	int rc;

	rte_spinlock_lock(&sfc_mae_switch.lock);

	domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
	if (domain == NULL) {
		rc = EINVAL;
		goto fail_find_switch_domain_by_id;
	}

	port = sfc_mae_find_switch_port_by_entity(domain, req->entity_mportp,
						  req->type);
	if (port != NULL)
		goto done;

	port = rte_zmalloc("sfc_mae_switch_port", sizeof(*port), 0);
	if (port == NULL) {
		rc = ENOMEM;
		goto fail_mem_alloc;
	}

	port->entity_mport.sel = req->entity_mportp->sel;
	port->type = req->type;

	port->id = (domain->nb_ports++);

	TAILQ_INSERT_TAIL(&domain->ports, port, switch_domain_ports);

done:
	port->ethdev_mport = *req->ethdev_mportp;
	port->ethdev_port_id = req->ethdev_port_id;

	*switch_port_id = port->id;

	rte_spinlock_unlock(&sfc_mae_switch.lock);

	return 0;

fail_mem_alloc:
fail_find_switch_domain_by_id:
	rte_spinlock_unlock(&sfc_mae_switch.lock);
	return rc;
}
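
/*
 * Illustrative caller sketch (assumptions marked): a driver instance
 * registering itself as an independent port would fill the request with
 * the fields dereferenced above. The port type constant name and the
 * MPORT/port ID variables are assumptions made only for the example.
 *
 *	struct sfc_mae_switch_port_request req;
 *	uint16_t switch_port_id;
 *	int rc;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
 *	req.entity_mportp = &entity_mport;
 *	req.ethdev_mportp = &ethdev_mport;
 *	req.ethdev_port_id = ethdev_port_id;
 *
 *	rc = sfc_mae_assign_switch_port(switch_domain_id, &req,
 *					&switch_port_id);
 */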

/* This function expects to be called only when the lock is held */
static int
sfc_mae_find_switch_port_by_ethdev(uint16_t switch_domain_id,
				   uint16_t ethdev_port_id,
				   efx_mport_sel_t *mport_sel)
{
	struct sfc_mae_switch_domain *domain;
	struct sfc_mae_switch_port *port;

	SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));

	if (ethdev_port_id == RTE_MAX_ETHPORTS)
		return EINVAL;

	domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
	if (domain == NULL)
		return EINVAL;

	TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
		if (port->ethdev_port_id == ethdev_port_id) {
			*mport_sel = port->ethdev_mport;
			return 0;
		}
	}

	return ENOENT;
}

int
sfc_mae_switch_port_by_ethdev(uint16_t switch_domain_id,
			      uint16_t ethdev_port_id,
			      efx_mport_sel_t *mport_sel)
{
	int rc;

	rte_spinlock_lock(&sfc_mae_switch.lock);
	rc = sfc_mae_find_switch_port_by_ethdev(switch_domain_id,
						ethdev_port_id, mport_sel);
	rte_spinlock_unlock(&sfc_mae_switch.lock);

	return rc;
}
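
/*
 * Illustrative caller sketch (not part of this file): when translating the
 * flow rule action PORT_ID, the MAE flow backend is expected to resolve the
 * target ethdev to its own MPORT (not the entity MPORT) as shown below; the
 * variable names are assumptions made only for the example, and the return
 * value follows the positive errno convention used throughout this file.
 *
 *	efx_mport_sel_t mport;
 *	int rc;
 *
 *	rc = sfc_mae_switch_port_by_ethdev(switch_domain_id,
 *					   target_ethdev_port_id, &mport);
 *	if (rc != 0)
 *		return rc;
 */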