/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/* Mbuf dynamic field offset for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic flag mask for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy flow item or action data.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item - for flow item conversion.
 *   rte_flow_desc_action - for flow action conversion.
 * @param type
 *   Offset into the desc table, or a negative value for PMD-private flow
 *   elements.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/*
	 * Allow PMD-private flow elements (negative type values).
	 */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;

	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}
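
/*
 * For reference, MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth))
 * expands to the following designated initializer (illustrative only):
 *
 *	[RTE_FLOW_ITEM_TYPE_ETH] = {
 *		.name = "ETH",
 *		.size = sizeof(struct rte_flow_item_eth),
 *		.desc_fn = NULL,
 *	},
 *
 * so rte_flow_desc_item[] below can be indexed directly by item type.
 */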

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
		     sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC, sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST, sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC, sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST, sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC, sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST, sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/*
	 * Indirect action represented as handle of type
	 * (struct rte_flow_action_handle *) stored in conf field (see
	 * struct rte_flow_action); no need for an additional structure to
	 * store the indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);
	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
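
/*
 * Usage sketch (illustrative, not part of this file): an application is
 * expected to register the metadata dynamic field before using the META
 * item or SET_META action, then access the field through the
 * RTE_FLOW_DYNF_METADATA() helper. Error handling trimmed for brevity.
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_panic("cannot register metadata dynfield\n");
 *	...
 *	if (mbuf->ol_flags & rte_flow_dynf_metadata_mask)
 *		printf("metadata: %u\n", *RTE_FLOW_DYNF_METADATA(mbuf));
 */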

static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);
		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}
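
/*
 * Usage sketch (illustrative only): validate a rule before creating it.
 * The pattern and actions below are minimal examples; a real application
 * would fill in specs and masks as needed.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (flow == NULL)
 *		printf("flow error: %s\n", err.message ? err.message : "?");
 */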

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
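
/*
 * Usage sketch (illustrative only): a PMD callback typically reports a
 * failure through rte_flow_error_set(), which fills the error structure,
 * sets rte_errno and returns the negated error code in one statement:
 *
 *	if (attr->egress)
 *		return rte_flow_error_set(error, ENOTSUP,
 *					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
 *					  attr, "egress is not supported");
 */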

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD-private flow items. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD-private flow actions. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		return 0;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		return sizeof(*attr);
	case RTE_FLOW_CONV_OP_ITEM:
		return rte_flow_conv_pattern(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_ACTION:
		return rte_flow_conv_actions(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_PATTERN:
		return rte_flow_conv_pattern(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_ACTIONS:
		return rte_flow_conv_actions(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_RULE:
		return rte_flow_conv_rule(dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		return rte_flow_conv_name(0, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		return rte_flow_conv_name(1, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		return rte_flow_conv_name(0, 1, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		return rte_flow_conv_name(1, 1, dst, size, src, error);
	}
	return rte_flow_error_set
		(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "unknown object conversion operation");
}
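
/*
 * Usage sketch (illustrative only): since every conversion helper returns
 * the number of bytes needed regardless of the destination size, callers
 * usually invoke rte_flow_conv() twice: once with a zero-sized buffer to
 * compute the required storage, then again with an allocation of that size.
 *
 *	struct rte_flow_conv_rule rule = {
 *		.attr_ro = attr, .pattern_ro = pattern, .actions_ro = actions,
 *	};
 *	int len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &err);
 *	if (len > 0) {
 *		struct rte_flow_conv_rule *copy = malloc(len);
 *		if (copy != NULL)
 *			rte_flow_conv(RTE_FLOW_CONV_OP_RULE, copy, len,
 *				      &rule, &err);
 *	}
 */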

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);
	return ret;
}

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
		  FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
			uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);
	return handle;
}
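
/*
 * Usage sketch (illustrative only): an indirect action is created once and
 * then referenced from flow rules through RTE_FLOW_ACTION_TYPE_INDIRECT,
 * with the handle itself carried in the conf field:
 *
 *	struct rte_flow_indir_action_conf ia_conf = { .ingress = 1 };
 *	struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_action_handle *h =
 *		rte_flow_action_handle_create(port_id, &ia_conf, &count, &err);
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = h },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */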

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	return flow_err(port_id, ret, error);
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	return flow_err(port_id, ret, error);
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	return flow_err(port_id, ret, error);
}

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		return flow_err(port_id,
				ops->tunnel_decap_set(dev, tunnel, actions,
						      num_of_actions, error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		return flow_err(port_id,
				ops->tunnel_match(dev, tunnel, items,
						  num_of_items, error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		return flow_err(port_id,
				ops->get_restore_info(dev, m, restore_info,
						      error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		return flow_err(port_id,
				ops->tunnel_action_decap_release(dev, actions,
								 num_of_actions,
								 error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		return flow_err(port_id,
				ops->tunnel_item_release(dev, items,
							 num_of_items, error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	return flow_err(port_id,
			ops->pick_transfer_proxy(dev, proxy_port_id, error),
			error);
}
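
/*
 * Usage sketch (illustrative only): transfer flow rules (attr.transfer set)
 * must be managed through the proxy port, so applications resolve it first
 * and then create the rule there:
 *
 *	uint16_t proxy_id;
 *
 *	if (rte_flow_pick_transfer_proxy(port_id, &proxy_id, &err) == 0)
 *		flow = rte_flow_create(proxy_id, &attr, pattern, actions,
 *				       &err);
 */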

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);
	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	return flow_err(port_id, ret, error);
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		return flow_err(port_id,
				ops->info_get(dev, port_info, queue_info, error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.\n",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
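
/*
 * Usage sketch (illustrative only): the flow engine is configured after
 * rte_eth_dev_configure() and before rte_eth_dev_start(), sizing the
 * pre-allocated flow objects and operation queues:
 *
 *	const struct rte_flow_port_attr port_attr = { .nb_counters = 1024 };
 *	const struct rte_flow_queue_attr q_attr = { .size = 64 };
 *	const struct rte_flow_queue_attr *q_attrs[] = { &q_attr };
 *
 *	if (rte_flow_configure(port_id, &port_attr, 1, q_attrs, &err) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure flow engine\n");
 */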

struct rte_flow_pattern_template *
rte_flow_pattern_template_create(uint16_t port_id,
		const struct rte_flow_pattern_template_attr *template_attr,
		const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_pattern_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->pattern_template_create)) {
		template = ops->pattern_template_create(dev, template_attr,
							pattern, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);
		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

int
rte_flow_pattern_template_destroy(uint16_t port_id,
		struct rte_flow_pattern_template *pattern_template,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(pattern_template == NULL))
		return 0;
	if (likely(!!ops->pattern_template_destroy)) {
		return flow_err(port_id,
				ops->pattern_template_destroy(dev,
							      pattern_template,
							      error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_actions_template *
rte_flow_actions_template_create(uint16_t port_id,
		const struct rte_flow_actions_template_attr *template_attr,
		const struct rte_flow_action actions[],
		const struct rte_flow_action masks[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_actions_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (masks == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" masks is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->actions_template_create)) {
		template = ops->actions_template_create(dev, template_attr,
							actions, masks, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);
		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

int
rte_flow_actions_template_destroy(uint16_t port_id,
		struct rte_flow_actions_template *actions_template,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(actions_template == NULL))
		return 0;
	if (likely(!!ops->actions_template_destroy)) {
		return flow_err(port_id,
				ops->actions_template_destroy(dev,
							      actions_template,
							      error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_template_table *
rte_flow_template_table_create(uint16_t port_id,
		const struct rte_flow_template_table_attr *table_attr,
		struct rte_flow_pattern_template *pattern_templates[],
		uint8_t nb_pattern_templates,
		struct rte_flow_actions_template *actions_templates[],
		uint8_t nb_actions_templates,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_template_table *table;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (table_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" table attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern_templates == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern templates is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions_templates == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions templates is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->template_table_create)) {
		table = ops->template_table_create(dev, table_attr,
					pattern_templates, nb_pattern_templates,
					actions_templates, nb_actions_templates,
					error);
		if (table == NULL)
			flow_err(port_id, -rte_errno, error);
		return table;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}
1698
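/**
 * A minimal usage sketch, not part of the library: combining one pattern
 * template and one actions template (pattern_template and actions_template
 * are assumed to have been created earlier) into a table sized for the
 * expected number of rules on hypothetical port 0.
 *
 * @code
 *	struct rte_flow_template_table_attr attr = {
 *		.flow_attr = { .ingress = 1 },
 *		.nb_flows = 1024,
 *	};
 *	struct rte_flow_pattern_template *pt[] = { pattern_template };
 *	struct rte_flow_actions_template *at[] = { actions_template };
 *	struct rte_flow_error err;
 *	struct rte_flow_template_table *tbl =
 *		rte_flow_template_table_create(0, &attr, pt, 1, at, 1, &err);
 * @endcode
 */
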
int
rte_flow_template_table_destroy(uint16_t port_id,
		struct rte_flow_template_table *template_table,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(template_table == NULL))
		return 0;
	if (likely(!!ops->template_table_destroy)) {
		return flow_err(port_id,
				ops->template_table_destroy(dev,
						template_table,
						error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL, rte_strerror(ENOTSUP));
}

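/*
 * Note: the queue-based (async) operations below are data-path calls.
 * Unlike the template and table functions above, they do not validate
 * their arguments or the presence of the driver callback on each call;
 * the port is assumed to have been set up via rte_flow_configure()
 * beforehand, presumably to keep the enqueue path cheap.
 */
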
struct rte_flow *
rte_flow_async_create(uint16_t port_id,
		      uint32_t queue_id,
		      const struct rte_flow_op_attr *op_attr,
		      struct rte_flow_template_table *template_table,
		      const struct rte_flow_item pattern[],
		      uint8_t pattern_template_index,
		      const struct rte_flow_action actions[],
		      uint8_t actions_template_index,
		      void *user_data,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow *flow;

	flow = ops->async_create(dev, queue_id,
				 op_attr, template_table,
				 pattern, pattern_template_index,
				 actions, actions_template_index,
				 user_data, error);
	if (flow == NULL)
		flow_err(port_id, -rte_errno, error);
	return flow;
}

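/**
 * A minimal usage sketch, not part of the library: enqueueing a rule
 * creation on queue 0 of port 0 (hypothetical values). "tbl" is a table
 * created as above, and "pattern"/"actions" supply only the fields left
 * unmasked in templates 0 of that table; the completion must later be
 * fetched with rte_flow_pull().
 *
 * @code
 *	struct rte_flow_op_attr op_attr = { .postpone = 0 };
 *	struct rte_flow_error err;
 *	struct rte_flow *flow =
 *		rte_flow_async_create(0, 0, &op_attr, tbl,
 *				      pattern, 0, actions, 0, NULL, &err);
 * @endcode
 */
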
int
rte_flow_async_destroy(uint16_t port_id,
		       uint32_t queue_id,
		       const struct rte_flow_op_attr *op_attr,
		       struct rte_flow *flow,
		       void *user_data,
		       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	return flow_err(port_id,
			ops->async_destroy(dev, queue_id,
					   op_attr, flow,
					   user_data, error),
			error);
}

int
rte_flow_push(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	return flow_err(port_id,
			ops->push(dev, queue_id, error),
			error);
}

int
rte_flow_pull(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_op_result res[],
	      uint16_t n_res,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->pull(dev, queue_id, res, n_res, error);
	return ret ? ret : flow_err(port_id, ret, error);
}

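/**
 * A minimal polling sketch, not part of the library: flushing any
 * postponed operations on queue 0 of port 0 with rte_flow_push(), then
 * draining completion results with rte_flow_pull() until the queue
 * returns fewer results than the buffer holds (sizes are hypothetical).
 *
 * @code
 *	struct rte_flow_op_result results[32];
 *	struct rte_flow_error err;
 *	int n;
 *
 *	rte_flow_push(0, 0, &err);
 *	do {
 *		n = rte_flow_pull(0, 0, results, RTE_DIM(results), &err);
 *	} while (n == (int)RTE_DIM(results));
 * @endcode
 */
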
struct rte_flow_action_handle *
rte_flow_async_action_handle_create(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_indir_action_conf *indir_action_conf,
		const struct rte_flow_action *action,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_action_handle *handle;

	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
					indir_action_conf, action,
					user_data, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);
	return handle;
}

int
rte_flow_async_action_handle_destroy(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
					action_handle, user_data, error);
	return flow_err(port_id, ret, error);
}

int
rte_flow_async_action_handle_update(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		const void *update,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
					action_handle, update,
					user_data, error);
	return flow_err(port_id, ret, error);
}

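/**
 * A minimal lifecycle sketch, not part of the library: asynchronously
 * creating, updating and destroying an indirect RSS action on queue 0 of
 * port 0. "rss_conf" and "new_rss_conf" stand for caller-provided
 * struct rte_flow_action_rss configurations; completions are fetched
 * with rte_flow_pull() as in the sketch above.
 *
 * @code
 *	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	const struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss_conf,
 *	};
 *	struct rte_flow_op_attr op_attr = { .postpone = 0 };
 *	struct rte_flow_error err;
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_async_action_handle_create(0, 0, &op_attr, &conf,
 *						    &action, NULL, &err);
 *	rte_flow_async_action_handle_update(0, 0, &op_attr, handle,
 *					    &new_rss_conf, NULL, &err);
 *	rte_flow_async_action_handle_destroy(0, 0, &op_attr, handle,
 *					     NULL, &err);
 * @endcode
 */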