/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"

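/*
 * Mark a Verbs spec as matching the inner (encapsulated) headers once a
 * tunnel layer has already been parsed for this flow.
 */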
#define VERBS_SPEC_INNER(item_flags) \
	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)

/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};

/**
 * Discover the maximum number of flow priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] vprio
 *   Expected result variants.
 * @param[in] vprio_n
 *   Number of entries in @p vprio array.
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
static int
flow_verbs_discover_priorities(struct rte_eth_dev *dev,
			       const uint16_t *vprio, int vprio_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->dev_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = priv->drop_queue.hrxq;
	int i;
	int priority = 0;

#if defined(HAVE_MLX5DV_DR_DEVX_PORT) || defined(HAVE_MLX5DV_DR_DEVX_PORT_V35)
	/* If DevX is supported, the driver must support 16 Verbs flow priorities. */
	priority = 16;
	goto out;
#endif
	if (!drop->qp) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
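	/*
	 * Probe: create and immediately destroy a trivial drop flow at each
	 * candidate priority; the highest candidate accepted by the kernel
	 * is the supported maximum.
	 */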
	for (i = 0; i != vprio_n; i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
#if defined(HAVE_MLX5DV_DR_DEVX_PORT) || defined(HAVE_MLX5DV_DR_DEVX_PORT_V35)
out:
#endif
	DRV_LOG(INFO, "port %u supported flow priorities:"
		" 0-%d for ingress or egress root table,"
		" 0-%d for non-root table or transfer root table.",
		dev->data->port_id, priority - 2,
		MLX5_NON_ROOT_FLOW_MAX_PRIO - 1);
	return priority;
}

/**
 * Get Verbs flow counter by index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   mlx5 flow counter index in the container.
 * @param[out] ppool
 *   mlx5 flow counter pool in the container.
 *
 * @return
 *   A pointer to the counter, NULL otherwise.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
			      uint32_t idx,
			      struct mlx5_flow_counter_pool **ppool)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;

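	/*
	 * Counter indexes are 1-based; the high bits may carry the shared
	 * counter flag (an assumption based on the MLX5_CNT_SHARED_OFFSET
	 * mask), so strip them before locating the pool and the offset.
	 */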
	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
	MLX5_ASSERT(pool);
	if (ppool)
		*ppool = pool;
	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}

/**
 * Create Verbs flow counter with Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   mlx5 flow counter object, contains the counter id,
 *   handle of created Verbs flow counter is returned
 *   in cs field (if counters are supported).
 *
 * @return
 *   0 on success, else a negative errno value is returned
 *   and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
			  struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->cdev->ctx;
	struct ibv_counter_set_init_attr init = {
			 .counter_set_id = counter->shared_info.id};

	counter->dcs_when_free = mlx5_glue->create_counter_set(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->cdev->ctx;
	struct ibv_counters_init_attr init = {0};
	struct ibv_counter_attach_attr attach;
	int ret;

	memset(&attach, 0, sizeof(attach));
	counter->dcs_when_free = mlx5_glue->create_counters(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	ret = mlx5_glue->attach_counters(counter->dcs_when_free, &attach, NULL);
	if (!ret) {
		attach.counter_desc = IBV_COUNTER_BYTES;
		attach.index = 1;
		ret = mlx5_glue->attach_counters
					(counter->dcs_when_free, &attach, NULL);
	}
	if (ret) {
		claim_zero(mlx5_glue->destroy_counters(counter->dcs_when_free));
		counter->dcs_when_free = NULL;
		rte_errno = ret;
		return -ret;
	}
	return 0;
#else
	(void)dev;
	(void)counter;
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}


/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   Index to the counter, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t id __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt = NULL;
	uint32_t n_valid = cmng->n_valid;
	uint32_t pool_idx, cnt_idx;
	uint32_t i;
	int ret;

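	/* Find the first pool that still has a counter on its free list. */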
	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
		pool = cmng->pools[pool_idx];
		if (!pool)
			continue;
		cnt = TAILQ_FIRST(&pool->counters[0]);
		if (cnt)
			break;
	}
	if (!cnt) {
		struct mlx5_flow_counter_pool **pools;
		uint32_t size;

		if (n_valid == cmng->n) {
			/* Resize the container pool array. */
			size = sizeof(struct mlx5_flow_counter_pool *) *
			       (n_valid + MLX5_CNT_CONTAINER_RESIZE);
			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
					    SOCKET_ID_ANY);
			if (!pools)
				return 0;
			if (n_valid) {
				memcpy(pools, cmng->pools,
				       sizeof(struct mlx5_flow_counter_pool *) *
				       n_valid);
				mlx5_free(cmng->pools);
			}
			cmng->pools = pools;
			cmng->n += MLX5_CNT_CONTAINER_RESIZE;
		}
		/* Allocate memory for the new pool. */
		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
		if (!pool)
			return 0;
		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
			cnt = MLX5_POOL_GET_CNT(pool, i);
			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
		}
		cnt = MLX5_POOL_GET_CNT(pool, 0);
		cmng->pools[n_valid] = pool;
		pool_idx = n_valid;
		cmng->n_valid++;
	}
	TAILQ_REMOVE(&pool->counters[0], cnt, next);
	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
	/* Create counter with Verbs. */
	ret = flow_verbs_counter_create(dev, cnt);
	if (!ret) {
		cnt->dcs_when_active = cnt->dcs_when_free;
		cnt->hits = 0;
		cnt->bytes = 0;
		return cnt_idx;
	}
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
	/* Some error occurred in Verbs library. */
	rte_errno = -ret;
	return 0;
}

/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt;

	cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool);
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	claim_zero(mlx5_glue->destroy_counter_set
			((struct ibv_counter_set *)cnt->dcs_when_active));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	claim_zero(mlx5_glue->destroy_counters
			((struct ibv_counters *)cnt->dcs_when_active));
#endif
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
}

/**
 * Query a flow counter via Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	if (flow->counter) {
		struct mlx5_flow_counter_pool *pool;
		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
						(dev, flow->counter, &pool);
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_query_counter_set_attr query_cs_attr = {
			.dcs_when_free = (struct ibv_counter_set *)
						cnt->dcs_when_active,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		int err = mlx5_glue->query_counters
			((struct ibv_counters *)cnt->dcs_when_active, counters,
			 RTE_DIM(counters),
			 IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - cnt->hits;
		qc->bytes = counters[1] - cnt->bytes;
		if (qc->reset) {
			cnt->hits = counters[0];
			cnt->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#else
	(void)flow;
	(void)data;
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
#endif
}

/**
 * Add a verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to verbs structure.
 * @param[in] src
 *   Pointer to the specification to copy.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
		    void *src, unsigned int size)
{
	void *dst;

	if (!verbs)
		return;
	MLX5_ASSERT(verbs->specs);
	dst = (void *)(verbs->specs + verbs->size);
	memcpy(dst, src, size);
	++verbs->attr.num_of_specs;
	verbs->size += size;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}

/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
			    struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

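	/*
	 * Walk the variable-size spec list that follows the attribute
	 * header and merge the VLAN fields into the first matching
	 * Ethernet spec instead of appending a new one.
	 */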
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				      MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(item_flags & l2m))
		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
	else
		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
	if (!tunnel)
		dev_flow->handle->vf_vlan.tag =
			rte_be_to_cpu_16(spec->tci) & 0x0fff;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
					 RTE_IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
					  RTE_IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
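	/*
	 * Peek at the next non-VOID item: when this UDP header precedes a
	 * tunnel item and no destination port was matched explicitly,
	 * default to the well-known port of that tunnel so the rule still
	 * classifies the encapsulation.
	 */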
	item++;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		item++;
	if (!(udp.val.dst_port & udp.mask.dst_port)) {
		switch ((item)->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS);
			udp.mask.dst_port = 0xffff;
			break;
		default:
			break;
		}
	}

	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
				const struct rte_flow_item *item,
				uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
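		/*
		 * The 24-bit VNI is copied into bytes 1..3 of the 32-bit
		 * tunnel_id, keeping the field in network byte order.
		 */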
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
				    const struct rte_flow_item *item,
				    uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}

/**
 * Update the protocol in Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				       enum ibv_flow_spec_type search,
				       uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	if (!attr)
		return;
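	/*
	 * Scan the already-translated IP specs and force the protocol only
	 * when the user left it unmasked, so an explicit match is never
	 * overridden.
	 */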
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Reserve space for GRE spec in spec buffer.
 *
 * @param[in,out] dev_flow
 *   Pointer to dev_flow structure.
 *
 * @return
 *   Pointer to reserved space in spec buffer.
 */
static uint8_t *
flow_verbs_reserve_gre(struct mlx5_flow *dev_flow)
{
	uint8_t *buffer;
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};
#endif

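	/*
	 * Append a blank GRE spec now and remember its position; the real
	 * field values are filled in later by flow_verbs_translate_item_gre()
	 * once all layers are known.
	 */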
	buffer = verbs->specs + verbs->size;
	flow_verbs_spec_add(verbs, &tunnel, size);
	return buffer;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that the Verbs specification will be placed in
 * the pre-reserved space.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in, out] gre_spec
 *   Pointer to space reserved for GRE spec.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
			      uint8_t *gre_spec,
			      const struct rte_flow_item *item __rte_unused,
			      uint64_t item_flags)
{
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	static const struct rte_flow_item_gre empty_gre = {0,};
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};

	if (!spec) {
		spec = &empty_gre;
		mask = &empty_gre;
	} else {
		if (!mask)
			mask = &rte_flow_item_gre_mask;
	}
	tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
	tunnel.val.protocol = spec->protocol;
	tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
	tunnel.mask.protocol = mask->protocol;
	/* Remove unwanted bits from values. */
	tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
	tunnel.val.key &= tunnel.mask.key;
	if (tunnel.mask.protocol) {
		tunnel.val.protocol &= tunnel.mask.protocol;
	} else {
		tunnel.val.protocol = mlx5_translate_tunnel_etypes(item_flags);
		if (tunnel.val.protocol) {
			tunnel.mask.protocol = 0xFFFF;
			tunnel.val.protocol =
				rte_cpu_to_be_16(tunnel.val.protocol);
		}
	}
#endif
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV4_EXT,
						       IPPROTO_GRE);
	else
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV6,
						       IPPROTO_GRE);
	MLX5_ASSERT(gre_spec);
	memcpy(gre_spec, &tunnel, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
			       const struct rte_flow_item *item __rte_unused,
			       uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values. */
		mpls.val.label &= mpls.mask.label;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
		.type = IBV_FLOW_SPEC_ACTION_DROP,
		.size = size,
	};

	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;

	rss_desc->queue[0] = queue->index;
	rss_desc->queue_num = 1;
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
				const struct rte_flow_action *action)
{
	const struct rte_flow_action_rss *rss = action->conf;
	const uint8_t *rss_key;

	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
	rss_desc->queue_num = rss->queue_num;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	/*
	 * rss->level and rss->types should be set in advance when expanding
	 * items for RSS.
	 */
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
				 const struct rte_flow_action *action)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark->id),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, else a negative errno value is returned and rte_errno is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
	struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt = NULL;
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = flow_verbs_counter_new(dev, count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, rte_errno,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counter_set_handle =
		((struct ibv_counter_set *)cnt->dcs_when_active)->handle;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counters = (struct ibv_counters *)cnt->dcs_when_active;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
	return 0;
}

/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] external
 *   This flow rule is created by a request external to the PMD.
 * @param[in] hairpin
 *   Number of hairpin TX actions, 0 means classic flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    bool external __rte_unused,
		    int hairpin __rte_unused,
		    struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;
	bool is_empty_vlan = false;
	uint16_t udp_dport = 0;

	if (items == NULL)
		return -1;
	ret = mlx5_flow_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int ret = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  false, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				if (ether_type == RTE_BE16(RTE_ETHER_TYPE_VLAN))
					is_empty_vlan = true;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			is_empty_vlan = false;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4
						(items, item_flags,
						 last_item, ether_type, NULL,
						 MLX5_ITEM_RANGE_NOT_ACCEPTED,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			const struct rte_flow_item_udp *spec = items->spec;
			const struct rte_flow_item_udp *mask = items->mask;
			if (!mask)
				mask = &rte_flow_item_udp_mask;
			if (spec != NULL)
				udp_dport = rte_be_to_cpu_16
						(spec->hdr.dst_port &
						 mask->hdr.dst_port);

			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
							    items, item_flags,
							    attr, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "ICMP/ICMP6 "
						  "item not supported");
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	if (is_empty_vlan)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "VLAN matching without vid specification is not supported");
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually-exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually-exclusive "
					  "with any other action, except for "
					  "Count action");
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}

/**
 * Calculate the number of bytes needed for the action part of the Verbs
 * flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
	int size = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
#endif
			break;
		default:
			break;
		}
	}
	return size;
}

/**
 * Calculate the number of bytes needed for the item part of the Verbs
 * flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
	int size = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
			break;
#else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#endif
		default:
			break;
		}
	}
	return size;
}

/**
 * Internal preparation function. Allocate mlx5_flow with the required size.
 * The required size is calculated based on the actions and items. This
 * function also returns the detected actions and items for later use.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
 *   is set.
 */
static struct mlx5_flow *
flow_verbs_prepare(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = 0;
	uint32_t handle_idx = 0;
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Verbs spec/action size too large");
		return NULL;
	}
	/* Guard against overflowing the flow array and corrupting memory. */
	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "no free temporary device flow");
		return NULL;
	}
	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					&handle_idx);
	if (!dev_handle) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow handle");
		return NULL;
	}
	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
	dev_flow = &wks->flows[wks->flow_idx++];
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/* Memcpy is used, only size needs to be cleared to 0. */
	dev_flow->verbs.size = 0;
	dev_flow->verbs.attr.num_of_specs = 0;
	dev_flow->ingress = attr->ingress;
	dev_flow->hash_fields = 0;
	/* Need to set transfer attribute: not supported in Verbs mode. */
	return dev_flow;
}

/**
 * Fill the flow with Verbs spec.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_translate(struct rte_eth_dev *dev,
		     struct mlx5_flow *dev_flow,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item items[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	uint64_t item_flags = 0;
	uint64_t action_flags = 0;
	uint64_t priority = attr->priority;
	uint32_t subpriority = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;
	const struct rte_flow_item *tunnel_item = NULL;
	uint8_t *gre_spec = NULL;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
		priority = priv->sh->flow_max_priority - 1;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int ret;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			flow_verbs_translate_action_flag(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			wks->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			flow_verbs_translate_action_mark(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_MARK;
			wks->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow_verbs_translate_action_drop(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_DROP;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			flow_verbs_translate_action_queue(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			flow_verbs_translate_action_rss(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_RSS;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_translate_action_count(dev_flow,
								actions,
								dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	dev_flow->act_flags = action_flags;
1773 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1774 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1775
1776 switch (items->type) {
1777 case RTE_FLOW_ITEM_TYPE_VOID:
1778 break;
1779 case RTE_FLOW_ITEM_TYPE_ETH:
1780 flow_verbs_translate_item_eth(dev_flow, items,
1781 item_flags);
1782 subpriority = MLX5_PRIORITY_MAP_L2;
1783 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1784 MLX5_FLOW_LAYER_OUTER_L2;
1785 break;
1786 case RTE_FLOW_ITEM_TYPE_VLAN:
1787 flow_verbs_translate_item_vlan(dev_flow, items,
1788 item_flags);
1789 subpriority = MLX5_PRIORITY_MAP_L2;
1790 item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
1791 MLX5_FLOW_LAYER_INNER_VLAN) :
1792 (MLX5_FLOW_LAYER_OUTER_L2 |
1793 MLX5_FLOW_LAYER_OUTER_VLAN);
1794 break;
1795 case RTE_FLOW_ITEM_TYPE_IPV4:
1796 flow_verbs_translate_item_ipv4(dev_flow, items,
1797 item_flags);
1798 subpriority = MLX5_PRIORITY_MAP_L3;
1799 dev_flow->hash_fields |=
1800 mlx5_flow_hashfields_adjust
1801 (rss_desc, tunnel,
1802 MLX5_IPV4_LAYER_TYPES,
1803 MLX5_IPV4_IBV_RX_HASH);
1804 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1805 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1806 break;
1807 case RTE_FLOW_ITEM_TYPE_IPV6:
1808 flow_verbs_translate_item_ipv6(dev_flow, items,
1809 item_flags);
1810 subpriority = MLX5_PRIORITY_MAP_L3;
1811 dev_flow->hash_fields |=
1812 mlx5_flow_hashfields_adjust
1813 (rss_desc, tunnel,
1814 MLX5_IPV6_LAYER_TYPES,
1815 MLX5_IPV6_IBV_RX_HASH);
1816 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1817 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1818 break;
1819 case RTE_FLOW_ITEM_TYPE_TCP:
1820 flow_verbs_translate_item_tcp(dev_flow, items,
1821 item_flags);
1822 subpriority = MLX5_PRIORITY_MAP_L4;
1823 if (dev_flow->hash_fields != 0)
1824 dev_flow->hash_fields |=
1825 mlx5_flow_hashfields_adjust
1826 (rss_desc, tunnel, RTE_ETH_RSS_TCP,
1827 (IBV_RX_HASH_SRC_PORT_TCP |
1828 IBV_RX_HASH_DST_PORT_TCP));
1829 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1830 MLX5_FLOW_LAYER_OUTER_L4_TCP;
1831 break;
1832 case RTE_FLOW_ITEM_TYPE_UDP:
1833 flow_verbs_translate_item_udp(dev_flow, items,
1834 item_flags);
1835 subpriority = MLX5_PRIORITY_MAP_L4;
1836 if (dev_flow->hash_fields != 0)
1837 dev_flow->hash_fields |=
1838 mlx5_flow_hashfields_adjust
1839 (rss_desc, tunnel, RTE_ETH_RSS_UDP,
1840 (IBV_RX_HASH_SRC_PORT_UDP |
1841 IBV_RX_HASH_DST_PORT_UDP));
1842 item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1843 MLX5_FLOW_LAYER_OUTER_L4_UDP;
1844 break;
1845 case RTE_FLOW_ITEM_TYPE_VXLAN:
1846 flow_verbs_translate_item_vxlan(dev_flow, items,
1847 item_flags);
1848 subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1849 item_flags |= MLX5_FLOW_LAYER_VXLAN;
1850 break;
1851 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1852 flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
1853 item_flags);
1854 subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1855 item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
1856 break;
1857 case RTE_FLOW_ITEM_TYPE_GRE:
1858 gre_spec = flow_verbs_reserve_gre(dev_flow);
1859 subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1860 item_flags |= MLX5_FLOW_LAYER_GRE;
1861 tunnel_item = items;
1862 break;
1863 case RTE_FLOW_ITEM_TYPE_MPLS:
1864 flow_verbs_translate_item_mpls(dev_flow, items,
1865 item_flags);
1866 subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
1867 item_flags |= MLX5_FLOW_LAYER_MPLS;
1868 break;
1869 default:
1870 return rte_flow_error_set(error, ENOTSUP,
1871 RTE_FLOW_ERROR_TYPE_ITEM,
1872 NULL, "item not supported");
1873 }
1874 }
1875 if (item_flags & MLX5_FLOW_LAYER_GRE)
1876 flow_verbs_translate_item_gre(dev_flow, gre_spec,
1877 tunnel_item, item_flags);
1878 dev_flow->handle->layers = item_flags;
1879 /* Other members of attr will be ignored. */
1880 dev_flow->verbs.attr.priority =
1881 mlx5_flow_adjust_priority(dev, priority, subpriority);
1882 dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
1883 return 0;
1884 }
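
/*
 * Illustrative sketch only; this helper is not part of the driver and is
 * never called.  It shows a minimal pattern/action set that the Verbs
 * translate path above accepts: an outer ETH / IPV4 / UDP match fanned
 * out by RSS over two assumed queues.  A real caller must first obtain
 * dev_flow from flow_verbs_prepare() on the same thread workspace.
 */
static __rte_unused int
flow_verbs_translate_example(struct rte_eth_dev *dev,
			     struct mlx5_flow *dev_flow,
			     struct rte_flow_error *error)
{
	static const uint16_t queues[] = { 0, 1 };
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item items[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_rss rss = {
		.types = RTE_ETH_RSS_UDP,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Fills dev_flow->verbs with the ETH/IPV4/UDP specs and RSS hash. */
	return flow_verbs_translate(dev, dev_flow, &attr, items, actions,
				    error);
}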

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	uint32_t handle_idx;

	if (!flow)
		return;
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, handle, next) {
		if (handle->drv_flow) {
			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
			handle->drv_flow = NULL;
		}
		/* hrxq is a union; touch it only when the fate action is QUEUE. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
}
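
/*
 * Illustrative note, not upstream documentation: flow_verbs_remove() is
 * assumed to be paired with flow_verbs_apply() by the generic layer
 * across device stop/start, keeping the handles allocated so the rules
 * can be re-created, while flow_verbs_destroy() below is the
 * rte_flow_destroy() path that releases everything.
 */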

/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;

	if (!flow)
		return;
	flow_verbs_remove(dev, flow);
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					tmp_idx);
		if (!handle)
			return;
		flow->dev_handles = handle->next.next;
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				tmp_idx);
	}
	if (flow->counter) {
		flow_verbs_counter_release(dev, flow->counter);
		flow->counter = 0;
	}
}

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow *dev_flow;
	struct mlx5_hrxq *hrxq;
	uint32_t dev_handles;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
		dev_flow = &wks->flows[idx];
		handle = dev_flow->handle;
		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
			MLX5_ASSERT(priv->drop_queue.hrxq);
			hrxq = priv->drop_queue.hrxq;
		} else {
			struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;

			MLX5_ASSERT(rss_desc->queue_num);
			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
			rss_desc->hash_fields = dev_flow->hash_fields;
			rss_desc->tunnel = !!(handle->layers &
					      MLX5_FLOW_LAYER_TUNNEL);
			rss_desc->shared_rss = 0;
			hrxq = mlx5_hrxq_get(dev, rss_desc);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			handle->rix_hrxq = hrxq->idx;
		}
		MLX5_ASSERT(hrxq);
		handle->drv_flow = mlx5_glue->create_flow
					(hrxq->qp, &dev_flow->verbs.attr);
		if (!handle->drv_flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.  For a VF,
			 * create a VLAN interface so the hypervisor sets
			 * the correct e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	/* Roll back the resources of every handle created so far. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       dev_handles, handle, next) {
		/* hrxq is a union; touch it only when the fate action is QUEUE. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_query(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 const struct rte_flow_action *actions,
		 void *data,
		 struct rte_flow_error *error)
{
	int ret = -EINVAL;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_counter_query(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}
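
/*
 * Illustrative sketch only; this helper is not part of the driver and is
 * never called.  It shows how an application would read the counter
 * attached with RTE_FLOW_ACTION_TYPE_COUNT through the generic
 * rte_flow_query() entry point, which ends up in flow_verbs_query()
 * above.  The port_id/flow arguments are assumptions of the example.
 */
static __rte_unused int
flow_verbs_query_count_example(uint16_t port_id, struct rte_flow *flow,
			       struct rte_flow_error *error)
{
	struct rte_flow_query_count count = { .reset = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	int ret = rte_flow_query(port_id, flow, actions, &count, error);

	if (!ret && count.hits_set)
		DRV_LOG(INFO, "flow hits: %" PRIu64 ", bytes: %" PRIu64,
			count.hits, count.bytes);
	return ret;
}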

/**
 * Synchronize the flow rules of a domain (no-op in Verbs mode).
 *
 * Verbs rules take effect as soon as they are created, so there is no
 * queued or cached state to flush.
 *
 * @see rte_flow_ops
 */
static int
flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
		       uint32_t flags)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(domains);
	RTE_SET_USED(flags);

	return 0;
}

/* Verbs flow engine ops, dispatched through the generic mlx5 flow layer. */
const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
	.validate = flow_verbs_validate,
	.prepare = flow_verbs_prepare,
	.translate = flow_verbs_translate,
	.apply = flow_verbs_apply,
	.remove = flow_verbs_remove,
	.destroy = flow_verbs_destroy,
	.query = flow_verbs_query,
	.sync_domain = flow_verbs_sync_domain,
	.discover_priorities = flow_verbs_discover_priorities,
};
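
/*
 * Illustrative sketch only; this helper is not part of the driver and is
 * never called.  It strings the ops above together in the order the
 * generic mlx5 flow layer is assumed to invoke them through
 * mlx5_flow_verbs_drv_ops (prepare -> translate -> apply), with destroy
 * as the error unwind.  The real caller in mlx5_flow.c additionally sets
 * up the per-thread workspace and links dev_flow->handle_idx into
 * flow->dev_handles, which this sketch omits.
 */
static __rte_unused int
flow_verbs_create_example(struct rte_eth_dev *dev,
			  struct rte_flow *flow,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item items[],
			  const struct rte_flow_action actions[],
			  struct rte_flow_error *error)
{
	struct mlx5_flow *dev_flow;

	/* Reserve a device flow and its handle in the workspace. */
	dev_flow = flow_verbs_prepare(dev, attr, items, actions, error);
	if (!dev_flow)
		return -rte_errno;
	/* Convert the generic description into a Verbs specification. */
	if (flow_verbs_translate(dev, dev_flow, attr, items, actions, error))
		goto error;
	/* Create the rule(s) in hardware. */
	if (flow_verbs_apply(dev, flow, error))
		goto error;
	return 0;
error:
	flow_verbs_destroy(dev, flow);
	return -rte_errno;
}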