1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
3 */
4
5 #include <stdio.h>
6 #include <rte_flow.h>
7 #include <rte_hash.h>
8 #include <rte_hash_crc.h>
9 #include "base/ice_fdir.h"
10 #include "base/ice_flow.h"
11 #include "base/ice_type.h"
12 #include "ice_ethdev.h"
13 #include "ice_rxtx.h"
14 #include "ice_generic_flow.h"
15
16 #define ICE_FDIR_IPV6_TC_OFFSET 20
17 #define ICE_IPV6_TC_MASK (0xFF << ICE_FDIR_IPV6_TC_OFFSET)
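/*
 * In the IPv6 vtc_flow word the Traffic Class field occupies bits 27:20
 * (version 31:28, flow label 19:0), hence the 20-bit offset and 0xFF mask
 * above.
 */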
18
19 #define ICE_FDIR_MAX_QREGION_SIZE 128
20
21 #define ICE_FDIR_INSET_ETH (\
22 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
23
24 #define ICE_FDIR_INSET_ETH_IPV4 (\
25 ICE_FDIR_INSET_ETH | \
26 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
27 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
28
29 #define ICE_FDIR_INSET_ETH_IPV4_UDP (\
30 ICE_FDIR_INSET_ETH_IPV4 | \
31 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
32
33 #define ICE_FDIR_INSET_ETH_IPV4_TCP (\
34 ICE_FDIR_INSET_ETH_IPV4 | \
35 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
36
37 #define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
38 ICE_FDIR_INSET_ETH_IPV4 | \
39 ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
40
41 #define ICE_FDIR_INSET_ETH_IPV6 (\
42 ICE_INSET_DMAC | \
43 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
44 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
45
46 #define ICE_FDIR_INSET_ETH_IPV6_UDP (\
47 ICE_FDIR_INSET_ETH_IPV6 | \
48 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
49
50 #define ICE_FDIR_INSET_ETH_IPV6_TCP (\
51 ICE_FDIR_INSET_ETH_IPV6 | \
52 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
53
54 #define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
55 ICE_FDIR_INSET_ETH_IPV6 | \
56 ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
57
58 #define ICE_FDIR_INSET_VXLAN_IPV4 (\
59 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
60
61 #define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
62 ICE_FDIR_INSET_VXLAN_IPV4 | \
63 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
64
65 #define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
66 ICE_FDIR_INSET_VXLAN_IPV4 | \
67 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
68
69 #define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
70 ICE_FDIR_INSET_VXLAN_IPV4 | \
71 ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)
72
73 #define ICE_FDIR_INSET_IPV4_GTPU (\
74 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)
75
76 #define ICE_FDIR_INSET_IPV4_GTPU_EH (\
77 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
78 ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
79
80 #define ICE_FDIR_INSET_IPV6_GTPU (\
81 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_GTPU_TEID)
82
83 #define ICE_FDIR_INSET_IPV6_GTPU_EH (\
84 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
85 ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
86
87 static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
88 {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
89 {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
90 {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
91 {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
92 {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE},
93 {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE},
94 {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE},
95 {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE},
96 {pattern_eth_ipv4_udp_vxlan_ipv4,
97 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
98 {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
99 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
100 {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
101 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
102 {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
103 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
104 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
105 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
106 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
107 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
108 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
109 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
110 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
111 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
112 };
113
114 static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
115 {pattern_ethertype, ICE_FDIR_INSET_ETH, ICE_INSET_NONE},
116 {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
117 {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
118 {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
119 {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
120 {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE},
121 {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE},
122 {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE},
123 {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE},
124 {pattern_eth_ipv4_udp_vxlan_ipv4,
125 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
126 {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
127 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
128 {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
129 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
130 {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
131 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
132 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
133 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
134 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
135 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
136 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
137 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
138 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
139 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
140 {pattern_eth_ipv4_gtpu, ICE_FDIR_INSET_IPV4_GTPU, ICE_INSET_NONE},
141 {pattern_eth_ipv4_gtpu_eh, ICE_FDIR_INSET_IPV4_GTPU_EH, ICE_INSET_NONE},
142 {pattern_eth_ipv6_gtpu, ICE_FDIR_INSET_IPV6_GTPU, ICE_INSET_NONE},
143 {pattern_eth_ipv6_gtpu_eh, ICE_FDIR_INSET_IPV6_GTPU_EH, ICE_INSET_NONE},
144 };
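/*
 * Illustrative example (not part of the original source): a testpmd rule
 * such as
 *   flow create 0 ingress pattern eth / ipv4 / udp src is 32 / end
 *        actions queue index 2 / end
 * would match pattern_eth_ipv4_udp above and be validated against the
 * ICE_FDIR_INSET_ETH_IPV4_UDP input set.
 */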
145
146 static struct ice_flow_parser ice_fdir_parser_os;
147 static struct ice_flow_parser ice_fdir_parser_comms;
148
149 static int
150 ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);
151
152 static const struct rte_memzone *
153 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
154 {
155 const struct rte_memzone *mz;
156
157 mz = rte_memzone_lookup(name);
158 if (mz)
159 return mz;
160
161 return rte_memzone_reserve_aligned(name, len, socket_id,
162 RTE_MEMZONE_IOVA_CONTIG,
163 ICE_RING_BASE_ALIGN);
164 }
165
166 #define ICE_FDIR_MZ_NAME "FDIR_MEMZONE"
167
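/*
 * Allocate one ice_fd_hw_prof slot per flow type (ptype); on failure, free
 * the slots already allocated.
 */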
168 static int
169 ice_fdir_prof_alloc(struct ice_hw *hw)
170 {
171 enum ice_fltr_ptype ptype, fltr_ptype;
172
173 if (!hw->fdir_prof) {
174 hw->fdir_prof = (struct ice_fd_hw_prof **)
175 ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
176 sizeof(*hw->fdir_prof));
177 if (!hw->fdir_prof)
178 return -ENOMEM;
179 }
180 for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
181 ptype < ICE_FLTR_PTYPE_MAX;
182 ptype++) {
183 if (!hw->fdir_prof[ptype]) {
184 hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
185 ice_malloc(hw, sizeof(**hw->fdir_prof));
186 if (!hw->fdir_prof[ptype])
187 goto fail_mem;
188 }
189 }
190 return 0;
191
192 fail_mem:
193 for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
194 fltr_ptype < ptype;
195 fltr_ptype++) {
196 rte_free(hw->fdir_prof[fltr_ptype]);
197 hw->fdir_prof[fltr_ptype] = NULL;
198 }
199
200 rte_free(hw->fdir_prof);
201 hw->fdir_prof = NULL;
202
203 return -ENOMEM;
204 }
205
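/*
 * Create a counter pool backed by 'len' consecutive HW counter indexes
 * starting at 'index_start' and append it to the container's pool list.
 */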
206 static int
207 ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
208 struct ice_fdir_counter_pool_container *container,
209 uint32_t index_start,
210 uint32_t len)
211 {
212 struct ice_fdir_counter_pool *pool;
213 uint32_t i;
214 int ret = 0;
215
216 pool = rte_zmalloc("ice_fdir_counter_pool",
217 sizeof(*pool) +
218 sizeof(struct ice_fdir_counter) * len,
219 0);
220 if (!pool) {
221 PMD_INIT_LOG(ERR,
222 "Failed to allocate memory for fdir counter pool");
223 return -ENOMEM;
224 }
225
226 TAILQ_INIT(&pool->counter_list);
227 TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
228
229 for (i = 0; i < len; i++) {
230 struct ice_fdir_counter *counter = &pool->counters[i];
231
232 counter->hw_index = index_start + i;
233 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
234 }
235
236 if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
237 PMD_INIT_LOG(ERR, "FDIR counter pool is full");
238 ret = -EINVAL;
239 goto free_pool;
240 }
241
242 container->pools[container->index_free++] = pool;
243 return 0;
244
245 free_pool:
246 rte_free(pool);
247 return ret;
248 }
249
250 static int
251 ice_fdir_counter_init(struct ice_pf *pf)
252 {
253 struct ice_hw *hw = ICE_PF_TO_HW(pf);
254 struct ice_fdir_info *fdir_info = &pf->fdir;
255 struct ice_fdir_counter_pool_container *container =
256 &fdir_info->counter;
257 uint32_t cnt_index, len;
258 int ret;
259
260 TAILQ_INIT(&container->pool_list);
261
262 cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
263 len = ICE_FDIR_COUNTERS_PER_BLOCK;
264
265 ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
266 if (ret) {
267 PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
268 return ret;
269 }
270
271 return 0;
272 }
273
274 static int
275 ice_fdir_counter_release(struct ice_pf *pf)
276 {
277 struct ice_fdir_info *fdir_info = &pf->fdir;
278 struct ice_fdir_counter_pool_container *container =
279 &fdir_info->counter;
280 uint8_t i;
281
282 for (i = 0; i < container->index_free; i++) {
283 rte_free(container->pools[i]);
284 container->pools[i] = NULL;
285 }
286
287 TAILQ_INIT(&container->pool_list);
288 container->index_free = 0;
289
290 return 0;
291 }
292
293 static struct ice_fdir_counter *
294 ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
295 *container,
296 uint32_t id)
297 {
298 struct ice_fdir_counter_pool *pool;
299 struct ice_fdir_counter *counter;
300 int i;
301
302 TAILQ_FOREACH(pool, &container->pool_list, next) {
303 for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
304 counter = &pool->counters[i];
305
306 if (counter->shared &&
307 counter->ref_cnt &&
308 counter->id == id)
309 return counter;
310 }
311 }
312
313 return NULL;
314 }
315
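/*
 * Allocate an FDIR counter. A shared counter with a matching id is reused
 * (reference counted); otherwise the first free counter found in the pools
 * is taken and its HW statistic registers are cleared.
 */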
316 static struct ice_fdir_counter *
317 ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
318 {
319 struct ice_hw *hw = ICE_PF_TO_HW(pf);
320 struct ice_fdir_info *fdir_info = &pf->fdir;
321 struct ice_fdir_counter_pool_container *container =
322 &fdir_info->counter;
323 struct ice_fdir_counter_pool *pool = NULL;
324 struct ice_fdir_counter *counter_free = NULL;
325
326 if (shared) {
327 counter_free = ice_fdir_counter_shared_search(container, id);
328 if (counter_free) {
329 if (counter_free->ref_cnt + 1 == 0) {
330 rte_errno = E2BIG;
331 return NULL;
332 }
333 counter_free->ref_cnt++;
334 return counter_free;
335 }
336 }
337
338 TAILQ_FOREACH(pool, &container->pool_list, next) {
339 counter_free = TAILQ_FIRST(&pool->counter_list);
340 if (counter_free)
341 break;
342 counter_free = NULL;
343 }
344
345 if (!counter_free) {
346 PMD_DRV_LOG(ERR, "No free counter found");
347 return NULL;
348 }
349
350 counter_free->shared = shared;
351 counter_free->id = id;
352 counter_free->ref_cnt = 1;
353 counter_free->pool = pool;
354
355 /* reset statistic counter value */
356 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
357 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);
358
359 TAILQ_REMOVE(&pool->counter_list, counter_free, next);
360 if (TAILQ_EMPTY(&pool->counter_list)) {
361 TAILQ_REMOVE(&container->pool_list, pool, next);
362 TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
363 }
364
365 return counter_free;
366 }
367
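/* Drop one reference; at zero the counter returns to its pool's free list. */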
368 static void
369 ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
370 struct ice_fdir_counter *counter)
371 {
372 if (!counter)
373 return;
374
375 if (--counter->ref_cnt == 0) {
376 struct ice_fdir_counter_pool *pool = counter->pool;
377
378 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
379 }
380 }
381
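/*
 * Create the per-port hash table (keyed by struct ice_fdir_fltr_pattern)
 * and the hash map used to track software FDIR filter entries.
 */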
382 static int
383 ice_fdir_init_filter_list(struct ice_pf *pf)
384 {
385 struct rte_eth_dev *dev = pf->adapter->eth_dev;
386 struct ice_fdir_info *fdir_info = &pf->fdir;
387 char fdir_hash_name[RTE_HASH_NAMESIZE];
388 int ret;
389
390 struct rte_hash_parameters fdir_hash_params = {
391 .name = fdir_hash_name,
392 .entries = ICE_MAX_FDIR_FILTER_NUM,
393 .key_len = sizeof(struct ice_fdir_fltr_pattern),
394 .hash_func = rte_hash_crc,
395 .hash_func_init_val = 0,
396 .socket_id = rte_socket_id(),
397 .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
398 };
399
400 /* Initialize hash */
401 snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
402 "fdir_%s", dev->device->name);
403 fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
404 if (!fdir_info->hash_table) {
405 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
406 return -EINVAL;
407 }
408 fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
409 sizeof(*fdir_info->hash_map) *
410 ICE_MAX_FDIR_FILTER_NUM,
411 0);
412 if (!fdir_info->hash_map) {
413 PMD_INIT_LOG(ERR,
414 "Failed to allocate memory for fdir hash map!");
415 ret = -ENOMEM;
416 goto err_fdir_hash_map_alloc;
417 }
418 return 0;
419
420 err_fdir_hash_map_alloc:
421 rte_hash_free(fdir_info->hash_table);
422
423 return ret;
424 }
425
426 static void
427 ice_fdir_release_filter_list(struct ice_pf *pf)
428 {
429 struct ice_fdir_info *fdir_info = &pf->fdir;
430
431 if (fdir_info->hash_map)
432 rte_free(fdir_info->hash_map);
433 if (fdir_info->hash_table)
434 rte_hash_free(fdir_info->hash_table);
435
436 fdir_info->hash_map = NULL;
437 fdir_info->hash_table = NULL;
438 }
439
440 /*
441 * ice_fdir_setup - reserve and initialize the Flow Director resources
442 * @pf: board private structure
443 */
444 static int
445 ice_fdir_setup(struct ice_pf *pf)
446 {
447 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
448 struct ice_hw *hw = ICE_PF_TO_HW(pf);
449 const struct rte_memzone *mz = NULL;
450 char z_name[RTE_MEMZONE_NAMESIZE];
451 struct ice_vsi *vsi;
452 int err = ICE_SUCCESS;
453
454 if ((pf->flags & ICE_FLAG_FDIR) == 0) {
455 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
456 return -ENOTSUP;
457 }
458
459 PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
460 " fd_fltr_best_effort = %u.",
461 hw->func_caps.fd_fltr_guar,
462 hw->func_caps.fd_fltr_best_effort);
463
464 if (pf->fdir.fdir_vsi) {
465 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
466 return ICE_SUCCESS;
467 }
468
469 /* make new FDIR VSI */
470 vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
471 if (!vsi) {
472 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
473 return -EINVAL;
474 }
475 pf->fdir.fdir_vsi = vsi;
476
477 err = ice_fdir_init_filter_list(pf);
478 if (err) {
479 PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
480 return -EINVAL;
481 }
482
483 err = ice_fdir_counter_init(pf);
484 if (err) {
485 PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
486 return -EINVAL;
487 }
488
489 /* FDIR TX queue setup */
490 err = ice_fdir_setup_tx_resources(pf);
491 if (err) {
492 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
493 goto fail_setup_tx;
494 }
495
496 /* FDIR RX queue setup */
497 err = ice_fdir_setup_rx_resources(pf);
498 if (err) {
499 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
500 goto fail_setup_rx;
501 }
502
503 err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
504 if (err) {
505 PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
506 goto fail_mem;
507 }
508
509 err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
510 if (err) {
511 PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
512 goto fail_mem;
513 }
514
515 /* Enable FDIR MSIX interrupt */
516 vsi->nb_used_qps = 1;
517 ice_vsi_queues_bind_intr(vsi);
518 ice_vsi_enable_queues_intr(vsi);
519
520 /* reserve memory for the fdir programming packet */
521 snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
522 ICE_FDIR_MZ_NAME,
523 eth_dev->data->port_id);
524 mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
525 if (!mz) {
526 PMD_DRV_LOG(ERR, "Cannot init memzone for "
527 "flow director program packet.");
528 err = -ENOMEM;
529 goto fail_mem;
530 }
531 pf->fdir.prg_pkt = mz->addr;
532 pf->fdir.dma_addr = mz->iova;
533 pf->fdir.mz = mz;
534
535 err = ice_fdir_prof_alloc(hw);
536 if (err) {
537 PMD_DRV_LOG(ERR, "Cannot allocate memory for "
538 "flow director profile.");
539 err = -ENOMEM;
540 goto fail_prof;
541 }
542
543 PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
544 vsi->base_queue);
545 return ICE_SUCCESS;
546
547 fail_prof:
548 rte_memzone_free(pf->fdir.mz);
549 pf->fdir.mz = NULL;
550 fail_mem:
551 ice_rx_queue_release(pf->fdir.rxq);
552 pf->fdir.rxq = NULL;
553 fail_setup_rx:
554 ice_tx_queue_release(pf->fdir.txq);
555 pf->fdir.txq = NULL;
556 fail_setup_tx:
557 ice_release_vsi(vsi);
558 pf->fdir.fdir_vsi = NULL;
559 return err;
560 }
561
562 static void
563 ice_fdir_prof_free(struct ice_hw *hw)
564 {
565 enum ice_fltr_ptype ptype;
566
567 for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
568 ptype < ICE_FLTR_PTYPE_MAX;
569 ptype++) {
570 rte_free(hw->fdir_prof[ptype]);
571 hw->fdir_prof[ptype] = NULL;
572 }
573
574 rte_free(hw->fdir_prof);
575 hw->fdir_prof = NULL;
576 }
577
578 /* Remove a profile for a given filter type */
579 static void
580 ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
581 {
582 struct ice_hw *hw = ICE_PF_TO_HW(pf);
583 struct ice_fd_hw_prof *hw_prof;
584 uint64_t prof_id;
585 uint16_t vsi_num;
586 int i;
587
588 if (!hw->fdir_prof || !hw->fdir_prof[ptype])
589 return;
590
591 hw_prof = hw->fdir_prof[ptype];
592
593 prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
594 for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
595 if (hw_prof->entry_h[i][is_tunnel]) {
596 vsi_num = ice_get_hw_vsi_num(hw,
597 hw_prof->vsi_h[i]);
598 ice_rem_prof_id_flow(hw, ICE_BLK_FD,
599 vsi_num, ptype);
600 ice_flow_rem_entry(hw, ICE_BLK_FD,
601 hw_prof->entry_h[i][is_tunnel]);
602 hw_prof->entry_h[i][is_tunnel] = 0;
603 }
604 }
605 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
606 rte_free(hw_prof->fdir_seg[is_tunnel]);
607 hw_prof->fdir_seg[is_tunnel] = NULL;
608
609 for (i = 0; i < hw_prof->cnt; i++)
610 hw_prof->vsi_h[i] = 0;
611 pf->hw_prof_cnt[ptype][is_tunnel] = 0;
612 }
613
614 /* Remove all created profiles */
615 static void
616 ice_fdir_prof_rm_all(struct ice_pf *pf)
617 {
618 enum ice_fltr_ptype ptype;
619
620 for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
621 ptype < ICE_FLTR_PTYPE_MAX;
622 ptype++) {
623 ice_fdir_prof_rm(pf, ptype, false);
624 ice_fdir_prof_rm(pf, ptype, true);
625 }
626 }
627
628 /*
629 * ice_fdir_teardown - release the Flow Director resources
630 * @pf: board private structure
631 */
632 static void
633 ice_fdir_teardown(struct ice_pf *pf)
634 {
635 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
636 struct ice_hw *hw = ICE_PF_TO_HW(pf);
637 struct ice_vsi *vsi;
638 int err;
639
640 vsi = pf->fdir.fdir_vsi;
641 if (!vsi)
642 return;
643
644 ice_vsi_disable_queues_intr(vsi);
645
646 err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
647 if (err)
648 PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
649
650 err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
651 if (err)
652 PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
653
654 err = ice_fdir_counter_release(pf);
655 if (err)
656 PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");
657
658 ice_fdir_release_filter_list(pf);
659
660 ice_tx_queue_release(pf->fdir.txq);
661 pf->fdir.txq = NULL;
662 ice_rx_queue_release(pf->fdir.rxq);
663 pf->fdir.rxq = NULL;
664 ice_fdir_prof_rm_all(pf);
665 ice_fdir_prof_free(hw);
666 ice_release_vsi(vsi);
667 pf->fdir.fdir_vsi = NULL;
668
669 if (pf->fdir.mz) {
670 err = rte_memzone_free(pf->fdir.mz);
671 pf->fdir.mz = NULL;
672 if (err)
673 PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
674 }
675 }
676
677 static int
678 ice_fdir_cur_prof_conflict(struct ice_pf *pf,
679 enum ice_fltr_ptype ptype,
680 struct ice_flow_seg_info *seg,
681 bool is_tunnel)
682 {
683 struct ice_hw *hw = ICE_PF_TO_HW(pf);
684 struct ice_flow_seg_info *ori_seg;
685 struct ice_fd_hw_prof *hw_prof;
686
687 hw_prof = hw->fdir_prof[ptype];
688 ori_seg = hw_prof->fdir_seg[is_tunnel];
689
690 /* profile does not exist */
691 if (!ori_seg)
692 return 0;
693
694 /* input sets are identical: the existing profile can be reused, return -EEXIST */
695 if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
696 (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
697 PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
698 ptype);
699 return -EEXIST;
700 }
701
702 /* a rule with a conflicting input set already exists, so give up */
703 if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
704 PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
705 ptype);
706 return -EINVAL;
707 }
708
709 /* it's safe to delete an empty profile */
710 ice_fdir_prof_rm(pf, ptype, is_tunnel);
711 return 0;
712 }
713
714 static bool
715 ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
716 enum ice_fltr_ptype ptype,
717 bool is_tunnel)
718 {
719 struct ice_hw *hw = ICE_PF_TO_HW(pf);
720 struct ice_fd_hw_prof *hw_prof;
721 struct ice_flow_seg_info *seg;
722
723 hw_prof = hw->fdir_prof[ptype];
724 seg = hw_prof->fdir_seg[is_tunnel];
725
726 /* profile does not exist */
727 if (!seg)
728 return true;
729
730 /* profile exists and rule exists, fail to resolve the conflict */
731 if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
732 return false;
733
734 /* it's safe to delete an empty profile */
735 ice_fdir_prof_rm(pf, ptype, is_tunnel);
736
737 return true;
738 }
739
740 static int
741 ice_fdir_cross_prof_conflict(struct ice_pf *pf,
742 enum ice_fltr_ptype ptype,
743 bool is_tunnel)
744 {
745 enum ice_fltr_ptype cflct_ptype;
746
747 switch (ptype) {
748 /* IPv4 */
749 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
750 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
751 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
752 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
753 if (!ice_fdir_prof_resolve_conflict
754 (pf, cflct_ptype, is_tunnel))
755 goto err;
756 break;
757 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
758 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
759 if (!ice_fdir_prof_resolve_conflict
760 (pf, cflct_ptype, is_tunnel))
761 goto err;
762 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
763 if (!ice_fdir_prof_resolve_conflict
764 (pf, cflct_ptype, is_tunnel))
765 goto err;
766 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
767 if (!ice_fdir_prof_resolve_conflict
768 (pf, cflct_ptype, is_tunnel))
769 goto err;
770 break;
771 /* IPv4 GTPU */
772 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
773 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
774 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
775 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
776 if (!ice_fdir_prof_resolve_conflict
777 (pf, cflct_ptype, is_tunnel))
778 goto err;
779 break;
780 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
781 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
782 if (!ice_fdir_prof_resolve_conflict
783 (pf, cflct_ptype, is_tunnel))
784 goto err;
785 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
786 if (!ice_fdir_prof_resolve_conflict
787 (pf, cflct_ptype, is_tunnel))
788 goto err;
789 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
790 if (!ice_fdir_prof_resolve_conflict
791 (pf, cflct_ptype, is_tunnel))
792 goto err;
793 break;
794 /* IPv6 */
795 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
796 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
797 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
798 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
799 if (!ice_fdir_prof_resolve_conflict
800 (pf, cflct_ptype, is_tunnel))
801 goto err;
802 break;
803 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
804 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
805 if (!ice_fdir_prof_resolve_conflict
806 (pf, cflct_ptype, is_tunnel))
807 goto err;
808 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
809 if (!ice_fdir_prof_resolve_conflict
810 (pf, cflct_ptype, is_tunnel))
811 goto err;
812 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
813 if (!ice_fdir_prof_resolve_conflict
814 (pf, cflct_ptype, is_tunnel))
815 goto err;
816 break;
817 default:
818 break;
819 }
820 return 0;
821 err:
822 PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
823 ptype, cflct_ptype);
824 return -EINVAL;
825 }
826
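/*
 * Program an FDIR HW profile for the given flow type: after checking for
 * input set conflicts, add a flow profile plus one flow entry for the main
 * VSI and one for the control (FDIR) VSI.
 */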
827 static int
828 ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
829 struct ice_vsi *ctrl_vsi,
830 struct ice_flow_seg_info *seg,
831 enum ice_fltr_ptype ptype,
832 bool is_tunnel)
833 {
834 struct ice_hw *hw = ICE_PF_TO_HW(pf);
835 enum ice_flow_dir dir = ICE_FLOW_RX;
836 struct ice_fd_hw_prof *hw_prof;
837 struct ice_flow_prof *prof;
838 uint64_t entry_1 = 0;
839 uint64_t entry_2 = 0;
840 uint16_t vsi_num;
841 int ret;
842 uint64_t prof_id;
843
844 /* check whether the current profile has an input set conflict. */
845 ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
846 if (ret)
847 return ret;
848
849 /* check whether the profile conflicts with other profiles. */
850 ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
851 if (ret)
852 return ret;
853
854 prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
855 ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
856 (is_tunnel) ? 2 : 1, NULL, 0, &prof);
857 if (ret)
858 return ret;
859 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
860 vsi->idx, ICE_FLOW_PRIO_NORMAL,
861 seg, NULL, 0, &entry_1);
862 if (ret) {
863 PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
864 ptype);
865 goto err_add_prof;
866 }
867 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
868 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
869 seg, NULL, 0, &entry_2);
870 if (ret) {
871 PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
872 ptype);
873 goto err_add_entry;
874 }
875
876 hw_prof = hw->fdir_prof[ptype];
877 pf->hw_prof_cnt[ptype][is_tunnel] = 0;
878 hw_prof->cnt = 0;
879 hw_prof->fdir_seg[is_tunnel] = seg;
880 hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
881 hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
882 pf->hw_prof_cnt[ptype][is_tunnel]++;
883 hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
884 hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
885 pf->hw_prof_cnt[ptype][is_tunnel]++;
886
887 return ret;
888
889 err_add_entry:
890 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
891 ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
892 ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
893 err_add_prof:
894 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
895
896 return ret;
897 }
898
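/* Translate ICE_INSET_* bits into a list of ICE_FLOW_FIELD_IDX_* values. */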
899 static void
900 ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
901 {
902 uint32_t i, j;
903
904 struct ice_inset_map {
905 uint64_t inset;
906 enum ice_flow_field fld;
907 };
908 static const struct ice_inset_map ice_inset_map[] = {
909 {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
910 {ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
911 {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
912 {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
913 {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
914 {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
915 {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
916 {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
917 {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
918 {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
919 {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
920 {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
921 {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
922 {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
923 {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
924 {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
925 {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
926 {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
927 {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
928 {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
929 {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
930 {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
931 {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
932 {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
933 {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
934 {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
935 {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
936 {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
937 };
938
939 for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
940 if ((inset & ice_inset_map[i].inset) ==
941 ice_inset_map[i].inset)
942 field[j++] = ice_inset_map[i].fld;
943 }
944 }
945
946 static void
947 ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
948 {
949 switch (flow) {
950 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
951 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
952 ICE_FLOW_SEG_HDR_IPV4 |
953 ICE_FLOW_SEG_HDR_IPV_OTHER);
954 break;
955 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
956 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
957 ICE_FLOW_SEG_HDR_IPV4 |
958 ICE_FLOW_SEG_HDR_IPV_OTHER);
959 break;
960 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
961 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
962 ICE_FLOW_SEG_HDR_IPV4 |
963 ICE_FLOW_SEG_HDR_IPV_OTHER);
964 break;
965 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
966 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
967 ICE_FLOW_SEG_HDR_IPV_OTHER);
968 break;
969 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
970 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
971 ICE_FLOW_SEG_HDR_IPV6 |
972 ICE_FLOW_SEG_HDR_IPV_OTHER);
973 break;
974 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
975 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
976 ICE_FLOW_SEG_HDR_IPV6 |
977 ICE_FLOW_SEG_HDR_IPV_OTHER);
978 break;
979 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
980 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
981 ICE_FLOW_SEG_HDR_IPV6 |
982 ICE_FLOW_SEG_HDR_IPV_OTHER);
983 break;
984 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
985 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
986 ICE_FLOW_SEG_HDR_IPV_OTHER);
987 break;
988 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
989 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
990 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
991 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
992 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
993 ICE_FLOW_SEG_HDR_IPV4 |
994 ICE_FLOW_SEG_HDR_IPV_OTHER);
995 break;
996 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_OTHER:
997 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
998 ICE_FLOW_SEG_HDR_GTPU_IP |
999 ICE_FLOW_SEG_HDR_IPV4 |
1000 ICE_FLOW_SEG_HDR_IPV_OTHER);
1001 break;
1002 case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER:
1003 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
1004 ICE_FLOW_SEG_HDR_IPV6 |
1005 ICE_FLOW_SEG_HDR_IPV_OTHER);
1006 break;
1007 case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH_IPV6_OTHER:
1008 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
1009 ICE_FLOW_SEG_HDR_GTPU_IP |
1010 ICE_FLOW_SEG_HDR_IPV6 |
1011 ICE_FLOW_SEG_HDR_IPV_OTHER);
1012 break;
1013 case ICE_FLTR_PTYPE_NON_IP_L2:
1014 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
1015 break;
1016 default:
1017 PMD_DRV_LOG(ERR, "Unsupported filter type.");
1018 break;
1019 }
1020 }
1021
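/*
 * Build the flow segment(s) for the given flow type and input sets and
 * program them into the FDIR HW table: seg_tun[0] holds the outer (tunnel)
 * part, seg_tun[1] the inner or non-tunneled part.
 */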
1022 static int
1023 ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
1024 uint64_t inner_input_set, uint64_t outer_input_set,
1025 enum ice_fdir_tunnel_type ttype)
1026 {
1027 struct ice_flow_seg_info *seg;
1028 struct ice_flow_seg_info *seg_tun = NULL;
1029 enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
1030 uint64_t input_set;
1031 bool is_tunnel;
1032 int k, i, ret = 0;
1033
1034 if (!(inner_input_set | outer_input_set))
1035 return -EINVAL;
1036
1037 seg_tun = (struct ice_flow_seg_info *)
1038 ice_malloc(hw, sizeof(*seg_tun) * ICE_FD_HW_SEG_MAX);
1039 if (!seg_tun) {
1040 PMD_DRV_LOG(ERR, "No memory can be allocated");
1041 return -ENOMEM;
1042 }
1043
1044 /* use seg_tun[1] to record tunnel inner part or non-tunnel */
1045 for (k = 0; k <= ICE_FD_HW_SEG_TUN; k++) {
1046 seg = &seg_tun[k];
1047 input_set = (k == ICE_FD_HW_SEG_TUN) ? inner_input_set : outer_input_set;
1048 if (input_set == 0)
1049 continue;
1050
1051 for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
1052 field[i] = ICE_FLOW_FIELD_IDX_MAX;
1053
1054 ice_fdir_input_set_parse(input_set, field);
1055
1056 ice_fdir_input_set_hdrs(flow, seg);
1057
1058 for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
1059 ice_flow_set_fld(seg, field[i],
1060 ICE_FLOW_FLD_OFF_INVAL,
1061 ICE_FLOW_FLD_OFF_INVAL,
1062 ICE_FLOW_FLD_OFF_INVAL, false);
1063 }
1064 }
1065
1066 is_tunnel = ice_fdir_is_tunnel_profile(ttype);
1067 if (!is_tunnel) {
1068 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
1069 seg_tun + 1, flow, false);
1070 } else {
1071 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
1072 seg_tun, flow, true);
1073 }
1074
1075 if (!ret) {
1076 return ret;
1077 } else if (ret < 0) {
1078 rte_free(seg_tun);
1079 return (ret == -EEXIST) ? 0 : ret;
1080 } else {
1081 return ret;
1082 }
1083 }
1084
1085 static void
1086 ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
1087 bool is_tunnel, bool add)
1088 {
1089 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1090 int cnt;
1091
1092 cnt = (add) ? 1 : -1;
1093 hw->fdir_active_fltr += cnt;
1094 if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
1095 PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
1096 else
1097 pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
1098 }
1099
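/*
 * Engine init: set up the FDIR VSI and queues, then register the flow
 * parser matching the active DDP package (comms or OS default).
 */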
1100 static int
1101 ice_fdir_init(struct ice_adapter *ad)
1102 {
1103 struct ice_pf *pf = &ad->pf;
1104 struct ice_flow_parser *parser;
1105 int ret;
1106
1107 if (ad->hw.dcf_enabled)
1108 return 0;
1109
1110 ret = ice_fdir_setup(pf);
1111 if (ret)
1112 return ret;
1113
1114 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1115 parser = &ice_fdir_parser_comms;
1116 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1117 parser = &ice_fdir_parser_os;
1118 else
1119 return -EINVAL;
1120
1121 return ice_register_parser(parser, ad);
1122 }
1123
1124 static void
1125 ice_fdir_uninit(struct ice_adapter *ad)
1126 {
1127 struct ice_pf *pf = &ad->pf;
1128 struct ice_flow_parser *parser;
1129
1130 if (ad->hw.dcf_enabled)
1131 return;
1132
1133 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1134 parser = &ice_fdir_parser_comms;
1135 else
1136 parser = &ice_fdir_parser_os;
1137
1138 ice_unregister_parser(parser, ad);
1139
1140 ice_fdir_teardown(pf);
1141 }
1142
1143 static int
1144 ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
1145 {
1146 if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
1147 return 1;
1148 else
1149 return 0;
1150 }
1151
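/*
 * Program (add) or remove one filter in HW: build the programming
 * descriptor and the dummy packet, then submit them through the FDIR
 * programming queue.
 */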
1152 static int
1153 ice_fdir_add_del_filter(struct ice_pf *pf,
1154 struct ice_fdir_filter_conf *filter,
1155 bool add)
1156 {
1157 struct ice_fltr_desc desc;
1158 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1159 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1160 bool is_tun;
1161 int ret;
1162
1163 filter->input.dest_vsi = pf->main_vsi->idx;
1164
1165 memset(&desc, 0, sizeof(desc));
1166 filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1167 ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
1168
1169 is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1170
1171 memset(pkt, 0, ICE_FDIR_PKT_LEN);
1172 ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
1173 if (ret) {
1174 PMD_DRV_LOG(ERR, "Generate dummy packet failed");
1175 return -EINVAL;
1176 }
1177
1178 return ice_fdir_programming(pf, &desc);
1179 }
1180
1181 static void
1182 ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
1183 struct ice_fdir_filter_conf *filter)
1184 {
1185 struct ice_fdir_fltr *input = &filter->input;
1186 memset(key, 0, sizeof(*key));
1187
1188 key->flow_type = input->flow_type;
1189 rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
1190 rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
1191 rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
1192 rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));
1193
1194 rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
1195 rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));
1196
1197 key->tunnel_type = filter->tunnel_type;
1198 }
1199
1200 /* Check if the flow director filter already exists */
1201 static struct ice_fdir_filter_conf *
1202 ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
1203 const struct ice_fdir_fltr_pattern *key)
1204 {
1205 int ret;
1206
1207 ret = rte_hash_lookup(fdir_info->hash_table, key);
1208 if (ret < 0)
1209 return NULL;
1210
1211 return fdir_info->hash_map[ret];
1212 }
1213
1214 /* Add a flow director entry into the SW list */
1215 static int
1216 ice_fdir_entry_insert(struct ice_pf *pf,
1217 struct ice_fdir_filter_conf *entry,
1218 struct ice_fdir_fltr_pattern *key)
1219 {
1220 struct ice_fdir_info *fdir_info = &pf->fdir;
1221 int ret;
1222
1223 ret = rte_hash_add_key(fdir_info->hash_table, key);
1224 if (ret < 0) {
1225 PMD_DRV_LOG(ERR,
1226 "Failed to insert fdir entry to hash table %d!",
1227 ret);
1228 return ret;
1229 }
1230 fdir_info->hash_map[ret] = entry;
1231
1232 return 0;
1233 }
1234
1235 /* Delete a flow director entry from the SW list */
1236 static int
1237 ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
1238 {
1239 struct ice_fdir_info *fdir_info = &pf->fdir;
1240 int ret;
1241
1242 ret = rte_hash_del_key(fdir_info->hash_table, key);
1243 if (ret < 0) {
1244 PMD_DRV_LOG(ERR,
1245 "Failed to delete fdir filter to hash table %d!",
1246 ret);
1247 return ret;
1248 }
1249 fdir_info->hash_map[ret] = NULL;
1250
1251 return 0;
1252 }
1253
1254 static int
1255 ice_fdir_create_filter(struct ice_adapter *ad,
1256 struct rte_flow *flow,
1257 void *meta,
1258 struct rte_flow_error *error)
1259 {
1260 struct ice_pf *pf = &ad->pf;
1261 struct ice_fdir_filter_conf *filter = meta;
1262 struct ice_fdir_info *fdir_info = &pf->fdir;
1263 struct ice_fdir_filter_conf *entry, *node;
1264 struct ice_fdir_fltr_pattern key;
1265 bool is_tun;
1266 int ret;
1267
1268 ice_fdir_extract_fltr_key(&key, filter);
1269 node = ice_fdir_entry_lookup(fdir_info, &key);
1270 if (node) {
1271 rte_flow_error_set(error, EEXIST,
1272 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1273 "Rule already exists!");
1274 return -rte_errno;
1275 }
1276
1277 entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
1278 if (!entry) {
1279 rte_flow_error_set(error, ENOMEM,
1280 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1281 "Failed to allocate memory");
1282 return -rte_errno;
1283 }
1284
1285 is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1286
1287 ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
1288 filter->input_set, filter->outer_input_set,
1289 filter->tunnel_type);
1290 if (ret) {
1291 rte_flow_error_set(error, -ret,
1292 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1293 "Profile configure failed.");
1294 goto free_entry;
1295 }
1296
1297 /* alloc counter for FDIR */
1298 if (filter->input.cnt_ena) {
1299 struct rte_flow_action_count *act_count = &filter->act_count;
1300
1301 filter->counter = ice_fdir_counter_alloc(pf,
1302 act_count->shared,
1303 act_count->id);
1304 if (!filter->counter) {
1305 rte_flow_error_set(error, EINVAL,
1306 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1307 "Failed to alloc FDIR counter.");
1308 goto free_entry;
1309 }
1310 filter->input.cnt_index = filter->counter->hw_index;
1311 }
1312
1313 ret = ice_fdir_add_del_filter(pf, filter, true);
1314 if (ret) {
1315 rte_flow_error_set(error, -ret,
1316 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1317 "Add filter rule failed.");
1318 goto free_counter;
1319 }
1320
1321 if (filter->mark_flag == 1)
1322 ice_fdir_rx_parsing_enable(ad, 1);
1323
1324 rte_memcpy(entry, filter, sizeof(*entry));
1325 ret = ice_fdir_entry_insert(pf, entry, &key);
1326 if (ret) {
1327 rte_flow_error_set(error, -ret,
1328 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1329 "Insert entry to table failed.");
1330 goto free_entry;
1331 }
1332
1333 flow->rule = entry;
1334 ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);
1335
1336 return 0;
1337
1338 free_counter:
1339 if (filter->counter) {
1340 ice_fdir_counter_free(pf, filter->counter);
1341 filter->counter = NULL;
1342 }
1343
1344 free_entry:
1345 rte_free(entry);
1346 return -rte_errno;
1347 }
1348
1349 static int
1350 ice_fdir_destroy_filter(struct ice_adapter *ad,
1351 struct rte_flow *flow,
1352 struct rte_flow_error *error)
1353 {
1354 struct ice_pf *pf = &ad->pf;
1355 struct ice_fdir_info *fdir_info = &pf->fdir;
1356 struct ice_fdir_filter_conf *filter, *entry;
1357 struct ice_fdir_fltr_pattern key;
1358 bool is_tun;
1359 int ret;
1360
1361 filter = (struct ice_fdir_filter_conf *)flow->rule;
1362
1363 is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1364
1365 if (filter->counter) {
1366 ice_fdir_counter_free(pf, filter->counter);
1367 filter->counter = NULL;
1368 }
1369
1370 ice_fdir_extract_fltr_key(&key, filter);
1371 entry = ice_fdir_entry_lookup(fdir_info, &key);
1372 if (!entry) {
1373 rte_flow_error_set(error, ENOENT,
1374 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1375 "Can't find entry.");
1376 return -rte_errno;
1377 }
1378
1379 ret = ice_fdir_add_del_filter(pf, filter, false);
1380 if (ret) {
1381 rte_flow_error_set(error, -ret,
1382 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1383 "Del filter rule failed.");
1384 return -rte_errno;
1385 }
1386
1387 ret = ice_fdir_entry_del(pf, &key);
1388 if (ret) {
1389 rte_flow_error_set(error, -ret,
1390 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1391 "Remove entry from table failed.");
1392 return -rte_errno;
1393 }
1394
1395 ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
1396
1397 if (filter->mark_flag == 1)
1398 ice_fdir_rx_parsing_enable(ad, 0);
1399
1400 flow->rule = NULL;
1401
1402 rte_free(filter);
1403
1404 return 0;
1405 }
1406
1407 static int
1408 ice_fdir_query_count(struct ice_adapter *ad,
1409 struct rte_flow *flow,
1410 struct rte_flow_query_count *flow_stats,
1411 struct rte_flow_error *error)
1412 {
1413 struct ice_pf *pf = &ad->pf;
1414 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1415 struct ice_fdir_filter_conf *filter = flow->rule;
1416 struct ice_fdir_counter *counter = filter->counter;
1417 uint64_t hits_lo, hits_hi;
1418
1419 if (!counter) {
1420 rte_flow_error_set(error, EINVAL,
1421 RTE_FLOW_ERROR_TYPE_ACTION,
1422 NULL,
1423 "FDIR counters not available");
1424 return -rte_errno;
1425 }
1426
1427 /*
1428 * Reading the low 32-bits latches the high 32-bits into a shadow
1429 * register. Reading the high 32 bits returns the value in the
1430 * shadow register.
1431 */
1432 hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
1433 hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));
1434
1435 flow_stats->hits_set = 1;
1436 flow_stats->hits = hits_lo | (hits_hi << 32);
1437 flow_stats->bytes_set = 0;
1438 flow_stats->bytes = 0;
1439
1440 if (flow_stats->reset) {
1441 /* reset statistic counter value */
1442 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
1443 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
1444 }
1445
1446 return 0;
1447 }
1448
1449 static struct ice_flow_engine ice_fdir_engine = {
1450 .init = ice_fdir_init,
1451 .uninit = ice_fdir_uninit,
1452 .create = ice_fdir_create_filter,
1453 .destroy = ice_fdir_destroy_filter,
1454 .query_count = ice_fdir_query_count,
1455 .type = ICE_FLOW_ENGINE_FDIR,
1456 };
1457
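/*
 * Parse an RSS action as an FDIR queue region: the queues must be
 * consecutive, a power-of-two count no larger than
 * ICE_FDIR_MAX_QREGION_SIZE, and within the port's Rx queue range.
 */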
1458 static int
1459 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1460 struct rte_flow_error *error,
1461 const struct rte_flow_action *act,
1462 struct ice_fdir_filter_conf *filter)
1463 {
1464 const struct rte_flow_action_rss *rss = act->conf;
1465 uint32_t i;
1466
1467 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1468 rte_flow_error_set(error, EINVAL,
1469 RTE_FLOW_ERROR_TYPE_ACTION, act,
1470 "Invalid action.");
1471 return -rte_errno;
1472 }
1473
1474 if (rss->queue_num <= 1) {
1475 rte_flow_error_set(error, EINVAL,
1476 RTE_FLOW_ERROR_TYPE_ACTION, act,
1477 "Queue region size can't be 0 or 1.");
1478 return -rte_errno;
1479 }
1480
1481 /* check if the queue indexes for the queue region are continuous */
1482 for (i = 0; i < rss->queue_num - 1; i++) {
1483 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1484 rte_flow_error_set(error, EINVAL,
1485 RTE_FLOW_ERROR_TYPE_ACTION, act,
1486 "Discontinuous queue region");
1487 return -rte_errno;
1488 }
1489 }
1490
1491 if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1492 rte_flow_error_set(error, EINVAL,
1493 RTE_FLOW_ERROR_TYPE_ACTION, act,
1494 "Invalid queue region indexes.");
1495 return -rte_errno;
1496 }
1497
1498 if (!(rte_is_power_of_2(rss->queue_num) &&
1499 (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1500 rte_flow_error_set(error, EINVAL,
1501 RTE_FLOW_ERROR_TYPE_ACTION, act,
1502 "The region size should be any of the following values:"
1503 "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
1504 "of queues do not exceed the VSI allocation.");
1505 return -rte_errno;
1506 }
1507
1508 filter->input.q_index = rss->queue[0];
1509 filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1510 filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1511
1512 return 0;
1513 }
1514
1515 static int
1516 ice_fdir_parse_action(struct ice_adapter *ad,
1517 const struct rte_flow_action actions[],
1518 struct rte_flow_error *error,
1519 struct ice_fdir_filter_conf *filter)
1520 {
1521 struct ice_pf *pf = &ad->pf;
1522 const struct rte_flow_action_queue *act_q;
1523 const struct rte_flow_action_mark *mark_spec = NULL;
1524 const struct rte_flow_action_count *act_count;
1525 uint32_t dest_num = 0;
1526 uint32_t mark_num = 0;
1527 uint32_t counter_num = 0;
1528 int ret;
1529
1530 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1531 switch (actions->type) {
1532 case RTE_FLOW_ACTION_TYPE_VOID:
1533 break;
1534 case RTE_FLOW_ACTION_TYPE_QUEUE:
1535 dest_num++;
1536
1537 act_q = actions->conf;
1538 filter->input.q_index = act_q->index;
1539 if (filter->input.q_index >=
1540 pf->dev_data->nb_rx_queues) {
1541 rte_flow_error_set(error, EINVAL,
1542 RTE_FLOW_ERROR_TYPE_ACTION,
1543 actions,
1544 "Invalid queue for FDIR.");
1545 return -rte_errno;
1546 }
1547 filter->input.dest_ctl =
1548 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1549 break;
1550 case RTE_FLOW_ACTION_TYPE_DROP:
1551 dest_num++;
1552
1553 filter->input.dest_ctl =
1554 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1555 break;
1556 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1557 dest_num++;
1558
1559 filter->input.dest_ctl =
1560 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1561 break;
1562 case RTE_FLOW_ACTION_TYPE_RSS:
1563 dest_num++;
1564
1565 ret = ice_fdir_parse_action_qregion(pf,
1566 error, actions, filter);
1567 if (ret)
1568 return ret;
1569 break;
1570 case RTE_FLOW_ACTION_TYPE_MARK:
1571 mark_num++;
1572 filter->mark_flag = 1;
1573 mark_spec = actions->conf;
1574 filter->input.fltr_id = mark_spec->id;
1575 filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
1576 break;
1577 case RTE_FLOW_ACTION_TYPE_COUNT:
1578 counter_num++;
1579
1580 act_count = actions->conf;
1581 filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1582 rte_memcpy(&filter->act_count, act_count,
1583 sizeof(filter->act_count));
1584
1585 break;
1586 default:
1587 rte_flow_error_set(error, EINVAL,
1588 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1589 "Invalid action.");
1590 return -rte_errno;
1591 }
1592 }
1593
1594 if (dest_num >= 2) {
1595 rte_flow_error_set(error, EINVAL,
1596 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1597 "Unsupported action combination");
1598 return -rte_errno;
1599 }
1600
1601 if (mark_num >= 2) {
1602 rte_flow_error_set(error, EINVAL,
1603 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1604 "Too many mark actions");
1605 return -rte_errno;
1606 }
1607
1608 if (counter_num >= 2) {
1609 rte_flow_error_set(error, EINVAL,
1610 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1611 "Too many count actions");
1612 return -rte_errno;
1613 }
1614
1615 if (dest_num + mark_num + counter_num == 0) {
1616 rte_flow_error_set(error, EINVAL,
1617 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1618 "Empty action");
1619 return -rte_errno;
1620 }
1621
1622 /* set default action to PASSTHRU mode, in "mark/count only" case. */
1623 if (dest_num == 0)
1624 filter->input.dest_ctl =
1625 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1626
1627 return 0;
1628 }
1629
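/*
 * Walk the rte_flow pattern items, derive the flow type and input set, and
 * fill the FDIR filter's input fields from the item specs and masks.
 */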
1630 static int
1631 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1632 const struct rte_flow_item pattern[],
1633 struct rte_flow_error *error,
1634 struct ice_fdir_filter_conf *filter)
1635 {
1636 const struct rte_flow_item *item = pattern;
1637 enum rte_flow_item_type item_type;
1638 enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1639 enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1640 const struct rte_flow_item_eth *eth_spec, *eth_mask;
1641 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1642 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1643 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1644 const struct rte_flow_item_udp *udp_spec, *udp_mask;
1645 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1646 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1647 const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1648 const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1649 uint64_t input_set = ICE_INSET_NONE;
1650 uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1651 uint8_t ipv6_addr_mask[16] = {
1652 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1653 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1654 };
1655 uint32_t vtc_flow_cpu;
1656 uint16_t ether_type;
1657 enum rte_flow_item_type next_type;
1658
1659 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1660 if (item->last) {
1661 rte_flow_error_set(error, EINVAL,
1662 RTE_FLOW_ERROR_TYPE_ITEM,
1663 item,
1664 "Not support range");
1665 return -rte_errno;
1666 }
1667 item_type = item->type;
1668
1669 switch (item_type) {
1670 case RTE_FLOW_ITEM_TYPE_ETH:
1671 eth_spec = item->spec;
1672 eth_mask = item->mask;
1673 next_type = (item + 1)->type;
1674
1675 if (eth_spec && eth_mask) {
1676 if (!rte_is_zero_ether_addr(ð_mask->dst)) {
1677 input_set |= ICE_INSET_DMAC;
1678 rte_memcpy(&filter->input.ext_data.dst_mac,
1679 ð_spec->dst,
1680 RTE_ETHER_ADDR_LEN);
1681 }
1682
1683 if (!rte_is_zero_ether_addr(ð_mask->src)) {
1684 input_set |= ICE_INSET_SMAC;
1685 rte_memcpy(&filter->input.ext_data.src_mac,
1686 ð_spec->src,
1687 RTE_ETHER_ADDR_LEN);
1688 }
1689
1690 /* Ignore this field except for ICE_FLTR_PTYPE_NON_IP_L2 */
1691 if (eth_mask->type == RTE_BE16(0xffff) &&
1692 next_type == RTE_FLOW_ITEM_TYPE_END) {
1693 input_set |= ICE_INSET_ETHERTYPE;
1694 ether_type = rte_be_to_cpu_16(eth_spec->type);
1695
1696 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
1697 ether_type == RTE_ETHER_TYPE_IPV6) {
1698 rte_flow_error_set(error, EINVAL,
1699 RTE_FLOW_ERROR_TYPE_ITEM,
1700 item,
1701 "Unsupported ether_type.");
1702 return -rte_errno;
1703 }
1704
1705 rte_memcpy(&filter->input.ext_data.ether_type,
1706 ð_spec->type,
1707 sizeof(eth_spec->type));
1708 flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
1709 }
1710 }
1711 break;
1712 case RTE_FLOW_ITEM_TYPE_IPV4:
1713 l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1714 ipv4_spec = item->spec;
1715 ipv4_mask = item->mask;
1716
1717 if (ipv4_spec && ipv4_mask) {
1718 /* Check IPv4 mask and update input set */
1719 if (ipv4_mask->hdr.version_ihl ||
1720 ipv4_mask->hdr.total_length ||
1721 ipv4_mask->hdr.packet_id ||
1722 ipv4_mask->hdr.fragment_offset ||
1723 ipv4_mask->hdr.hdr_checksum) {
1724 rte_flow_error_set(error, EINVAL,
1725 RTE_FLOW_ERROR_TYPE_ITEM,
1726 item,
1727 "Invalid IPv4 mask.");
1728 return -rte_errno;
1729 }
1730 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1731 input_set |= tunnel_type ?
1732 ICE_INSET_TUN_IPV4_SRC :
1733 ICE_INSET_IPV4_SRC;
1734 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1735 input_set |= tunnel_type ?
1736 ICE_INSET_TUN_IPV4_DST :
1737 ICE_INSET_IPV4_DST;
1738 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1739 input_set |= ICE_INSET_IPV4_TOS;
1740 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1741 input_set |= ICE_INSET_IPV4_TTL;
1742 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1743 input_set |= ICE_INSET_IPV4_PROTO;
1744
1745 filter->input.ip.v4.dst_ip =
1746 ipv4_spec->hdr.dst_addr;
1747 filter->input.ip.v4.src_ip =
1748 ipv4_spec->hdr.src_addr;
1749 filter->input.ip.v4.tos =
1750 ipv4_spec->hdr.type_of_service;
1751 filter->input.ip.v4.ttl =
1752 ipv4_spec->hdr.time_to_live;
1753 filter->input.ip.v4.proto =
1754 ipv4_spec->hdr.next_proto_id;
1755 }
1756
1757 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1758 break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (ipv6_spec && ipv6_mask) {
				/* Check IPv6 mask and update input set */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv6 mask");
					return -rte_errno;
				}

				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;

				if ((ipv6_mask->hdr.vtc_flow &
				     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
				    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;

				rte_memcpy(filter->input.ip.v6.dst_ip,
					ipv6_spec->hdr.dst_addr, 16);
				rte_memcpy(filter->input.ip.v6.src_ip,
					ipv6_spec->hdr.src_addr, 16);

				vtc_flow_cpu =
					rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
				filter->input.ip.v6.tc =
					(uint8_t)(vtc_flow_cpu >>
						  ICE_FDIR_IPV6_TC_OFFSET);
				filter->input.ip.v6.proto =
					ipv6_spec->hdr.proto;
				filter->input.ip.v6.hlim =
					ipv6_spec->hdr.hop_limits;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						ICE_INSET_TUN_TCP_SRC_PORT :
						ICE_INSET_TCP_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						ICE_INSET_TUN_TCP_DST_PORT :
						ICE_INSET_TCP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						tcp_spec->hdr.dst_port;
					filter->input.ip.v4.src_port =
						tcp_spec->hdr.src_port;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						tcp_spec->hdr.dst_port;
					filter->input.ip.v6.src_port =
						tcp_spec->hdr.src_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						ICE_INSET_TUN_UDP_SRC_PORT :
						ICE_INSET_UDP_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						ICE_INSET_TUN_UDP_DST_PORT :
						ICE_INSET_UDP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						udp_spec->hdr.dst_port;
					filter->input.ip.v4.src_port =
						udp_spec->hdr.src_port;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.src_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v6.dst_port =
						udp_spec->hdr.dst_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						ICE_INSET_TUN_SCTP_SRC_PORT :
						ICE_INSET_SCTP_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						ICE_INSET_TUN_SCTP_DST_PORT :
						ICE_INSET_SCTP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						sctp_spec->hdr.dst_port;
					filter->input.ip.v4.src_port =
						sctp_spec->hdr.src_port;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						sctp_spec->hdr.dst_port;
					filter->input.ip.v6.src_port =
						sctp_spec->hdr.src_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
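		/*
		 * A VXLAN item carries no matchable fields here; its presence
		 * only marks the rule as tunneled so that the inner IPv4/L4
		 * items select the ICE_INSET_TUN_* input-set bits.
		 */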
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;

			if (vxlan_spec || vxlan_mask) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid vxlan field");
				return -rte_errno;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
			break;
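		/*
		 * GTP-U matching supports the TEID; the optional GTP_PSC item
		 * that may follow adds the QFI. l3 is reset because these
		 * patterns end at the tunnel header.
		 */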
		case RTE_FLOW_ITEM_TYPE_GTPU:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			if (gtp_spec && gtp_mask) {
				if (gtp_mask->v_pt_rsv_flags ||
				    gtp_mask->msg_type ||
				    gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid GTP mask");
					return -rte_errno;
				}

				if (gtp_mask->teid == UINT32_MAX)
					input_set |= ICE_INSET_GTPU_TEID;

				filter->input.gtpu_data.teid = gtp_spec->teid;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->qfi == UINT8_MAX)
					input_set |= ICE_INSET_GTPU_QFI;

				filter->input.gtpu_data.qfi =
					gtp_psc_spec->qfi;
			}
			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"Invalid pattern item.");
			return -rte_errno;
		}
	}

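	/*
	 * A GTP-U pattern is parsed above as a plain IPv4/IPv6 UDP flow;
	 * refine the flow type here so it reflects the tunnel, with or
	 * without the PSC extension header.
	 */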
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH_IPV4_OTHER;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH_IPV6_OTHER;

	filter->tunnel_type = tunnel_type;
	filter->input.flow_type = flow_type;
	filter->input_set = input_set;

	return 0;
}

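/*
 * Parse callback shared by the FDIR parsers: look up the pattern in the
 * supported-pattern table, extract the input set and the actions into the
 * PF's FDIR filter configuration, and return that filter through 'meta'.
 */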
static int
ice_fdir_parse(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		goto error;
	input_set = filter->input_set | filter->outer_input_set;
	if (!input_set || input_set & ~item->input_set_mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
			pattern,
			"Invalid input set");
		ret = -rte_errno;
		goto error;
	}

	ret = ice_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		goto error;

	if (meta)
		*meta = filter;
error:
	rte_free(item);
	return ret;
}

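/*
 * Two parser instances share the same parse callback but use different
 * pattern tables: one for the OS default DDP package and one for the
 * comms package. Both run at the distributor stage.
 */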
static struct ice_flow_parser ice_fdir_parser_os = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_os,
	.array_len = RTE_DIM(ice_fdir_pattern_os),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_fdir_parser_comms = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_comms,
	.array_len = RTE_DIM(ice_fdir_pattern_comms),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

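/* Register the flow director engine with the generic flow framework at
 * constructor time, before any ice device is probed.
 */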
RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}