1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
3 */
4
5 #include <stdio.h>
6 #include <rte_flow.h>
7 #include <rte_hash.h>
8 #include <rte_hash_crc.h>
9 #include "base/ice_fdir.h"
10 #include "base/ice_flow.h"
11 #include "base/ice_type.h"
12 #include "ice_ethdev.h"
13 #include "ice_rxtx.h"
14 #include "ice_generic_flow.h"
15
16 #define ICE_FDIR_IPV6_TC_OFFSET 20
17 #define ICE_IPV6_TC_MASK (0xFF << ICE_FDIR_IPV6_TC_OFFSET)
18
19 #define ICE_FDIR_MAX_QREGION_SIZE 128
20
21 #define ICE_FDIR_INSET_ETH (\
22 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
23
24 #define ICE_FDIR_INSET_ETH_IPV4 (\
25 ICE_FDIR_INSET_ETH | \
26 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
27 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_PKID)
28
29 #define ICE_FDIR_INSET_ETH_IPV4_UDP (\
30 ICE_FDIR_INSET_ETH_IPV4 | \
31 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
32
33 #define ICE_FDIR_INSET_ETH_IPV4_TCP (\
34 ICE_FDIR_INSET_ETH_IPV4 | \
35 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
36
37 #define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
38 ICE_FDIR_INSET_ETH_IPV4 | \
39 ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
40
41 #define ICE_FDIR_INSET_ETH_IPV6 (\
42 ICE_INSET_DMAC | \
43 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
44 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR | \
45 ICE_INSET_IPV6_PKID)
46
47 #define ICE_FDIR_INSET_ETH_IPV6_UDP (\
48 ICE_FDIR_INSET_ETH_IPV6 | \
49 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
50
51 #define ICE_FDIR_INSET_ETH_IPV6_TCP (\
52 ICE_FDIR_INSET_ETH_IPV6 | \
53 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
54
55 #define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
56 ICE_FDIR_INSET_ETH_IPV6 | \
57 ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
58
59 #define ICE_FDIR_INSET_IPV4 (\
60 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
61 ICE_INSET_IPV4_PKID)
62
63 #define ICE_FDIR_INSET_IPV4_TCP (\
64 ICE_FDIR_INSET_IPV4 | \
65 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
66
67 #define ICE_FDIR_INSET_IPV4_UDP (\
68 ICE_FDIR_INSET_IPV4 | \
69 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
70
71 #define ICE_FDIR_INSET_IPV4_SCTP (\
72 ICE_FDIR_INSET_IPV4 | \
73 ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
74
75 #define ICE_FDIR_INSET_ETH_IPV4_VXLAN (\
76 ICE_FDIR_INSET_ETH | ICE_FDIR_INSET_ETH_IPV4 | \
77 ICE_INSET_VXLAN_VNI)
78
79 #define ICE_FDIR_INSET_IPV4_GTPU (\
80 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)
81
82 #define ICE_FDIR_INSET_IPV4_GTPU_EH (\
83 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
84 ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
85
86 #define ICE_FDIR_INSET_IPV6_GTPU (\
87 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_GTPU_TEID)
88
89 #define ICE_FDIR_INSET_IPV6_GTPU_EH (\
90 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
91 ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
92
93 #define ICE_FDIR_INSET_IPV4_ESP (\
94 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
95 ICE_INSET_ESP_SPI)
96
97 #define ICE_FDIR_INSET_IPV6_ESP (\
98 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
99 ICE_INSET_ESP_SPI)
100
101 #define ICE_FDIR_INSET_IPV4_NATT_ESP (\
102 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
103 ICE_INSET_NAT_T_ESP_SPI)
104
105 #define ICE_FDIR_INSET_IPV6_NATT_ESP (\
106 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
107 ICE_INSET_NAT_T_ESP_SPI)
108
109 static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
110 {pattern_raw, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
111 {pattern_ethertype, ICE_FDIR_INSET_ETH, ICE_INSET_NONE, ICE_INSET_NONE},
112 {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
113 {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
114 {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
115 {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE, ICE_INSET_NONE},
116 {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
117 {pattern_eth_ipv6_frag_ext, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
118 {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
119 {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
120 {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE, ICE_INSET_NONE},
121 {pattern_eth_ipv4_esp, ICE_FDIR_INSET_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
122 {pattern_eth_ipv4_udp_esp, ICE_FDIR_INSET_IPV4_NATT_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
123 {pattern_eth_ipv6_esp, ICE_FDIR_INSET_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
124 {pattern_eth_ipv6_udp_esp, ICE_FDIR_INSET_IPV6_NATT_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
125 {pattern_eth_ipv4_udp_vxlan_ipv4, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4, ICE_INSET_NONE},
126 {pattern_eth_ipv4_udp_vxlan_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4_UDP, ICE_INSET_NONE},
127 {pattern_eth_ipv4_udp_vxlan_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4_TCP, ICE_INSET_NONE},
128 {pattern_eth_ipv4_udp_vxlan_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4_SCTP, ICE_INSET_NONE},
129 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
130 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
131 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
132 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
133 /* The GTPU input set is duplicated in the 3rd column to align with shared code behavior. Ideally, only the GTPU fields would be put in the 2nd column. */
134 {pattern_eth_ipv4_gtpu, ICE_FDIR_INSET_IPV4_GTPU, ICE_FDIR_INSET_IPV4_GTPU, ICE_INSET_NONE},
135 {pattern_eth_ipv4_gtpu_eh, ICE_FDIR_INSET_IPV4_GTPU_EH, ICE_FDIR_INSET_IPV4_GTPU_EH, ICE_INSET_NONE},
136 {pattern_eth_ipv6_gtpu, ICE_FDIR_INSET_IPV6_GTPU, ICE_FDIR_INSET_IPV6_GTPU, ICE_INSET_NONE},
137 {pattern_eth_ipv6_gtpu_eh, ICE_FDIR_INSET_IPV6_GTPU_EH, ICE_FDIR_INSET_IPV6_GTPU_EH, ICE_INSET_NONE},
138 };
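/*
 * For illustration (assuming standard testpmd flow syntax), a rule matching
 * pattern_eth_ipv4_udp above could be created with:
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 dst is
 *   192.168.0.2 / udp src is 32 dst is 33 / end actions queue index 2 / end
 */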
139
140 static struct ice_flow_parser ice_fdir_parser;
141
142 static int
143 ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);
144
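/*
 * ice_memzone_reserve - look up an existing memzone by name, or reserve a
 * new IOVA-contiguous one for the FDIR programming packet
 */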
145 static const struct rte_memzone *
146 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
147 {
148 const struct rte_memzone *mz;
149
150 mz = rte_memzone_lookup(name);
151 if (mz)
152 return mz;
153
154 return rte_memzone_reserve_aligned(name, len, socket_id,
155 RTE_MEMZONE_IOVA_CONTIG,
156 ICE_RING_BASE_ALIGN);
157 }
158
159 #define ICE_FDIR_MZ_NAME "FDIR_MEMZONE"
160
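/*
 * ice_fdir_prof_alloc - allocate the per-ptype FDIR HW profile array;
 * already-allocated entries are freed again on failure
 */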
161 static int
162 ice_fdir_prof_alloc(struct ice_hw *hw)
163 {
164 enum ice_fltr_ptype ptype, fltr_ptype;
165
166 if (!hw->fdir_prof) {
167 hw->fdir_prof = (struct ice_fd_hw_prof **)
168 ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
169 sizeof(*hw->fdir_prof));
170 if (!hw->fdir_prof)
171 return -ENOMEM;
172 }
173 for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
174 ptype < ICE_FLTR_PTYPE_MAX;
175 ptype++) {
176 if (!hw->fdir_prof[ptype]) {
177 hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
178 ice_malloc(hw, sizeof(**hw->fdir_prof));
179 if (!hw->fdir_prof[ptype])
180 goto fail_mem;
181 }
182 }
183 return 0;
184
185 fail_mem:
186 for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
187 fltr_ptype < ptype;
188 fltr_ptype++) {
189 rte_free(hw->fdir_prof[fltr_ptype]);
190 hw->fdir_prof[fltr_ptype] = NULL;
191 }
192
193 rte_free(hw->fdir_prof);
194 hw->fdir_prof = NULL;
195
196 return -ENOMEM;
197 }
198
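/*
 * ice_fdir_counter_pool_add - allocate a pool of 'len' counters starting at
 * HW index 'index_start' and attach it to the counter container
 */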
199 static int
200 ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
201 struct ice_fdir_counter_pool_container *container,
202 uint32_t index_start,
203 uint32_t len)
204 {
205 struct ice_fdir_counter_pool *pool;
206 uint32_t i;
207 int ret = 0;
208
209 pool = rte_zmalloc("ice_fdir_counter_pool",
210 sizeof(*pool) +
211 sizeof(struct ice_fdir_counter) * len,
212 0);
213 if (!pool) {
214 PMD_INIT_LOG(ERR,
215 "Failed to allocate memory for fdir counter pool");
216 return -ENOMEM;
217 }
218
219 TAILQ_INIT(&pool->counter_list);
220 TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
221
222 for (i = 0; i < len; i++) {
223 struct ice_fdir_counter *counter = &pool->counters[i];
224
225 counter->hw_index = index_start + i;
226 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
227 }
228
229 if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
230 PMD_INIT_LOG(ERR, "FDIR counter pool is full");
231 ret = -EINVAL;
232 goto free_pool;
233 }
234
235 container->pools[container->index_free++] = pool;
236 return 0;
237
238 free_pool:
239 rte_free(pool);
240 return ret;
241 }
242
243 static int
244 ice_fdir_counter_init(struct ice_pf *pf)
245 {
246 struct ice_hw *hw = ICE_PF_TO_HW(pf);
247 struct ice_fdir_info *fdir_info = &pf->fdir;
248 struct ice_fdir_counter_pool_container *container =
249 &fdir_info->counter;
250 uint32_t cnt_index, len;
251 int ret;
252
253 TAILQ_INIT(&container->pool_list);
254
255 cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
256 len = ICE_FDIR_COUNTERS_PER_BLOCK;
257
258 ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
259 if (ret) {
260 PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
261 return ret;
262 }
263
264 return 0;
265 }
266
267 static int
268 ice_fdir_counter_release(struct ice_pf *pf)
269 {
270 struct ice_fdir_info *fdir_info = &pf->fdir;
271 struct ice_fdir_counter_pool_container *container =
272 &fdir_info->counter;
273 uint8_t i;
274
275 for (i = 0; i < container->index_free; i++) {
276 rte_free(container->pools[i]);
277 container->pools[i] = NULL;
278 }
279
280 TAILQ_INIT(&container->pool_list);
281 container->index_free = 0;
282
283 return 0;
284 }
285
286 static struct ice_fdir_counter *
287 ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
288 *container,
289 uint32_t id)
290 {
291 struct ice_fdir_counter_pool *pool;
292 struct ice_fdir_counter *counter;
293 int i;
294
295 TAILQ_FOREACH(pool, &container->pool_list, next) {
296 for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
297 counter = &pool->counters[i];
298
299 if (counter->shared &&
300 counter->ref_cnt &&
301 counter->id == id)
302 return counter;
303 }
304 }
305
306 return NULL;
307 }
308
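/*
 * ice_fdir_counter_alloc - take a counter from the first pool that has a
 * free entry; a shared counter with a matching id is reused instead and its
 * reference count is incremented
 */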
309 static struct ice_fdir_counter *
310 ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
311 {
312 struct ice_hw *hw = ICE_PF_TO_HW(pf);
313 struct ice_fdir_info *fdir_info = &pf->fdir;
314 struct ice_fdir_counter_pool_container *container =
315 &fdir_info->counter;
316 struct ice_fdir_counter_pool *pool = NULL;
317 struct ice_fdir_counter *counter_free = NULL;
318
319 if (shared) {
320 counter_free = ice_fdir_counter_shared_search(container, id);
321 if (counter_free) {
322 if (counter_free->ref_cnt + 1 == 0) {
323 rte_errno = E2BIG;
324 return NULL;
325 }
326 counter_free->ref_cnt++;
327 return counter_free;
328 }
329 }
330
331 TAILQ_FOREACH(pool, &container->pool_list, next) {
332 counter_free = TAILQ_FIRST(&pool->counter_list);
333 if (counter_free)
334 break;
335 counter_free = NULL;
336 }
337
338 if (!counter_free) {
339 PMD_DRV_LOG(ERR, "No free counter found\n");
340 return NULL;
341 }
342
343 counter_free->shared = shared;
344 counter_free->id = id;
345 counter_free->ref_cnt = 1;
346 counter_free->pool = pool;
347
348 /* reset statistic counter value */
349 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
350 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);
351
352 TAILQ_REMOVE(&pool->counter_list, counter_free, next);
353 if (TAILQ_EMPTY(&pool->counter_list)) {
354 TAILQ_REMOVE(&container->pool_list, pool, next);
355 TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
356 }
357
358 return counter_free;
359 }
360
361 static void
362 ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
363 struct ice_fdir_counter *counter)
364 {
365 if (!counter)
366 return;
367
368 if (--counter->ref_cnt == 0) {
369 struct ice_fdir_counter_pool *pool = counter->pool;
370
371 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
372 }
373 }
374
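/*
 * ice_fdir_init_filter_list - create the hash table and map used to track
 * FDIR filters in software, keyed by ice_fdir_fltr_pattern
 */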
375 static int
376 ice_fdir_init_filter_list(struct ice_pf *pf)
377 {
378 struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
379 struct ice_fdir_info *fdir_info = &pf->fdir;
380 char fdir_hash_name[RTE_HASH_NAMESIZE];
381 int ret;
382
383 struct rte_hash_parameters fdir_hash_params = {
384 .name = fdir_hash_name,
385 .entries = ICE_MAX_FDIR_FILTER_NUM,
386 .key_len = sizeof(struct ice_fdir_fltr_pattern),
387 .hash_func = rte_hash_crc,
388 .hash_func_init_val = 0,
389 .socket_id = rte_socket_id(),
390 .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
391 };
392
393 /* Initialize hash */
394 snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
395 "fdir_%s", dev->device->name);
396 fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
397 if (!fdir_info->hash_table) {
398 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
399 return -EINVAL;
400 }
401 fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
402 sizeof(*fdir_info->hash_map) *
403 ICE_MAX_FDIR_FILTER_NUM,
404 0);
405 if (!fdir_info->hash_map) {
406 PMD_INIT_LOG(ERR,
407 "Failed to allocate memory for fdir hash map!");
408 ret = -ENOMEM;
409 goto err_fdir_hash_map_alloc;
410 }
411 return 0;
412
413 err_fdir_hash_map_alloc:
414 rte_hash_free(fdir_info->hash_table);
415
416 return ret;
417 }
418
419 static void
420 ice_fdir_release_filter_list(struct ice_pf *pf)
421 {
422 struct ice_fdir_info *fdir_info = &pf->fdir;
423
424 rte_free(fdir_info->hash_map);
425 rte_hash_free(fdir_info->hash_table);
426
427 fdir_info->hash_map = NULL;
428 fdir_info->hash_table = NULL;
429 }
430
431 /*
432 * ice_fdir_setup - reserve and initialize the Flow Director resources
433 * @pf: board private structure
434 */
435 static int
436 ice_fdir_setup(struct ice_pf *pf)
437 {
438 struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
439 struct ice_hw *hw = ICE_PF_TO_HW(pf);
440 const struct rte_memzone *mz = NULL;
441 char z_name[RTE_MEMZONE_NAMESIZE];
442 struct ice_vsi *vsi;
443 int err = ICE_SUCCESS;
444
445 if ((pf->flags & ICE_FLAG_FDIR) == 0) {
446 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
447 return -ENOTSUP;
448 }
449
450 PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
451 " fd_fltr_best_effort = %u.",
452 hw->func_caps.fd_fltr_guar,
453 hw->func_caps.fd_fltr_best_effort);
454
455 if (pf->fdir.fdir_vsi) {
456 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
457 return ICE_SUCCESS;
458 }
459
460 /* make new FDIR VSI */
461 vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
462 if (!vsi) {
463 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
464 return -EINVAL;
465 }
466 pf->fdir.fdir_vsi = vsi;
467
468 err = ice_fdir_init_filter_list(pf);
469 if (err) {
470 PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
471 return -EINVAL;
472 }
473
474 err = ice_fdir_counter_init(pf);
475 if (err) {
476 PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
477 return -EINVAL;
478 }
479
480 /* FDIR TX queue setup */
481 err = ice_fdir_setup_tx_resources(pf);
482 if (err) {
483 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
484 goto fail_setup_tx;
485 }
486
487 /* FDIR RX queue setup */
488 err = ice_fdir_setup_rx_resources(pf);
489 if (err) {
490 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
491 goto fail_setup_rx;
492 }
493
494 err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
495 if (err) {
496 PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
497 goto fail_mem;
498 }
499
500 err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
501 if (err) {
502 PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
503 goto fail_mem;
504 }
505
506 /* Enable FDIR MSIX interrupt */
507 vsi->nb_used_qps = 1;
508 ice_vsi_queues_bind_intr(vsi);
509 ice_vsi_enable_queues_intr(vsi);
510
511 /* reserve memory for the fdir programming packet */
512 snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
513 ICE_FDIR_MZ_NAME,
514 eth_dev->data->port_id);
515 mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
516 if (!mz) {
517 PMD_DRV_LOG(ERR, "Cannot init memzone for "
518 "flow director program packet.");
519 err = -ENOMEM;
520 goto fail_mem;
521 }
522 pf->fdir.prg_pkt = mz->addr;
523 pf->fdir.dma_addr = mz->iova;
524 pf->fdir.mz = mz;
525
526 err = ice_fdir_prof_alloc(hw);
527 if (err) {
528 PMD_DRV_LOG(ERR, "Cannot allocate memory for "
529 "flow director profile.");
530 err = -ENOMEM;
531 goto fail_prof;
532 }
533
534 PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
535 vsi->base_queue);
536 return ICE_SUCCESS;
537
538 fail_prof:
539 rte_memzone_free(pf->fdir.mz);
540 pf->fdir.mz = NULL;
541 fail_mem:
542 ice_rx_queue_release(pf->fdir.rxq);
543 pf->fdir.rxq = NULL;
544 fail_setup_rx:
545 ice_tx_queue_release(pf->fdir.txq);
546 pf->fdir.txq = NULL;
547 fail_setup_tx:
548 ice_release_vsi(vsi);
549 pf->fdir.fdir_vsi = NULL;
550 return err;
551 }
552
553 static void
554 ice_fdir_prof_free(struct ice_hw *hw)
555 {
556 enum ice_fltr_ptype ptype;
557
558 for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
559 ptype < ICE_FLTR_PTYPE_MAX;
560 ptype++) {
561 rte_free(hw->fdir_prof[ptype]);
562 hw->fdir_prof[ptype] = NULL;
563 }
564
565 rte_free(hw->fdir_prof);
566 hw->fdir_prof = NULL;
567 }
568
569 /* Remove a profile for a given filter type */
570 static void
571 ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
572 {
573 struct ice_hw *hw = ICE_PF_TO_HW(pf);
574 struct ice_fd_hw_prof *hw_prof;
575 uint64_t prof_id;
576 uint16_t vsi_num;
577 int i;
578
579 if (!hw->fdir_prof || !hw->fdir_prof[ptype])
580 return;
581
582 hw_prof = hw->fdir_prof[ptype];
583
584 prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
585 for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
586 if (hw_prof->entry_h[i][is_tunnel]) {
587 vsi_num = ice_get_hw_vsi_num(hw,
588 hw_prof->vsi_h[i]);
589 ice_rem_prof_id_flow(hw, ICE_BLK_FD,
590 vsi_num, ptype);
591 ice_flow_rem_entry(hw, ICE_BLK_FD,
592 hw_prof->entry_h[i][is_tunnel]);
593 hw_prof->entry_h[i][is_tunnel] = 0;
594 }
595 }
596 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
597 rte_free(hw_prof->fdir_seg[is_tunnel]);
598 hw_prof->fdir_seg[is_tunnel] = NULL;
599
600 for (i = 0; i < hw_prof->cnt; i++)
601 hw_prof->vsi_h[i] = 0;
602 pf->hw_prof_cnt[ptype][is_tunnel] = 0;
603 }
604
605 /* Remove all created profiles */
606 static void
607 ice_fdir_prof_rm_all(struct ice_pf *pf)
608 {
609 enum ice_fltr_ptype ptype;
610
611 for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
612 ptype < ICE_FLTR_PTYPE_MAX;
613 ptype++) {
614 ice_fdir_prof_rm(pf, ptype, false);
615 ice_fdir_prof_rm(pf, ptype, true);
616 }
617 }
618
619 /*
620 * ice_fdir_teardown - release the Flow Director resources
621 * @pf: board private structure
622 */
623 static void
624 ice_fdir_teardown(struct ice_pf *pf)
625 {
626 struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
627 struct ice_hw *hw = ICE_PF_TO_HW(pf);
628 struct ice_vsi *vsi;
629 int err;
630
631 vsi = pf->fdir.fdir_vsi;
632 if (!vsi)
633 return;
634
635 ice_vsi_disable_queues_intr(vsi);
636
637 err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
638 if (err)
639 PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
640
641 err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
642 if (err)
643 PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
644
645 err = ice_fdir_counter_release(pf);
646 if (err)
647 PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");
648
649 ice_fdir_release_filter_list(pf);
650
651 ice_tx_queue_release(pf->fdir.txq);
652 pf->fdir.txq = NULL;
653 ice_rx_queue_release(pf->fdir.rxq);
654 pf->fdir.rxq = NULL;
655 ice_fdir_prof_rm_all(pf);
656 ice_fdir_prof_free(hw);
657 ice_release_vsi(vsi);
658 pf->fdir.fdir_vsi = NULL;
659
660 if (pf->fdir.mz) {
661 err = rte_memzone_free(pf->fdir.mz);
662 pf->fdir.mz = NULL;
663 if (err)
664 PMD_DRV_LOG(ERR, "Failed to free FDIR memezone.");
665 }
666 }
667
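/*
 * ice_fdir_cur_prof_conflict - check the existing profile of this flow type:
 * return -EEXIST if the input set already matches, -EINVAL if a conflicting
 * rule is installed, otherwise remove the empty profile and return 0
 */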
668 static int
669 ice_fdir_cur_prof_conflict(struct ice_pf *pf,
670 enum ice_fltr_ptype ptype,
671 struct ice_flow_seg_info *seg,
672 bool is_tunnel)
673 {
674 struct ice_hw *hw = ICE_PF_TO_HW(pf);
675 struct ice_flow_seg_info *ori_seg;
676 struct ice_fd_hw_prof *hw_prof;
677
678 hw_prof = hw->fdir_prof[ptype];
679 ori_seg = hw_prof->fdir_seg[is_tunnel];
680
681 /* profile does not exist */
682 if (!ori_seg)
683 return 0;
684
685 /* if no input set conflict, return -EEXIST */
686 if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
687 (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
688 PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
689 ptype);
690 return -EEXIST;
691 }
692
693 /* a rule with a conflicting input set already exists, so give up */
694 if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
695 PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
696 ptype);
697 return -EINVAL;
698 }
699
700 /* it's safe to delete an empty profile */
701 ice_fdir_prof_rm(pf, ptype, is_tunnel);
702 return 0;
703 }
704
705 static bool
706 ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
707 enum ice_fltr_ptype ptype,
708 bool is_tunnel)
709 {
710 struct ice_hw *hw = ICE_PF_TO_HW(pf);
711 struct ice_fd_hw_prof *hw_prof;
712 struct ice_flow_seg_info *seg;
713
714 hw_prof = hw->fdir_prof[ptype];
715 seg = hw_prof->fdir_seg[is_tunnel];
716
717 /* profile does not exist */
718 if (!seg)
719 return true;
720
721 /* profile exists and rule exists, fail to resolve the conflict */
722 if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
723 return false;
724
725 /* it's safe to delete an empty profile */
726 ice_fdir_prof_rm(pf, ptype, is_tunnel);
727
728 return true;
729 }
730
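/*
 * ice_fdir_cross_prof_conflict - resolve conflicts with profiles of related
 * flow types (e.g. IPV4_OTHER vs. IPV4_UDP/TCP/SCTP); empty conflicting
 * profiles are removed, otherwise -EINVAL is returned
 */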
731 static int
732 ice_fdir_cross_prof_conflict(struct ice_pf *pf,
733 enum ice_fltr_ptype ptype,
734 bool is_tunnel)
735 {
736 enum ice_fltr_ptype cflct_ptype;
737
738 switch (ptype) {
739 /* IPv4 */
740 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
741 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
742 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
743 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
744 if (!ice_fdir_prof_resolve_conflict
745 (pf, cflct_ptype, is_tunnel))
746 goto err;
747 break;
748 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
749 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
750 if (!ice_fdir_prof_resolve_conflict
751 (pf, cflct_ptype, is_tunnel))
752 goto err;
753 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
754 if (!ice_fdir_prof_resolve_conflict
755 (pf, cflct_ptype, is_tunnel))
756 goto err;
757 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
758 if (!ice_fdir_prof_resolve_conflict
759 (pf, cflct_ptype, is_tunnel))
760 goto err;
761 break;
762 /* IPv4 GTPU */
763 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
764 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
765 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
766 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
767 if (!ice_fdir_prof_resolve_conflict
768 (pf, cflct_ptype, is_tunnel))
769 goto err;
770 break;
771 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
772 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
773 if (!ice_fdir_prof_resolve_conflict
774 (pf, cflct_ptype, is_tunnel))
775 goto err;
776 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
777 if (!ice_fdir_prof_resolve_conflict
778 (pf, cflct_ptype, is_tunnel))
779 goto err;
780 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
781 if (!ice_fdir_prof_resolve_conflict
782 (pf, cflct_ptype, is_tunnel))
783 goto err;
784 break;
785 /* IPv6 */
786 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
787 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
788 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
789 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
790 if (!ice_fdir_prof_resolve_conflict
791 (pf, cflct_ptype, is_tunnel))
792 goto err;
793 break;
794 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
795 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
796 if (!ice_fdir_prof_resolve_conflict
797 (pf, cflct_ptype, is_tunnel))
798 goto err;
799 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
800 if (!ice_fdir_prof_resolve_conflict
801 (pf, cflct_ptype, is_tunnel))
802 goto err;
803 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
804 if (!ice_fdir_prof_resolve_conflict
805 (pf, cflct_ptype, is_tunnel))
806 goto err;
807 break;
808 case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP:
809 case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP:
810 case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP:
811 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER;
812 if (!ice_fdir_prof_resolve_conflict
813 (pf, cflct_ptype, is_tunnel))
814 goto err;
815 break;
816 case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER:
817 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP;
818 if (!ice_fdir_prof_resolve_conflict
819 (pf, cflct_ptype, is_tunnel))
820 goto err;
821 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP;
822 if (!ice_fdir_prof_resolve_conflict
823 (pf, cflct_ptype, is_tunnel))
824 goto err;
825 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP;
826 if (!ice_fdir_prof_resolve_conflict
827 (pf, cflct_ptype, is_tunnel))
828 goto err;
829 break;
830 default:
831 break;
832 }
833 return 0;
834 err:
835 PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
836 ptype, cflct_ptype);
837 return -EINVAL;
838 }
839
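/*
 * ice_fdir_hw_tbl_conf - add a flow profile for the given flow type and
 * create flow entries for both the main VSI and the FDIR control VSI
 */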
840 static int
841 ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
842 struct ice_vsi *ctrl_vsi,
843 struct ice_flow_seg_info *seg,
844 enum ice_fltr_ptype ptype,
845 bool is_tunnel)
846 {
847 struct ice_hw *hw = ICE_PF_TO_HW(pf);
848 enum ice_flow_dir dir = ICE_FLOW_RX;
849 struct ice_fd_hw_prof *hw_prof;
850 struct ice_flow_prof *prof;
851 uint64_t entry_1 = 0;
852 uint64_t entry_2 = 0;
853 uint16_t vsi_num;
854 int ret;
855 uint64_t prof_id;
856
857 /* check if there is an input set conflict on the current profile. */
858 ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
859 if (ret)
860 return ret;
861
862 /* check if the profile conflicts with other profiles. */
863 ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
864 if (ret)
865 return ret;
866
867 prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
868 ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
869 (is_tunnel) ? 2 : 1, NULL, 0, &prof);
870 if (ret)
871 return ret;
872 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
873 vsi->idx, ICE_FLOW_PRIO_NORMAL,
874 seg, NULL, 0, &entry_1);
875 if (ret) {
876 PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
877 ptype);
878 goto err_add_prof;
879 }
880 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
881 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
882 seg, NULL, 0, &entry_2);
883 if (ret) {
884 PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
885 ptype);
886 goto err_add_entry;
887 }
888
889 hw_prof = hw->fdir_prof[ptype];
890 pf->hw_prof_cnt[ptype][is_tunnel] = 0;
891 hw_prof->cnt = 0;
892 hw_prof->fdir_seg[is_tunnel] = seg;
893 hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
894 hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
895 pf->hw_prof_cnt[ptype][is_tunnel]++;
896 hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
897 hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
898 pf->hw_prof_cnt[ptype][is_tunnel]++;
899
900 return ret;
901
902 err_add_entry:
903 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
904 ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
905 ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
906 err_add_prof:
907 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
908
909 return ret;
910 }
911
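/*
 * ice_fdir_input_set_parse - translate an input set bitmap into the list of
 * ice_flow_field indexes used to build the flow segment
 */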
912 static void
913 ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
914 {
915 uint32_t i, j;
916
917 struct ice_inset_map {
918 uint64_t inset;
919 enum ice_flow_field fld;
920 };
921 static const struct ice_inset_map ice_inset_map[] = {
922 {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
923 {ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
924 {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
925 {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
926 {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
927 {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
928 {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
929 {ICE_INSET_IPV4_PKID, ICE_FLOW_FIELD_IDX_IPV4_ID},
930 {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
931 {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
932 {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
933 {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
934 {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
935 {ICE_INSET_IPV6_PKID, ICE_FLOW_FIELD_IDX_IPV6_ID},
936 {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
937 {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
938 {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
939 {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
940 {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
941 {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
942 {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
943 {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
944 {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
945 {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
946 {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
947 {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
948 {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
949 {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
950 {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
951 {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
952 {ICE_INSET_VXLAN_VNI, ICE_FLOW_FIELD_IDX_VXLAN_VNI},
953 {ICE_INSET_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI},
954 {ICE_INSET_NAT_T_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI},
955 };
956
957 for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
958 if ((inset & ice_inset_map[i].inset) ==
959 ice_inset_map[i].inset)
960 field[j++] = ice_inset_map[i].fld;
961 }
962 }
963
964 static void
965 ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
966 {
967 switch (flow) {
968 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
969 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
970 ICE_FLOW_SEG_HDR_IPV4 |
971 ICE_FLOW_SEG_HDR_IPV_OTHER);
972 break;
973 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
974 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
975 ICE_FLOW_SEG_HDR_IPV4 |
976 ICE_FLOW_SEG_HDR_IPV_OTHER);
977 break;
978 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
979 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
980 ICE_FLOW_SEG_HDR_IPV4 |
981 ICE_FLOW_SEG_HDR_IPV_OTHER);
982 break;
983 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
984 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
985 ICE_FLOW_SEG_HDR_IPV_OTHER);
986 break;
987 case ICE_FLTR_PTYPE_FRAG_IPV4:
988 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
989 ICE_FLOW_SEG_HDR_IPV_FRAG);
990 break;
991 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
992 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
993 ICE_FLOW_SEG_HDR_IPV6 |
994 ICE_FLOW_SEG_HDR_IPV_OTHER);
995 break;
996 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
997 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
998 ICE_FLOW_SEG_HDR_IPV6 |
999 ICE_FLOW_SEG_HDR_IPV_OTHER);
1000 break;
1001 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
1002 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
1003 ICE_FLOW_SEG_HDR_IPV6 |
1004 ICE_FLOW_SEG_HDR_IPV_OTHER);
1005 break;
1006 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
1007 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
1008 ICE_FLOW_SEG_HDR_IPV_OTHER);
1009 break;
1010 case ICE_FLTR_PTYPE_FRAG_IPV6:
1011 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
1012 ICE_FLOW_SEG_HDR_IPV_FRAG);
1013 break;
1014 case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP:
1015 case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP:
1016 case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP:
1017 break;
1018 case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER:
1019 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV_OTHER);
1020 break;
1021 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU:
1022 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
1023 ICE_FLOW_SEG_HDR_IPV4 |
1024 ICE_FLOW_SEG_HDR_IPV_OTHER);
1025 break;
1026 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH:
1027 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
1028 ICE_FLOW_SEG_HDR_GTPU_IP |
1029 ICE_FLOW_SEG_HDR_IPV4 |
1030 ICE_FLOW_SEG_HDR_IPV_OTHER);
1031 break;
1032 case ICE_FLTR_PTYPE_NONF_IPV6_GTPU:
1033 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
1034 ICE_FLOW_SEG_HDR_IPV6 |
1035 ICE_FLOW_SEG_HDR_IPV_OTHER);
1036 break;
1037 case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH:
1038 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
1039 ICE_FLOW_SEG_HDR_GTPU_IP |
1040 ICE_FLOW_SEG_HDR_IPV6 |
1041 ICE_FLOW_SEG_HDR_IPV_OTHER);
1042 break;
1043 case ICE_FLTR_PTYPE_NON_IP_L2:
1044 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
1045 break;
1046 case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
1047 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
1048 ICE_FLOW_SEG_HDR_IPV4 |
1049 ICE_FLOW_SEG_HDR_IPV_OTHER);
1050 break;
1051 case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
1052 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
1053 ICE_FLOW_SEG_HDR_IPV6 |
1054 ICE_FLOW_SEG_HDR_IPV_OTHER);
1055 break;
1056 case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
1057 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
1058 ICE_FLOW_SEG_HDR_IPV4 |
1059 ICE_FLOW_SEG_HDR_IPV_OTHER);
1060 break;
1061 case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
1062 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
1063 ICE_FLOW_SEG_HDR_IPV6 |
1064 ICE_FLOW_SEG_HDR_IPV_OTHER);
1065 break;
1066 default:
1067 PMD_DRV_LOG(ERR, "not supported filter type.");
1068 break;
1069 }
1070 }
1071
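/*
 * ice_fdir_input_set_conf - build the outer/inner flow segments from the
 * input sets and program the FDIR HW tables for this flow type
 */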
1072 static int
1073 ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
1074 uint64_t inner_input_set, uint64_t outer_input_set,
1075 enum ice_fdir_tunnel_type ttype)
1076 {
1077 struct ice_flow_seg_info *seg;
1078 struct ice_flow_seg_info *seg_tun = NULL;
1079 enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
1080 uint64_t input_set;
1081 bool is_tunnel;
1082 int k, i, ret = 0;
1083
1084 if (!(inner_input_set | outer_input_set))
1085 return -EINVAL;
1086
1087 seg_tun = (struct ice_flow_seg_info *)
1088 ice_malloc(hw, sizeof(*seg_tun) * ICE_FD_HW_SEG_MAX);
1089 if (!seg_tun) {
1090 PMD_DRV_LOG(ERR, "No memory can be allocated");
1091 return -ENOMEM;
1092 }
1093
1094 /* use seg_tun[1] to record tunnel inner part */
1095 for (k = 0; k <= ICE_FD_HW_SEG_TUN; k++) {
1096 seg = &seg_tun[k];
1097 input_set = (k == ICE_FD_HW_SEG_TUN) ? inner_input_set : outer_input_set;
1098 if (input_set == 0)
1099 continue;
1100
1101 for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
1102 field[i] = ICE_FLOW_FIELD_IDX_MAX;
1103
1104 ice_fdir_input_set_parse(input_set, field);
1105
1106 ice_fdir_input_set_hdrs(flow, seg);
1107
1108 for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
1109 ice_flow_set_fld(seg, field[i],
1110 ICE_FLOW_FLD_OFF_INVAL,
1111 ICE_FLOW_FLD_OFF_INVAL,
1112 ICE_FLOW_FLD_OFF_INVAL, false);
1113 }
1114 }
1115
1116 is_tunnel = ice_fdir_is_tunnel_profile(ttype);
1117
1118 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
1119 seg_tun, flow, is_tunnel);
1120
1121 if (!ret) {
1122 return ret;
1123 } else if (ret < 0) {
1124 rte_free(seg_tun);
1125 return (ret == -EEXIST) ? 0 : ret;
1126 } else {
1127 return ret;
1128 }
1129 }
1130
1131 static void
1132 ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
1133 bool is_tunnel, bool add)
1134 {
1135 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1136 int cnt;
1137
1138 cnt = (add) ? 1 : -1;
1139 hw->fdir_active_fltr += cnt;
1140 if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
1141 PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
1142 else
1143 pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
1144 }
1145
1146 static int
1147 ice_fdir_init(struct ice_adapter *ad)
1148 {
1149 struct ice_pf *pf = &ad->pf;
1150 struct ice_flow_parser *parser;
1151 int ret;
1152
1153 if (ad->hw.dcf_enabled)
1154 return 0;
1155
1156 ret = ice_fdir_setup(pf);
1157 if (ret)
1158 return ret;
1159
1160 parser = &ice_fdir_parser;
1161
1162 return ice_register_parser(parser, ad);
1163 }
1164
1165 static void
1166 ice_fdir_uninit(struct ice_adapter *ad)
1167 {
1168 struct ice_flow_parser *parser;
1169 struct ice_pf *pf = &ad->pf;
1170
1171 if (ad->hw.dcf_enabled)
1172 return;
1173
1174 parser = &ice_fdir_parser;
1175
1176 ice_unregister_parser(parser, ad);
1177
1178 ice_fdir_teardown(pf);
1179 }
1180
1181 static int
1182 ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
1183 {
1184 if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
1185 return 1;
1186 else
1187 return 0;
1188 }
1189
1190 static int
1191 ice_fdir_add_del_raw(struct ice_pf *pf,
1192 struct ice_fdir_filter_conf *filter,
1193 bool add)
1194 {
1195 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1196
1197 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1198 rte_memcpy(pkt, filter->pkt_buf, filter->pkt_len);
1199
1200 struct ice_fltr_desc desc;
1201 memset(&desc, 0, sizeof(desc));
1202 filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1203 ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
1204
1205 return ice_fdir_programming(pf, &desc);
1206 }
1207
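/*
 * ice_fdir_add_del_filter - build the programming descriptor and dummy
 * packet for a filter and submit it to the FDIR programming queue
 */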
1208 static int
1209 ice_fdir_add_del_filter(struct ice_pf *pf,
1210 struct ice_fdir_filter_conf *filter,
1211 bool add)
1212 {
1213 struct ice_fltr_desc desc;
1214 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1215 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1216 bool is_tun;
1217 int ret;
1218
1219 filter->input.dest_vsi = pf->main_vsi->idx;
1220
1221 memset(&desc, 0, sizeof(desc));
1222 filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1223 ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
1224
1225 is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1226
1227 memset(pkt, 0, ICE_FDIR_PKT_LEN);
1228 ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
1229 if (ret) {
1230 PMD_DRV_LOG(ERR, "Generate dummy packet failed");
1231 return -EINVAL;
1232 }
1233
1234 return ice_fdir_programming(pf, &desc);
1235 }
1236
1237 static void
1238 ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
1239 struct ice_fdir_filter_conf *filter)
1240 {
1241 struct ice_fdir_fltr *input = &filter->input;
1242 memset(key, 0, sizeof(*key));
1243
1244 key->flow_type = input->flow_type;
1245 rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
1246 rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
1247 rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
1248 rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));
1249
1250 rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
1251 rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));
1252
1253 key->tunnel_type = filter->tunnel_type;
1254 }
1255
1256 /* Check whether the flow director filter already exists */
1257 static struct ice_fdir_filter_conf *
1258 ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
1259 const struct ice_fdir_fltr_pattern *key)
1260 {
1261 int ret;
1262
1263 ret = rte_hash_lookup(fdir_info->hash_table, key);
1264 if (ret < 0)
1265 return NULL;
1266
1267 return fdir_info->hash_map[ret];
1268 }
1269
1270 /* Add a flow director entry into the SW list */
1271 static int
1272 ice_fdir_entry_insert(struct ice_pf *pf,
1273 struct ice_fdir_filter_conf *entry,
1274 struct ice_fdir_fltr_pattern *key)
1275 {
1276 struct ice_fdir_info *fdir_info = &pf->fdir;
1277 int ret;
1278
1279 ret = rte_hash_add_key(fdir_info->hash_table, key);
1280 if (ret < 0) {
1281 PMD_DRV_LOG(ERR,
1282 "Failed to insert fdir entry to hash table %d!",
1283 ret);
1284 return ret;
1285 }
1286 fdir_info->hash_map[ret] = entry;
1287
1288 return 0;
1289 }
1290
1291 /* Delete a flow director entry from the SW list */
1292 static int
1293 ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
1294 {
1295 struct ice_fdir_info *fdir_info = &pf->fdir;
1296 int ret;
1297
1298 ret = rte_hash_del_key(fdir_info->hash_table, key);
1299 if (ret < 0) {
1300 PMD_DRV_LOG(ERR,
1301 "Failed to delete fdir filter to hash table %d!",
1302 ret);
1303 return ret;
1304 }
1305 fdir_info->hash_map[ret] = NULL;
1306
1307 return 0;
1308 }
1309
1310 static int
1311 ice_fdir_create_filter(struct ice_adapter *ad,
1312 struct rte_flow *flow,
1313 void *meta,
1314 struct rte_flow_error *error)
1315 {
1316 struct ice_pf *pf = &ad->pf;
1317 struct ice_fdir_filter_conf *filter = meta;
1318 struct ice_fdir_info *fdir_info = &pf->fdir;
1319 struct ice_fdir_filter_conf *entry, *node;
1320 struct ice_fdir_fltr_pattern key;
1321 bool is_tun;
1322 int ret;
1323 int i;
1324
1325 if (filter->parser_ena) {
1326 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1327
1328 int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
1329 int ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
1330 u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
1331 u16 main_vsi = pf->main_vsi->idx;
1332 bool fv_found = false;
1333
1334 struct ice_fdir_prof_info *pi = &ad->fdir_prof_info[ptg];
1335 if (pi->fdir_actived_cnt != 0) {
1336 for (i = 0; i < ICE_MAX_FV_WORDS; i++)
1337 if (pi->prof.fv[i].proto_id !=
1338 filter->prof->fv[i].proto_id ||
1339 pi->prof.fv[i].offset !=
1340 filter->prof->fv[i].offset ||
1341 pi->prof.fv[i].msk !=
1342 filter->prof->fv[i].msk)
1343 break;
1344 if (i == ICE_MAX_FV_WORDS) {
1345 fv_found = true;
1346 pi->fdir_actived_cnt++;
1347 }
1348 }
1349
1350 if (!fv_found) {
1351 ret = ice_flow_set_hw_prof(hw, main_vsi, ctrl_vsi,
1352 filter->prof, ICE_BLK_FD);
1353 if (ret)
1354 goto error;
1355 }
1356
1357 ret = ice_fdir_add_del_raw(pf, filter, true);
1358 if (ret)
1359 goto error;
1360
1361 if (!fv_found) {
1362 for (i = 0; i < filter->prof->fv_num; i++) {
1363 pi->prof.fv[i].proto_id =
1364 filter->prof->fv[i].proto_id;
1365 pi->prof.fv[i].offset =
1366 filter->prof->fv[i].offset;
1367 pi->prof.fv[i].msk = filter->prof->fv[i].msk;
1368 }
1369 pi->fdir_actived_cnt = 1;
1370 }
1371
1372 if (filter->mark_flag == 1)
1373 ice_fdir_rx_parsing_enable(ad, 1);
1374
1375 entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
1376 if (!entry)
1377 goto error;
1378
1379 rte_memcpy(entry, filter, sizeof(*filter));
1380
1381 flow->rule = entry;
1382
1383 return 0;
1384 }
1385
1386 ice_fdir_extract_fltr_key(&key, filter);
1387 node = ice_fdir_entry_lookup(fdir_info, &key);
1388 if (node) {
1389 rte_flow_error_set(error, EEXIST,
1390 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1391 "Rule already exists!");
1392 return -rte_errno;
1393 }
1394
1395 entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
1396 if (!entry) {
1397 rte_flow_error_set(error, ENOMEM,
1398 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1399 "Failed to allocate memory");
1400 return -rte_errno;
1401 }
1402
1403 is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1404
1405 ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
1406 filter->input_set_i, filter->input_set_o,
1407 filter->tunnel_type);
1408 if (ret) {
1409 rte_flow_error_set(error, -ret,
1410 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1411 "Profile configure failed.");
1412 goto free_entry;
1413 }
1414
1415 /* alloc counter for FDIR */
1416 if (filter->input.cnt_ena) {
1417 struct rte_flow_action_count *act_count = &filter->act_count;
1418
1419 filter->counter = ice_fdir_counter_alloc(pf, 0, act_count->id);
1420 if (!filter->counter) {
1421 rte_flow_error_set(error, EINVAL,
1422 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1423 "Failed to alloc FDIR counter.");
1424 goto free_entry;
1425 }
1426 filter->input.cnt_index = filter->counter->hw_index;
1427 }
1428
1429 ret = ice_fdir_add_del_filter(pf, filter, true);
1430 if (ret) {
1431 rte_flow_error_set(error, -ret,
1432 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1433 "Add filter rule failed.");
1434 goto free_counter;
1435 }
1436
1437 if (filter->mark_flag == 1)
1438 ice_fdir_rx_parsing_enable(ad, 1);
1439
1440 rte_memcpy(entry, filter, sizeof(*entry));
1441 ret = ice_fdir_entry_insert(pf, entry, &key);
1442 if (ret) {
1443 rte_flow_error_set(error, -ret,
1444 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1445 "Insert entry to table failed.");
1446 goto free_entry;
1447 }
1448
1449 flow->rule = entry;
1450 ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);
1451
1452 return 0;
1453
1454 free_counter:
1455 if (filter->counter) {
1456 ice_fdir_counter_free(pf, filter->counter);
1457 filter->counter = NULL;
1458 }
1459
1460 free_entry:
1461 rte_free(entry);
1462 return -rte_errno;
1463
1464 error:
1465 rte_free(filter->prof);
1466 rte_free(filter->pkt_buf);
1467 return -rte_errno;
1468 }
1469
1470 static int
1471 ice_fdir_destroy_filter(struct ice_adapter *ad,
1472 struct rte_flow *flow,
1473 struct rte_flow_error *error)
1474 {
1475 struct ice_pf *pf = &ad->pf;
1476 struct ice_fdir_info *fdir_info = &pf->fdir;
1477 struct ice_fdir_filter_conf *filter, *entry;
1478 struct ice_fdir_fltr_pattern key;
1479 bool is_tun;
1480 int ret;
1481
1482 filter = (struct ice_fdir_filter_conf *)flow->rule;
1483
1484 if (filter->parser_ena) {
1485 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1486
1487 int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
1488 int ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
1489 u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
1490 u16 main_vsi = pf->main_vsi->idx;
1491 enum ice_block blk = ICE_BLK_FD;
1492 u16 vsi_num;
1493
1494 ret = ice_fdir_add_del_raw(pf, filter, false);
1495 if (ret)
1496 return -rte_errno;
1497
1498 struct ice_fdir_prof_info *pi = &ad->fdir_prof_info[ptg];
1499 if (pi->fdir_actived_cnt != 0) {
1500 pi->fdir_actived_cnt--;
1501 if (!pi->fdir_actived_cnt) {
1502 vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi);
1503 ice_rem_prof_id_flow(hw, blk, vsi_num, id);
1504
1505 vsi_num = ice_get_hw_vsi_num(hw, main_vsi);
1506 ice_rem_prof_id_flow(hw, blk, vsi_num, id);
1507 }
1508 }
1509
1510 if (filter->mark_flag == 1)
1511 ice_fdir_rx_parsing_enable(ad, 0);
1512
1513 flow->rule = NULL;
1514
1515 rte_free(filter->prof);
1516 rte_free(filter->pkt_buf);
1517 rte_free(filter);
1518
1519 return 0;
1520 }
1521
1522 is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1523
1524 if (filter->counter) {
1525 ice_fdir_counter_free(pf, filter->counter);
1526 filter->counter = NULL;
1527 }
1528
1529 ice_fdir_extract_fltr_key(&key, filter);
1530 entry = ice_fdir_entry_lookup(fdir_info, &key);
1531 if (!entry) {
1532 rte_flow_error_set(error, ENOENT,
1533 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1534 "Can't find entry.");
1535 return -rte_errno;
1536 }
1537
1538 ret = ice_fdir_add_del_filter(pf, filter, false);
1539 if (ret) {
1540 rte_flow_error_set(error, -ret,
1541 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1542 "Del filter rule failed.");
1543 return -rte_errno;
1544 }
1545
1546 ret = ice_fdir_entry_del(pf, &key);
1547 if (ret) {
1548 rte_flow_error_set(error, -ret,
1549 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1550 "Remove entry from table failed.");
1551 return -rte_errno;
1552 }
1553
1554 ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
1555
1556 if (filter->mark_flag == 1)
1557 ice_fdir_rx_parsing_enable(ad, 0);
1558
1559 flow->rule = NULL;
1560
1561 rte_free(filter);
1562
1563 return 0;
1564 }
1565
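/*
 * ice_fdir_query_count - read (and optionally reset) the HW hit counter
 * attached to a flow via the COUNT action
 */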
1566 static int
1567 ice_fdir_query_count(struct ice_adapter *ad,
1568 struct rte_flow *flow,
1569 struct rte_flow_query_count *flow_stats,
1570 struct rte_flow_error *error)
1571 {
1572 struct ice_pf *pf = &ad->pf;
1573 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1574 struct ice_fdir_filter_conf *filter = flow->rule;
1575 struct ice_fdir_counter *counter = filter->counter;
1576 uint64_t hits_lo, hits_hi;
1577
1578 if (!counter) {
1579 rte_flow_error_set(error, EINVAL,
1580 RTE_FLOW_ERROR_TYPE_ACTION,
1581 NULL,
1582 "FDIR counters not available");
1583 return -rte_errno;
1584 }
1585
1586 /*
1587 * Reading the low 32 bits latches the high 32 bits into a shadow
1588 * register. Reading the high 32 bits returns the value in the
1589 * shadow register.
1590 */
1591 hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
1592 hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));
1593
1594 flow_stats->hits_set = 1;
1595 flow_stats->hits = hits_lo | (hits_hi << 32);
1596 flow_stats->bytes_set = 0;
1597 flow_stats->bytes = 0;
1598
1599 if (flow_stats->reset) {
1600 /* reset statistic counter value */
1601 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
1602 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
1603 }
1604
1605 return 0;
1606 }
1607
1608 static struct ice_flow_engine ice_fdir_engine = {
1609 .init = ice_fdir_init,
1610 .uninit = ice_fdir_uninit,
1611 .create = ice_fdir_create_filter,
1612 .destroy = ice_fdir_destroy_filter,
1613 .query_count = ice_fdir_query_count,
1614 .type = ICE_FLOW_ENGINE_FDIR,
1615 };
1616
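/*
 * ice_fdir_parse_action_qregion - validate an RSS action used as a queue
 * region (contiguous queues, power-of-two size) and fill the destination
 * fields of the filter
 */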
1617 static int
1618 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1619 struct rte_flow_error *error,
1620 const struct rte_flow_action *act,
1621 struct ice_fdir_filter_conf *filter)
1622 {
1623 const struct rte_flow_action_rss *rss = act->conf;
1624 uint32_t i;
1625
1626 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1627 rte_flow_error_set(error, EINVAL,
1628 RTE_FLOW_ERROR_TYPE_ACTION, act,
1629 "Invalid action.");
1630 return -rte_errno;
1631 }
1632
1633 if (rss->queue_num <= 1) {
1634 rte_flow_error_set(error, EINVAL,
1635 RTE_FLOW_ERROR_TYPE_ACTION, act,
1636 "Queue region size can't be 0 or 1.");
1637 return -rte_errno;
1638 }
1639
1640 /* check if the queue indexes for the queue region are contiguous */
1641 for (i = 0; i < rss->queue_num - 1; i++) {
1642 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1643 rte_flow_error_set(error, EINVAL,
1644 RTE_FLOW_ERROR_TYPE_ACTION, act,
1645 "Discontinuous queue region");
1646 return -rte_errno;
1647 }
1648 }
1649
1650 if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1651 rte_flow_error_set(error, EINVAL,
1652 RTE_FLOW_ERROR_TYPE_ACTION, act,
1653 "Invalid queue region indexes.");
1654 return -rte_errno;
1655 }
1656
1657 if (!(rte_is_power_of_2(rss->queue_num) &&
1658 (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1659 rte_flow_error_set(error, EINVAL,
1660 RTE_FLOW_ERROR_TYPE_ACTION, act,
1661 "The region size should be any of the following values:"
1662 "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
1663 "of queues do not exceed the VSI allocation.");
1664 return -rte_errno;
1665 }
1666
1667 filter->input.q_index = rss->queue[0];
1668 filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1669 filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1670
1671 return 0;
1672 }
1673
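/*
 * ice_fdir_parse_action - walk the action list and fill the filter's
 * destination, mark and counter configuration; at most one destination,
 * one mark and one count action are accepted
 */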
1674 static int
1675 ice_fdir_parse_action(struct ice_adapter *ad,
1676 const struct rte_flow_action actions[],
1677 struct rte_flow_error *error,
1678 struct ice_fdir_filter_conf *filter)
1679 {
1680 struct ice_pf *pf = &ad->pf;
1681 const struct rte_flow_action_queue *act_q;
1682 const struct rte_flow_action_mark *mark_spec = NULL;
1683 const struct rte_flow_action_count *act_count;
1684 uint32_t dest_num = 0;
1685 uint32_t mark_num = 0;
1686 uint32_t counter_num = 0;
1687 int ret;
1688
1689 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1690 switch (actions->type) {
1691 case RTE_FLOW_ACTION_TYPE_VOID:
1692 break;
1693 case RTE_FLOW_ACTION_TYPE_QUEUE:
1694 dest_num++;
1695
1696 act_q = actions->conf;
1697 filter->input.q_index = act_q->index;
1698 if (filter->input.q_index >=
1699 pf->dev_data->nb_rx_queues) {
1700 rte_flow_error_set(error, EINVAL,
1701 RTE_FLOW_ERROR_TYPE_ACTION,
1702 actions,
1703 "Invalid queue for FDIR.");
1704 return -rte_errno;
1705 }
1706 filter->input.dest_ctl =
1707 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1708 break;
1709 case RTE_FLOW_ACTION_TYPE_DROP:
1710 dest_num++;
1711
1712 filter->input.dest_ctl =
1713 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1714 break;
1715 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1716 dest_num++;
1717
1718 filter->input.dest_ctl =
1719 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1720 break;
1721 case RTE_FLOW_ACTION_TYPE_RSS:
1722 dest_num++;
1723
1724 ret = ice_fdir_parse_action_qregion(pf,
1725 error, actions, filter);
1726 if (ret)
1727 return ret;
1728 break;
1729 case RTE_FLOW_ACTION_TYPE_MARK:
1730 mark_num++;
1731 filter->mark_flag = 1;
1732 mark_spec = actions->conf;
1733 filter->input.fltr_id = mark_spec->id;
1734 filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
1735 break;
1736 case RTE_FLOW_ACTION_TYPE_COUNT:
1737 counter_num++;
1738
1739 act_count = actions->conf;
1740 filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1741 rte_memcpy(&filter->act_count, act_count,
1742 sizeof(filter->act_count));
1743
1744 break;
1745 default:
1746 rte_flow_error_set(error, EINVAL,
1747 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1748 "Invalid action.");
1749 return -rte_errno;
1750 }
1751 }
1752
1753 if (dest_num >= 2) {
1754 rte_flow_error_set(error, EINVAL,
1755 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1756 "Unsupported action combination");
1757 return -rte_errno;
1758 }
1759
1760 if (mark_num >= 2) {
1761 rte_flow_error_set(error, EINVAL,
1762 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1763 "Too many mark actions");
1764 return -rte_errno;
1765 }
1766
1767 if (counter_num >= 2) {
1768 rte_flow_error_set(error, EINVAL,
1769 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1770 "Too many count actions");
1771 return -rte_errno;
1772 }
1773
1774 if (dest_num + mark_num + counter_num == 0) {
1775 rte_flow_error_set(error, EINVAL,
1776 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1777 "Empty action");
1778 return -rte_errno;
1779 }
1780
1781 /* set default action to PASSTHRU mode, in "mark/count only" case. */
1782 if (dest_num == 0)
1783 filter->input.dest_ctl =
1784 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1785
1786 return 0;
1787 }
1788
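/*
 * ice_fdir_parse_pattern - walk the pattern items, collect the outer/inner
 * input sets and fill the FDIR filter input structure
 */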
1789 static int
1790 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1791 const struct rte_flow_item pattern[],
1792 struct rte_flow_error *error,
1793 struct ice_fdir_filter_conf *filter)
1794 {
1795 const struct rte_flow_item *item = pattern;
1796 enum rte_flow_item_type item_type;
1797 enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1798 enum rte_flow_item_type l4 = RTE_FLOW_ITEM_TYPE_END;
1799 enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1800 const struct rte_flow_item_raw *raw_spec, *raw_mask;
1801 const struct rte_flow_item_eth *eth_spec, *eth_mask;
1802 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
1803 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1804 const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec,
1805 *ipv6_frag_mask;
1806 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1807 const struct rte_flow_item_udp *udp_spec, *udp_mask;
1808 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1809 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1810 const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1811 const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1812 const struct rte_flow_item_esp *esp_spec, *esp_mask;
1813 uint64_t input_set_i = ICE_INSET_NONE; /* only for tunnel inner */
1814 uint64_t input_set_o = ICE_INSET_NONE; /* non-tunnel and tunnel outer */
1815 uint64_t *input_set;
1816 uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1817 uint8_t ipv6_addr_mask[16] = {
1818 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1819 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1820 };
1821 uint32_t vtc_flow_cpu;
1822 uint16_t ether_type;
1823 enum rte_flow_item_type next_type;
1824 bool is_outer = true;
1825 struct ice_fdir_extra *p_ext_data;
1826 struct ice_fdir_v4 *p_v4 = NULL;
1827 struct ice_fdir_v6 *p_v6 = NULL;
1828 struct ice_parser_result rslt;
1829 uint8_t item_num = 0;
1830
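/*
 * Pre-scan the pattern once: note the tunnel type (VXLAN/GTPU) and count
 * the items before the detailed parsing pass below.
 */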
1831 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1832 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1833 tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1834 /* To align with shared code behavior, save GTPU outer
1835 * fields in the inner struct.
1836 */
1837 if (item->type == RTE_FLOW_ITEM_TYPE_GTPU ||
1838 item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
1839 is_outer = false;
1840 }
1841 item_num++;
1842 }
1843
1844 /* This loop parses the flow pattern and distinguishes non-tunnel from
1845 * tunnel flows. input_set_i is used for the inner part.
1846 */
1847 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1848 item_type = item->type;
1849
1850 if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
1851 item_type ==
1852 RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
1853 rte_flow_error_set(error, EINVAL,
1854 RTE_FLOW_ERROR_TYPE_ITEM, item,
1855 "Not support range");
1856 }
1857
1858 input_set = (tunnel_type && !is_outer) ?
1859 &input_set_i : &input_set_o;
1860
1861 switch (item_type) {
1862 case RTE_FLOW_ITEM_TYPE_RAW: {
1863 if (ad->psr == NULL)
1864 return -rte_errno;
1865
1866 raw_spec = item->spec;
1867 raw_mask = item->mask;
1868
1869 if (item_num != 1)
1870 break;
1871
1872 /* convert raw spec & mask from byte string to int */
1873 unsigned char *spec_pattern =
1874 (uint8_t *)(uintptr_t)raw_spec->pattern;
1875 unsigned char *mask_pattern =
1876 (uint8_t *)(uintptr_t)raw_mask->pattern;
1877 uint8_t *tmp_spec, *tmp_mask;
1878 uint16_t tmp_val = 0;
1879 uint8_t pkt_len = 0;
1880 uint8_t tmp = 0;
1881 int i, j;
1882
1883 pkt_len = strlen((char *)(uintptr_t)raw_spec->pattern);
1884 if (strlen((char *)(uintptr_t)raw_mask->pattern) !=
1885 pkt_len)
1886 return -rte_errno;
1887
1888 tmp_spec = rte_zmalloc(NULL, pkt_len / 2, 0);
1889 if (!tmp_spec)
1890 return -rte_errno;
1891
1892 tmp_mask = rte_zmalloc(NULL, pkt_len / 2, 0);
1893 if (!tmp_mask) {
1894 rte_free(tmp_spec);
1895 return -rte_errno;
1896 }
1897
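/*
 * Convert each pair of hex characters into one byte: an (illustrative)
 * spec string "45000014" becomes the bytes {0x45, 0x00, 0x00, 0x14}.
 */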
1898 for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
1899 tmp = spec_pattern[i];
1900 if (tmp >= 'a' && tmp <= 'f')
1901 tmp_val = tmp - 'a' + 10;
1902 if (tmp >= 'A' && tmp <= 'F')
1903 tmp_val = tmp - 'A' + 10;
1904 if (tmp >= '0' && tmp <= '9')
1905 tmp_val = tmp - '0';
1906
1907 tmp_val *= 16;
1908 tmp = spec_pattern[i + 1];
1909 if (tmp >= 'a' && tmp <= 'f')
1910 tmp_spec[j] = tmp_val + tmp - 'a' + 10;
1911 if (tmp >= 'A' && tmp <= 'F')
1912 tmp_spec[j] = tmp_val + tmp - 'A' + 10;
1913 if (tmp >= '0' && tmp <= '9')
1914 tmp_spec[j] = tmp_val + tmp - '0';
1915
1916 tmp = mask_pattern[i];
1917 if (tmp >= 'a' && tmp <= 'f')
1918 tmp_val = tmp - 'a' + 10;
1919 if (tmp >= 'A' && tmp <= 'F')
1920 tmp_val = tmp - 'A' + 10;
1921 if (tmp >= '0' && tmp <= '9')
1922 tmp_val = tmp - '0';
1923
1924 tmp_val *= 16;
1925 tmp = mask_pattern[i + 1];
1926 if (tmp >= 'a' && tmp <= 'f')
1927 tmp_mask[j] = tmp_val + tmp - 'a' + 10;
1928 if (tmp >= 'A' && tmp <= 'F')
1929 tmp_mask[j] = tmp_val + tmp - 'A' + 10;
1930 if (tmp >= '0' && tmp <= '9')
1931 tmp_mask[j] = tmp_val + tmp - '0';
1932 }
1933
1934 pkt_len /= 2;
1935
1936 if (ice_parser_run(ad->psr, tmp_spec, pkt_len, &rslt))
1937 return -rte_errno;
1938
1939 if (!tmp_mask)
1940 return -rte_errno;
1941
1942 filter->prof = (struct ice_parser_profile *)
1943 ice_malloc(&ad->hw, sizeof(*filter->prof));
1944 if (!filter->prof)
1945 return -ENOMEM;
1946
1947 if (ice_parser_profile_init(&rslt, tmp_spec, tmp_mask,
1948 pkt_len, ICE_BLK_FD, true, filter->prof))
1949 return -rte_errno;
1950
1951 u8 *pkt_buf = (u8 *)ice_malloc(&ad->hw, pkt_len + 1);
1952 if (!pkt_buf)
1953 return -ENOMEM;
1954 rte_memcpy(pkt_buf, tmp_spec, pkt_len);
1955 filter->pkt_buf = pkt_buf;
1956
1957 filter->pkt_len = pkt_len;
1958
1959 filter->parser_ena = true;
1960
1961 rte_free(tmp_spec);
1962 rte_free(tmp_mask);
1963 break;
1964 }
1965
1966 case RTE_FLOW_ITEM_TYPE_ETH:
1967 flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
1968 eth_spec = item->spec;
1969 eth_mask = item->mask;
1970
1971 if (!(eth_spec && eth_mask))
1972 break;
1973
1974 if (!rte_is_zero_ether_addr(&eth_mask->dst))
1975 *input_set |= ICE_INSET_DMAC;
1976 if (!rte_is_zero_ether_addr(&eth_mask->src))
1977 *input_set |= ICE_INSET_SMAC;
1978
1979 next_type = (item + 1)->type;
1980 /* Ignore this field except for ICE_FLTR_PTYPE_NON_IP_L2 */
1981 if (eth_mask->type == RTE_BE16(0xffff) &&
1982 next_type == RTE_FLOW_ITEM_TYPE_END) {
1983 *input_set |= ICE_INSET_ETHERTYPE;
1984 ether_type = rte_be_to_cpu_16(eth_spec->type);
1985
1986 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
1987 ether_type == RTE_ETHER_TYPE_IPV6) {
1988 rte_flow_error_set(error, EINVAL,
1989 RTE_FLOW_ERROR_TYPE_ITEM,
1990 item,
1991 "Unsupported ether_type.");
1992 return -rte_errno;
1993 }
1994 }
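/*
 * Illustrative L2-only rule handled by this branch (testpmd syntax,
 * documentation only):
 *   flow create 0 ingress pattern eth type is 0x8863 / end
 *   actions queue index 2 / end
 */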
1995
1996 p_ext_data = (tunnel_type && is_outer) ?
1997 &filter->input.ext_data_outer :
1998 &filter->input.ext_data;
1999 rte_memcpy(&p_ext_data->src_mac,
2000 &eth_spec->src, RTE_ETHER_ADDR_LEN);
2001 rte_memcpy(&p_ext_data->dst_mac,
2002 &eth_spec->dst, RTE_ETHER_ADDR_LEN);
2003 rte_memcpy(&p_ext_data->ether_type,
2004 &eth_spec->type, sizeof(eth_spec->type));
2005 break;
2006 case RTE_FLOW_ITEM_TYPE_IPV4:
2007 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
2008 l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2009 ipv4_spec = item->spec;
2010 ipv4_last = item->last;
2011 ipv4_mask = item->mask;
2012 p_v4 = (tunnel_type && is_outer) ?
2013 &filter->input.ip_outer.v4 :
2014 &filter->input.ip.v4;
2015
2016 if (!(ipv4_spec && ipv4_mask))
2017 break;
2018
2019 /* Check IPv4 mask and update input set */
2020 if (ipv4_mask->hdr.version_ihl ||
2021 ipv4_mask->hdr.total_length ||
2022 ipv4_mask->hdr.hdr_checksum) {
2023 rte_flow_error_set(error, EINVAL,
2024 RTE_FLOW_ERROR_TYPE_ITEM,
2025 item,
2026 "Invalid IPv4 mask.");
2027 return -rte_errno;
2028 }
2029
2030 if (ipv4_last &&
2031 (ipv4_last->hdr.version_ihl ||
2032 ipv4_last->hdr.type_of_service ||
2033 ipv4_last->hdr.time_to_live ||
2034 ipv4_last->hdr.total_length ||
2035 ipv4_last->hdr.next_proto_id ||
2036 ipv4_last->hdr.hdr_checksum ||
2037 ipv4_last->hdr.src_addr ||
2038 ipv4_last->hdr.dst_addr)) {
2039 rte_flow_error_set(error, EINVAL,
2040 RTE_FLOW_ERROR_TYPE_ITEM,
2041 item, "Invalid IPv4 last.");
2042 return -rte_errno;
2043 }
2044
2045 /* Only full masks for IPv4 src/dst addresses are supported */
2046 if (ipv4_mask->hdr.src_addr &&
2047 ipv4_mask->hdr.src_addr != UINT32_MAX)
2048 return -rte_errno;
2049 if (ipv4_mask->hdr.dst_addr &&
2050 ipv4_mask->hdr.dst_addr != UINT32_MAX)
2051 return -rte_errno;
2052
2053 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2054 *input_set |= ICE_INSET_IPV4_DST;
2055 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2056 *input_set |= ICE_INSET_IPV4_SRC;
2057 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2058 *input_set |= ICE_INSET_IPV4_TTL;
2059 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2060 *input_set |= ICE_INSET_IPV4_PROTO;
2061 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2062 *input_set |= ICE_INSET_IPV4_TOS;
2063
2064 p_v4->dst_ip = ipv4_spec->hdr.dst_addr;
2065 p_v4->src_ip = ipv4_spec->hdr.src_addr;
2066 p_v4->ttl = ipv4_spec->hdr.time_to_live;
2067 p_v4->proto = ipv4_spec->hdr.next_proto_id;
2068 p_v4->tos = ipv4_spec->hdr.type_of_service;
2069
2070 /* Fragmented IPv4:
2071 * spec is 0x2000, mask is 0x2000
2072 */
2073 if (ipv4_spec->hdr.fragment_offset ==
2074 rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
2075 ipv4_mask->hdr.fragment_offset ==
2076 rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
2077 /* All IPv4 fragment packets have the same
2078 * ethertype; if the spec and mask are valid,
2079 * set the ethertype into the input set.
2080 */
2081 flow_type = ICE_FLTR_PTYPE_FRAG_IPV4;
2082 *input_set |= ICE_INSET_ETHERTYPE;
2083 input_set_o |= ICE_INSET_ETHERTYPE;
2084 } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
2085 rte_flow_error_set(error, EINVAL,
2086 RTE_FLOW_ERROR_TYPE_ITEM,
2087 item, "Invalid IPv4 mask.");
2088 return -rte_errno;
2089 }
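/*
 * Illustrative fragment rule matched here (testpmd syntax,
 * documentation only):
 *   flow create 0 ingress pattern eth / ipv4 fragment_offset spec 0x2000
 *   fragment_offset mask 0x2000 / end actions queue index 1 / end
 */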
2090
2091 break;
2092 case RTE_FLOW_ITEM_TYPE_IPV6:
2093 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
2094 l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2095 ipv6_spec = item->spec;
2096 ipv6_mask = item->mask;
2097 p_v6 = (tunnel_type && is_outer) ?
2098 &filter->input.ip_outer.v6 :
2099 &filter->input.ip.v6;
2100
2101 if (!(ipv6_spec && ipv6_mask))
2102 break;
2103
2104 /* Check IPv6 mask and update input set */
2105 if (ipv6_mask->hdr.payload_len) {
2106 rte_flow_error_set(error, EINVAL,
2107 RTE_FLOW_ERROR_TYPE_ITEM,
2108 item,
2109 "Invalid IPv6 mask");
2110 return -rte_errno;
2111 }
2112
2113 if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
2114 RTE_DIM(ipv6_mask->hdr.src_addr)))
2115 *input_set |= ICE_INSET_IPV6_SRC;
2116 if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
2117 RTE_DIM(ipv6_mask->hdr.dst_addr)))
2118 *input_set |= ICE_INSET_IPV6_DST;
2119
2120 if ((ipv6_mask->hdr.vtc_flow &
2121 rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
2122 == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
2123 *input_set |= ICE_INSET_IPV6_TC;
2124 if (ipv6_mask->hdr.proto == UINT8_MAX)
2125 *input_set |= ICE_INSET_IPV6_NEXT_HDR;
2126 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2127 *input_set |= ICE_INSET_IPV6_HOP_LIMIT;
2128
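/*
 * vtc_flow packs version (4 bits), traffic class (8 bits) and flow label
 * (20 bits); shifting the host-order value right by
 * ICE_FDIR_IPV6_TC_OFFSET below drops the flow label and leaves the
 * traffic class in the low byte.
 */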
2129 rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
2130 rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
2131 vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2132 p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
2133 p_v6->proto = ipv6_spec->hdr.proto;
2134 p_v6->hlim = ipv6_spec->hdr.hop_limits;
2135 break;
2136 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
2137 l3 = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT;
2138 flow_type = ICE_FLTR_PTYPE_FRAG_IPV6;
2139 ipv6_frag_spec = item->spec;
2140 ipv6_frag_mask = item->mask;
2141
2142 if (!(ipv6_frag_spec && ipv6_frag_mask))
2143 break;
2144
2145 /* Fragmented IPv6:
2146 * spec is 0x1, mask is 0x1
2147 */
2148 if (ipv6_frag_spec->hdr.frag_data ==
2149 rte_cpu_to_be_16(1) &&
2150 ipv6_frag_mask->hdr.frag_data ==
2151 rte_cpu_to_be_16(1)) {
2152 /* All IPv6 fragment packets have the same
2153 * ethertype; if the spec and mask are valid,
2154 * set the ethertype into the input set.
2155 */
2156 *input_set |= ICE_INSET_ETHERTYPE;
2157 input_set_o |= ICE_INSET_ETHERTYPE;
2158 } else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
2159 rte_flow_error_set(error, EINVAL,
2160 RTE_FLOW_ERROR_TYPE_ITEM,
2161 item, "Invalid IPv6 mask.");
2162 return -rte_errno;
2163 }
2164
2165 break;
2166
2167 case RTE_FLOW_ITEM_TYPE_TCP:
2168 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2169 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
2170 if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2171 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
2172
2173 tcp_spec = item->spec;
2174 tcp_mask = item->mask;
2175
2176 if (!(tcp_spec && tcp_mask))
2177 break;
2178
2179 /* Check TCP mask and update input set */
2180 if (tcp_mask->hdr.sent_seq ||
2181 tcp_mask->hdr.recv_ack ||
2182 tcp_mask->hdr.data_off ||
2183 tcp_mask->hdr.tcp_flags ||
2184 tcp_mask->hdr.rx_win ||
2185 tcp_mask->hdr.cksum ||
2186 tcp_mask->hdr.tcp_urp) {
2187 rte_flow_error_set(error, EINVAL,
2188 RTE_FLOW_ERROR_TYPE_ITEM,
2189 item,
2190 "Invalid TCP mask");
2191 return -rte_errno;
2192 }
2193
2194 /* Only full masks for TCP src/dst ports are supported */
2195 if (tcp_mask->hdr.src_port &&
2196 tcp_mask->hdr.src_port != UINT16_MAX)
2197 return -rte_errno;
2198 if (tcp_mask->hdr.dst_port &&
2199 tcp_mask->hdr.dst_port != UINT16_MAX)
2200 return -rte_errno;
2201
2202 if (tcp_mask->hdr.src_port == UINT16_MAX)
2203 *input_set |= ICE_INSET_TCP_SRC_PORT;
2204 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2205 *input_set |= ICE_INSET_TCP_DST_PORT;
2206
2207 /* Get filter info */
2208 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2209 assert(p_v4);
2210 p_v4->dst_port = tcp_spec->hdr.dst_port;
2211 p_v4->src_port = tcp_spec->hdr.src_port;
2212 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2213 assert(p_v6);
2214 p_v6->dst_port = tcp_spec->hdr.dst_port;
2215 p_v6->src_port = tcp_spec->hdr.src_port;
2216 }
2217 break;
2218 case RTE_FLOW_ITEM_TYPE_UDP:
2219 l4 = RTE_FLOW_ITEM_TYPE_UDP;
2220 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2221 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
2222 if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2223 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
2224
2225 udp_spec = item->spec;
2226 udp_mask = item->mask;
2227
2228 if (!(udp_spec && udp_mask))
2229 break;
2230
2231 /* Check UDP mask and update input set */
2232 if (udp_mask->hdr.dgram_len ||
2233 udp_mask->hdr.dgram_cksum) {
2234 rte_flow_error_set(error, EINVAL,
2235 RTE_FLOW_ERROR_TYPE_ITEM,
2236 item,
2237 "Invalid UDP mask");
2238 return -rte_errno;
2239 }
2240
2241 /* Only full masks for UDP src/dst ports are supported */
2242 if (udp_mask->hdr.src_port &&
2243 udp_mask->hdr.src_port != UINT16_MAX)
2244 return -rte_errno;
2245 if (udp_mask->hdr.dst_port &&
2246 udp_mask->hdr.dst_port != UINT16_MAX)
2247 return -rte_errno;
2248
2249 if (udp_mask->hdr.src_port == UINT16_MAX)
2250 *input_set |= ICE_INSET_UDP_SRC_PORT;
2251 if (udp_mask->hdr.dst_port == UINT16_MAX)
2252 *input_set |= ICE_INSET_UDP_DST_PORT;
2253
2254 /* Get filter info */
2255 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2256 assert(p_v4);
2257 p_v4->dst_port = udp_spec->hdr.dst_port;
2258 p_v4->src_port = udp_spec->hdr.src_port;
2259 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2260 assert(p_v6);
2261 p_v6->src_port = udp_spec->hdr.src_port;
2262 p_v6->dst_port = udp_spec->hdr.dst_port;
2263 }
2264 break;
2265 case RTE_FLOW_ITEM_TYPE_SCTP:
2266 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2267 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
2268 if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2269 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
2270
2271 sctp_spec = item->spec;
2272 sctp_mask = item->mask;
2273
2274 if (!(sctp_spec && sctp_mask))
2275 break;
2276
2277 /* Check SCTP mask and update input set */
2278 if (sctp_mask->hdr.cksum) {
2279 rte_flow_error_set(error, EINVAL,
2280 RTE_FLOW_ERROR_TYPE_ITEM,
2281 item,
2282 "Invalid UDP mask");
2283 return -rte_errno;
2284 }
2285
2286 /* Only full masks for SCTP src/dst ports are supported */
2287 if (sctp_mask->hdr.src_port &&
2288 sctp_mask->hdr.src_port != UINT16_MAX)
2289 return -rte_errno;
2290 if (sctp_mask->hdr.dst_port &&
2291 sctp_mask->hdr.dst_port != UINT16_MAX)
2292 return -rte_errno;
2293
2294 if (sctp_mask->hdr.src_port == UINT16_MAX)
2295 *input_set |= ICE_INSET_SCTP_SRC_PORT;
2296 if (sctp_mask->hdr.dst_port == UINT16_MAX)
2297 *input_set |= ICE_INSET_SCTP_DST_PORT;
2298
2299 /* Get filter info */
2300 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2301 assert(p_v4);
2302 p_v4->dst_port = sctp_spec->hdr.dst_port;
2303 p_v4->src_port = sctp_spec->hdr.src_port;
2304 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2305 assert(p_v6);
2306 p_v6->dst_port = sctp_spec->hdr.dst_port;
2307 p_v6->src_port = sctp_spec->hdr.src_port;
2308 }
2309 break;
2310 case RTE_FLOW_ITEM_TYPE_VOID:
2311 break;
2312 case RTE_FLOW_ITEM_TYPE_VXLAN:
2313 l3 = RTE_FLOW_ITEM_TYPE_END;
2314 vxlan_spec = item->spec;
2315 vxlan_mask = item->mask;
2316 is_outer = false;
2317
2318 if (!(vxlan_spec && vxlan_mask))
2319 break;
2320
2321 if (vxlan_mask->hdr.vx_flags) {
2322 rte_flow_error_set(error, EINVAL,
2323 RTE_FLOW_ERROR_TYPE_ITEM,
2324 item,
2325 "Invalid vxlan field");
2326 return -rte_errno;
2327 }
2328
2329 if (vxlan_mask->hdr.vx_vni)
2330 *input_set |= ICE_INSET_VXLAN_VNI;
2331
2332 filter->input.vxlan_data.vni = vxlan_spec->hdr.vx_vni;
2333
2334 break;
2335 case RTE_FLOW_ITEM_TYPE_GTPU:
2336 l3 = RTE_FLOW_ITEM_TYPE_END;
2337 tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
2338 gtp_spec = item->spec;
2339 gtp_mask = item->mask;
2340
2341 if (!(gtp_spec && gtp_mask))
2342 break;
2343
2344 if (gtp_mask->v_pt_rsv_flags ||
2345 gtp_mask->msg_type ||
2346 gtp_mask->msg_len) {
2347 rte_flow_error_set(error, EINVAL,
2348 RTE_FLOW_ERROR_TYPE_ITEM,
2349 item,
2350 "Invalid GTP mask");
2351 return -rte_errno;
2352 }
2353
2354 if (gtp_mask->teid == UINT32_MAX)
2355 input_set_o |= ICE_INSET_GTPU_TEID;
2356
2357 filter->input.gtpu_data.teid = gtp_spec->teid;
2358 break;
2359 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
2360 tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
2361 gtp_psc_spec = item->spec;
2362 gtp_psc_mask = item->mask;
2363
2364 if (!(gtp_psc_spec && gtp_psc_mask))
2365 break;
2366
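/* QFI is a 6-bit field, so 0x3F is the all-ones mask. */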
2367 if (gtp_psc_mask->hdr.qfi == 0x3F)
2368 input_set_o |= ICE_INSET_GTPU_QFI;
2369
2370 filter->input.gtpu_data.qfi =
2371 gtp_psc_spec->hdr.qfi;
2372 break;
2373 case RTE_FLOW_ITEM_TYPE_ESP:
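/*
 * ESP preceded by UDP is NAT-T (UDP-encapsulated ESP);
 * ESP directly over IP is plain ESP.
 */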
2374 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
2375 l4 == RTE_FLOW_ITEM_TYPE_UDP)
2376 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
2377 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6 &&
2378 l4 == RTE_FLOW_ITEM_TYPE_UDP)
2379 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
2380 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
2381 l4 == RTE_FLOW_ITEM_TYPE_END)
2382 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
2383 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6 &&
2384 l4 == RTE_FLOW_ITEM_TYPE_END)
2385 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
2386
2387 esp_spec = item->spec;
2388 esp_mask = item->mask;
2389
2390 if (!(esp_spec && esp_mask))
2391 break;
2392
2393 if (esp_mask->hdr.spi == UINT32_MAX) {
2394 if (l4 == RTE_FLOW_ITEM_TYPE_UDP)
2395 *input_set |= ICE_INSET_NAT_T_ESP_SPI;
2396 else
2397 *input_set |= ICE_INSET_ESP_SPI;
2398 }
2399
2400 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2401 filter->input.ip.v4.sec_parm_idx =
2402 esp_spec->hdr.spi;
2403 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2404 filter->input.ip.v6.sec_parm_idx =
2405 esp_spec->hdr.spi;
2406 break;
2407 default:
2408 rte_flow_error_set(error, EINVAL,
2409 RTE_FLOW_ERROR_TYPE_ITEM,
2410 item,
2411 "Invalid pattern item.");
2412 return -rte_errno;
2413 }
2414 }
2415
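/*
 * The loop above classifies tunnelled flows by their IP/L4 headers;
 * remap those generic flow types to the GTPU and VXLAN specific variants
 * now that the tunnel type is known.
 */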
2416 if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
2417 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
2418 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU;
2419 else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
2420 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
2421 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH;
2422 else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
2423 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
2424 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU;
2425 else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
2426 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
2427 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH;
2428 else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
2429 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
2430 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP;
2431 else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
2432 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
2433 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP;
2434 else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
2435 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
2436 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP;
2437 else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
2438 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
2439 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER;
2440
2441 filter->tunnel_type = tunnel_type;
2442 filter->input.flow_type = flow_type;
2443 filter->input_set_o = input_set_o;
2444 filter->input_set_i = input_set_i;
2445
2446 return 0;
2447 }
2448
2449 static int
2450 ice_fdir_parse(struct ice_adapter *ad,
2451 struct ice_pattern_match_item *array,
2452 uint32_t array_len,
2453 const struct rte_flow_item pattern[],
2454 const struct rte_flow_action actions[],
2455 uint32_t priority,
2456 void **meta,
2457 struct rte_flow_error *error)
2458 {
2459 struct ice_pf *pf = &ad->pf;
2460 struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
2461 struct ice_pattern_match_item *item = NULL;
2462 uint64_t input_set;
2463 bool raw = false;
2464 int ret;
2465
2466 memset(filter, 0, sizeof(*filter));
2467 item = ice_search_pattern_match_item(ad, pattern, array, array_len,
2468 error);
2469
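/* Without pipeline-mode-support, FDIR only accepts priority 0 rules. */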
2470 if (!ad->devargs.pipe_mode_support && priority >= 1)
2471 return -rte_errno;
2472
2473 if (!item)
2474 return -rte_errno;
2475
2476 ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
2477 if (ret)
2478 goto error;
2479
2480 if (item->pattern_list[0] == RTE_FLOW_ITEM_TYPE_RAW)
2481 raw = true;
2482
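/*
 * Raw patterns carry no per-field input set, so invert the empty set
 * here to satisfy the non-empty check below.
 */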
2483 input_set = filter->input_set_o | filter->input_set_i;
2484 input_set = raw ? ~input_set : input_set;
2485
2486 if (!input_set || filter->input_set_o &
2487 ~(item->input_set_mask_o | ICE_INSET_ETHERTYPE) ||
2488 filter->input_set_i & ~item->input_set_mask_i) {
2489 rte_flow_error_set(error, EINVAL,
2490 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2491 pattern,
2492 "Invalid input set");
2493 ret = -rte_errno;
2494 goto error;
2495 }
2496
2497 ret = ice_fdir_parse_action(ad, actions, error, filter);
2498 if (ret)
2499 goto error;
2500
2501 if (meta)
2502 *meta = filter;
2503
2504 rte_free(item);
2505 return ret;
2506 error:
2507 rte_free(filter->prof);
2508 rte_free(filter->pkt_buf);
2509 rte_free(item);
2510 return ret;
2511 }
2512
2513 static struct ice_flow_parser ice_fdir_parser = {
2514 .engine = &ice_fdir_engine,
2515 .array = ice_fdir_pattern_list,
2516 .array_len = RTE_DIM(ice_fdir_pattern_list),
2517 .parse_pattern_action = ice_fdir_parse,
2518 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
2519 };
2520
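/*
 * Illustrative FDIR rule exercised through this parser (testpmd syntax,
 * for documentation only):
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1
 *   dst is 192.168.0.2 / tcp src is 1000 dst is 2000 / end
 *   actions queue index 4 / mark id 3 / end
 */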
2521 RTE_INIT(ice_fdir_engine_register)
2522 {
2523 ice_register_flow_engine(&ice_fdir_engine);
2524 }
2525