xref: /dpdk/drivers/net/i40e/i40e_fdir.c (revision 7be78d02)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2015 Intel Corporation
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_memzone.h>
17 #include <rte_malloc.h>
18 #include <rte_arp.h>
19 #include <rte_ip.h>
20 #include <rte_udp.h>
21 #include <rte_tcp.h>
22 #include <rte_sctp.h>
23 #include <rte_hash_crc.h>
24 #include <rte_bitmap.h>
25 #include <rte_os_shim.h>
26 
27 #include "i40e_logs.h"
28 #include "base/i40e_type.h"
29 #include "base/i40e_prototype.h"
30 #include "i40e_ethdev.h"
31 #include "i40e_rxtx.h"
32 
33 #define I40E_FDIR_MZ_NAME          "FDIR_MEMZONE"
34 #ifndef IPV6_ADDR_LEN
35 #define IPV6_ADDR_LEN              16
36 #endif
37 
38 #ifndef IPPROTO_L2TP
39 #define IPPROTO_L2TP		  115
40 #endif
41 
42 #define I40E_FDIR_PKT_LEN                   512
43 #define I40E_FDIR_IP_DEFAULT_LEN            420
44 #define I40E_FDIR_IP_DEFAULT_TTL            0x40
45 #define I40E_FDIR_IP_DEFAULT_VERSION_IHL    0x45
46 #define I40E_FDIR_TCP_DEFAULT_DATAOFF       0x50
47 #define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW     0x60000000
48 
49 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS   0xFF
50 #define I40E_FDIR_IPv6_PAYLOAD_LEN          380
51 #define I40E_FDIR_UDP_DEFAULT_LEN           400
52 #define I40E_FDIR_GTP_DEFAULT_LEN           384
53 #define I40E_FDIR_INNER_IP_DEFAULT_LEN      384
54 #define I40E_FDIR_INNER_IPV6_DEFAULT_LEN    344
55 
56 #define I40E_FDIR_GTPC_DST_PORT             2123
57 #define I40E_FDIR_GTPU_DST_PORT             2152
58 #define I40E_FDIR_GTP_VER_FLAG_0X30         0x30
59 #define I40E_FDIR_GTP_VER_FLAG_0X32         0x32
60 #define I40E_FDIR_GTP_MSG_TYPE_0X01         0x01
61 #define I40E_FDIR_GTP_MSG_TYPE_0XFF         0xFF
62 
63 #define I40E_FDIR_ESP_DST_PORT              4500
64 
65 /* Wait time for fdir filter programming */
66 #define I40E_FDIR_MAX_WAIT_US 10000
67 
68 /* Wait count and interval for fdir filter flush */
69 #define I40E_FDIR_FLUSH_RETRY       50
70 #define I40E_FDIR_FLUSH_INTERVAL_MS 5
71 
72 #define I40E_COUNTER_PF           2
73 /* Statistic counter index for one pf */
74 #define I40E_COUNTER_INDEX_FDIR(pf_id)   (0 + (pf_id) * I40E_COUNTER_PF)
75 
76 #define I40E_FDIR_FLOWS ( \
77 	(1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \
78 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
79 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
80 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
81 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
82 	(1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \
83 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
84 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
85 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
86 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
87 	(1ULL << RTE_ETH_FLOW_L2_PAYLOAD))
88 
89 static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
90 			 struct i40e_fdir_filter *filter);
91 static struct i40e_fdir_filter *
92 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
93 			const struct i40e_fdir_input *input);
94 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
95 				   struct i40e_fdir_filter *filter);
96 static int
97 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
98 				  enum i40e_filter_pctype pctype,
99 				  const struct i40e_fdir_filter_conf *filter,
100 				  bool add, bool wait_status);
101 
102 static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue * rxq)103 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
104 {
105 	struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
106 	struct i40e_hmc_obj_rxq rx_ctx;
107 	int err = I40E_SUCCESS;
108 
109 	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
110 	/* Init the RX queue in hardware */
111 	rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
112 	rx_ctx.hbuff = 0;
113 	rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
114 	rx_ctx.qlen = rxq->nb_rx_desc;
115 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
116 	rx_ctx.dsize = 1;
117 #endif
118 	rx_ctx.dtype = i40e_header_split_none;
119 	rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
120 	rx_ctx.rxmax = I40E_ETH_MAX_LEN;
121 	rx_ctx.tphrdesc_ena = 1;
122 	rx_ctx.tphwdesc_ena = 1;
123 	rx_ctx.tphdata_ena = 1;
124 	rx_ctx.tphhead_ena = 1;
125 	rx_ctx.lrxqthresh = 2;
126 	rx_ctx.crcstrip = 0;
127 	rx_ctx.l2tsel = 1;
128 	rx_ctx.showiv = 0;
129 	rx_ctx.prefena = 1;
130 
131 	err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
132 	if (err != I40E_SUCCESS) {
133 		PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
134 		return err;
135 	}
136 	err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
137 	if (err != I40E_SUCCESS) {
138 		PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
139 		return err;
140 	}
141 	rxq->qrx_tail = hw->hw_addr +
142 		I40E_QRX_TAIL(rxq->vsi->base_queue);
143 
144 	rte_wmb();
145 	/* Init the RX tail register. */
146 	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
147 
148 	return err;
149 }
150 
151 /*
152  * i40e_fdir_setup - reserve and initialize the Flow Director resources
153  * @pf: board private structure
154  */
155 int
i40e_fdir_setup(struct i40e_pf * pf)156 i40e_fdir_setup(struct i40e_pf *pf)
157 {
158 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
159 	struct i40e_vsi *vsi;
160 	int err = I40E_SUCCESS;
161 	char z_name[RTE_MEMZONE_NAMESIZE];
162 	const struct rte_memzone *mz = NULL;
163 	struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
164 	uint16_t i;
165 
166 	if ((pf->flags & I40E_FLAG_FDIR) == 0) {
167 		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
168 		return I40E_NOT_SUPPORTED;
169 	}
170 
171 	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
172 			" num_filters_best_effort = %u.",
173 			hw->func_caps.fd_filters_guaranteed,
174 			hw->func_caps.fd_filters_best_effort);
175 
176 	vsi = pf->fdir.fdir_vsi;
177 	if (vsi) {
178 		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
179 		return I40E_SUCCESS;
180 	}
181 
182 	/* make new FDIR VSI */
183 	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
184 	if (!vsi) {
185 		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
186 		return I40E_ERR_NO_AVAILABLE_VSI;
187 	}
188 	pf->fdir.fdir_vsi = vsi;
189 
190 	/*Fdir tx queue setup*/
191 	err = i40e_fdir_setup_tx_resources(pf);
192 	if (err) {
193 		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
194 		goto fail_setup_tx;
195 	}
196 
197 	/*Fdir rx queue setup*/
198 	err = i40e_fdir_setup_rx_resources(pf);
199 	if (err) {
200 		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
201 		goto fail_setup_rx;
202 	}
203 
204 	err = i40e_tx_queue_init(pf->fdir.txq);
205 	if (err) {
206 		PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
207 		goto fail_mem;
208 	}
209 
210 	/* need switch on before dev start*/
211 	err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
212 	if (err) {
213 		PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
214 		goto fail_mem;
215 	}
216 
217 	/* Init the rx queue in hardware */
218 	err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
219 	if (err) {
220 		PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
221 		goto fail_mem;
222 	}
223 
224 	/* switch on rx queue */
225 	err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
226 	if (err) {
227 		PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
228 		goto fail_mem;
229 	}
230 
231 	/* enable FDIR MSIX interrupt */
232 	vsi->nb_used_qps = 1;
233 	i40e_vsi_queues_bind_intr(vsi, I40E_ITR_INDEX_NONE);
234 	i40e_vsi_enable_queues_intr(vsi);
235 
236 	/* reserve memory for the fdir programming packet */
237 	snprintf(z_name, sizeof(z_name), "%s_%s_%d",
238 			eth_dev->device->driver->name,
239 			I40E_FDIR_MZ_NAME,
240 			eth_dev->data->port_id);
241 	mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN *
242 			I40E_FDIR_PRG_PKT_CNT, SOCKET_ID_ANY);
243 	if (!mz) {
244 		PMD_DRV_LOG(ERR, "Cannot init memzone for "
245 				 "flow director program packet.");
246 		err = I40E_ERR_NO_MEMORY;
247 		goto fail_mem;
248 	}
249 
250 	for (i = 0; i < I40E_FDIR_PRG_PKT_CNT; i++) {
251 		pf->fdir.prg_pkt[i] = (uint8_t *)mz->addr +
252 			I40E_FDIR_PKT_LEN * i;
253 		pf->fdir.dma_addr[i] = mz->iova +
254 			I40E_FDIR_PKT_LEN * i;
255 	}
256 
257 	pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
258 	pf->fdir.fdir_actual_cnt = 0;
259 	pf->fdir.fdir_guarantee_free_space =
260 		pf->fdir.fdir_guarantee_total_space;
261 
262 	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
263 		    vsi->base_queue);
264 	return I40E_SUCCESS;
265 
266 fail_mem:
267 	i40e_rx_queue_release(pf->fdir.rxq);
268 	pf->fdir.rxq = NULL;
269 fail_setup_rx:
270 	i40e_tx_queue_release(pf->fdir.txq);
271 	pf->fdir.txq = NULL;
272 fail_setup_tx:
273 	i40e_vsi_release(vsi);
274 	pf->fdir.fdir_vsi = NULL;
275 	return err;
276 }
277 
278 /*
279  * i40e_fdir_teardown - release the Flow Director resources
280  * @pf: board private structure
281  */
282 void
i40e_fdir_teardown(struct i40e_pf * pf)283 i40e_fdir_teardown(struct i40e_pf *pf)
284 {
285 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
286 	struct i40e_vsi *vsi;
287 
288 	vsi = pf->fdir.fdir_vsi;
289 	if (!vsi)
290 		return;
291 
292 	/* disable FDIR MSIX interrupt */
293 	i40e_vsi_queues_unbind_intr(vsi);
294 	i40e_vsi_disable_queues_intr(vsi);
295 
296 	int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
297 	if (err)
298 		PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
299 	err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
300 	if (err)
301 		PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
302 
303 	i40e_rx_queue_release(pf->fdir.rxq);
304 	pf->fdir.rxq = NULL;
305 	i40e_tx_queue_release(pf->fdir.txq);
306 	pf->fdir.txq = NULL;
307 	i40e_vsi_release(vsi);
308 	pf->fdir.fdir_vsi = NULL;
309 }
310 
311 /* check whether the flow director table in empty */
312 static inline int
i40e_fdir_empty(struct i40e_hw * hw)313 i40e_fdir_empty(struct i40e_hw *hw)
314 {
315 	uint32_t guarant_cnt, best_cnt;
316 
317 	guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
318 				 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
319 				 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
320 	best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
321 			      I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
322 			      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
323 	if (best_cnt + guarant_cnt > 0)
324 		return -1;
325 
326 	return 0;
327 }
328 
329 /*
330  * Initialize the configuration about bytes stream extracted as flexible payload
331  * and mask setting
332  */
333 static inline void
i40e_init_flx_pld(struct i40e_pf * pf)334 i40e_init_flx_pld(struct i40e_pf *pf)
335 {
336 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
337 	uint8_t pctype;
338 	int i, index;
339 	uint16_t flow_type;
340 
341 	/*
342 	 * Define the bytes stream extracted as flexible payload in
343 	 * field vector. By default, select 8 words from the beginning
344 	 * of payload as flexible payload.
345 	 */
346 	for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
347 		index = i * I40E_MAX_FLXPLD_FIED;
348 		pf->fdir.flex_set[index].src_offset = 0;
349 		pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
350 		pf->fdir.flex_set[index].dst_offset = 0;
351 		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
352 		I40E_WRITE_REG(hw,
353 			I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/
354 		I40E_WRITE_REG(hw,
355 			I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/
356 		pf->fdir.flex_pit_flag[i] = 0;
357 	}
358 
359 	/* initialize the masks */
360 	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
361 	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
362 		flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
363 
364 		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
365 			continue;
366 		pf->fdir.flex_mask[pctype].word_mask = 0;
367 		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
368 		for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
369 			pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
370 			pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
371 			i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
372 		}
373 	}
374 }
375 
376 #define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
377 	if ((flex_pit2).src_offset < \
378 		(flex_pit1).src_offset + (flex_pit1).size) { \
379 		PMD_DRV_LOG(ERR, "src_offset should be not" \
380 			" less than than previous offset" \
381 			" + previous FSIZE."); \
382 		return -EINVAL; \
383 	} \
384 } while (0)
385 
386 /*
387  * i40e_srcoff_to_flx_pit - transform the src_offset into flex_pit structure,
388  * and the flex_pit will be sorted by it's src_offset value
389  */
390 static inline uint16_t
i40e_srcoff_to_flx_pit(const uint16_t * src_offset,struct i40e_fdir_flex_pit * flex_pit)391 i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
392 			struct i40e_fdir_flex_pit *flex_pit)
393 {
394 	uint16_t src_tmp, size, num = 0;
395 	uint16_t i, k, j = 0;
396 
397 	while (j < I40E_FDIR_MAX_FLEX_LEN) {
398 		size = 1;
399 		for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
400 			if (src_offset[j + 1] == src_offset[j] + 1)
401 				size++;
402 			else
403 				break;
404 		}
405 		src_tmp = src_offset[j] + 1 - size;
406 		/* the flex_pit need to be sort by src_offset */
407 		for (i = 0; i < num; i++) {
408 			if (src_tmp < flex_pit[i].src_offset)
409 				break;
410 		}
411 		/* if insert required, move backward */
412 		for (k = num; k > i; k--)
413 			flex_pit[k] = flex_pit[k - 1];
414 		/* insert */
415 		flex_pit[i].dst_offset = j + 1 - size;
416 		flex_pit[i].src_offset = src_tmp;
417 		flex_pit[i].size = size;
418 		j++;
419 		num++;
420 	}
421 	return num;
422 }
423 
424 /* i40e_check_fdir_flex_payload -check flex payload configuration arguments */
425 static inline int
i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg * flex_cfg)426 i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
427 {
428 	struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
429 	uint16_t num, i;
430 
431 	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
432 		if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
433 			PMD_DRV_LOG(ERR, "exceeds maximal payload limit.");
434 			return -EINVAL;
435 		}
436 	}
437 
438 	memset(flex_pit, 0, sizeof(flex_pit));
439 	num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
440 	if (num > I40E_MAX_FLXPLD_FIED) {
441 		PMD_DRV_LOG(ERR, "exceeds maximal number of flex fields.");
442 		return -EINVAL;
443 	}
444 	for (i = 0; i < num; i++) {
445 		if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
446 			flex_pit[i].src_offset & 0x01) {
447 			PMD_DRV_LOG(ERR, "flexpayload should be measured"
448 				" in word");
449 			return -EINVAL;
450 		}
451 		if (i != num - 1)
452 			I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
453 	}
454 	return 0;
455 }
456 
457 /*
458  * i40e_check_fdir_flex_conf -check if the flex payload and mask configuration
459  * arguments are valid
460  */
461 static int
i40e_check_fdir_flex_conf(const struct i40e_adapter * adapter,const struct rte_eth_fdir_flex_conf * conf)462 i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
463 			  const struct rte_eth_fdir_flex_conf *conf)
464 {
465 	const struct rte_eth_flex_payload_cfg *flex_cfg;
466 	const struct rte_eth_fdir_flex_mask *flex_mask;
467 	uint16_t mask_tmp;
468 	uint8_t nb_bitmask;
469 	uint16_t i, j;
470 	int ret = 0;
471 	enum i40e_filter_pctype pctype;
472 
473 	if (conf == NULL) {
474 		PMD_DRV_LOG(INFO, "NULL pointer.");
475 		return -EINVAL;
476 	}
477 	/* check flexible payload setting configuration */
478 	if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
479 		PMD_DRV_LOG(ERR, "invalid number of payload setting.");
480 		return -EINVAL;
481 	}
482 	for (i = 0; i < conf->nb_payloads; i++) {
483 		flex_cfg = &conf->flex_set[i];
484 		if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
485 			PMD_DRV_LOG(ERR, "invalid payload type.");
486 			return -EINVAL;
487 		}
488 		ret = i40e_check_fdir_flex_payload(flex_cfg);
489 		if (ret < 0) {
490 			PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
491 			return -EINVAL;
492 		}
493 	}
494 
495 	/* check flex mask setting configuration */
496 	if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
497 		PMD_DRV_LOG(ERR, "invalid number of flex masks.");
498 		return -EINVAL;
499 	}
500 	for (i = 0; i < conf->nb_flexmasks; i++) {
501 		flex_mask = &conf->flex_mask[i];
502 		pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
503 		if (pctype == I40E_FILTER_PCTYPE_INVALID) {
504 			PMD_DRV_LOG(WARNING, "invalid flow type.");
505 			return -EINVAL;
506 		}
507 		nb_bitmask = 0;
508 		for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
509 			mask_tmp = I40E_WORD(flex_mask->mask[j],
510 					     flex_mask->mask[j + 1]);
511 			if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
512 				nb_bitmask++;
513 				if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
514 					PMD_DRV_LOG(ERR, " exceed maximal"
515 						" number of bitmasks.");
516 					return -EINVAL;
517 				}
518 			}
519 		}
520 	}
521 	return 0;
522 }
523 
524 /*
525  * i40e_set_flx_pld_cfg -configure the rule how bytes stream is extracted as flexible payload
526  * @pf: board private structure
527  * @cfg: the rule how bytes stream is extracted as flexible payload
528  */
529 static void
i40e_set_flx_pld_cfg(struct i40e_pf * pf,const struct rte_eth_flex_payload_cfg * cfg)530 i40e_set_flx_pld_cfg(struct i40e_pf *pf,
531 			 const struct rte_eth_flex_payload_cfg *cfg)
532 {
533 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
534 	struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
535 	uint32_t flx_pit, flx_ort;
536 	uint16_t num, min_next_off;  /* in words */
537 	uint8_t field_idx = 0;
538 	uint8_t layer_idx = 0;
539 	uint16_t i;
540 
541 	if (cfg->type == RTE_ETH_L2_PAYLOAD)
542 		layer_idx = I40E_FLXPLD_L2_IDX;
543 	else if (cfg->type == RTE_ETH_L3_PAYLOAD)
544 		layer_idx = I40E_FLXPLD_L3_IDX;
545 	else if (cfg->type == RTE_ETH_L4_PAYLOAD)
546 		layer_idx = I40E_FLXPLD_L4_IDX;
547 
548 	memset(flex_pit, 0, sizeof(flex_pit));
549 	num = RTE_MIN(i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit),
550 		      RTE_DIM(flex_pit));
551 
552 	if (num) {
553 		flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
554 			  (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
555 			  (layer_idx * I40E_MAX_FLXPLD_FIED);
556 		I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
557 	}
558 
559 	for (i = 0; i < num; i++) {
560 		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
561 		/* record the info in fdir structure */
562 		pf->fdir.flex_set[field_idx].src_offset =
563 			flex_pit[i].src_offset / sizeof(uint16_t);
564 		pf->fdir.flex_set[field_idx].size =
565 			flex_pit[i].size / sizeof(uint16_t);
566 		pf->fdir.flex_set[field_idx].dst_offset =
567 			flex_pit[i].dst_offset / sizeof(uint16_t);
568 		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
569 				pf->fdir.flex_set[field_idx].size,
570 				pf->fdir.flex_set[field_idx].dst_offset);
571 
572 		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
573 	}
574 	min_next_off = pf->fdir.flex_set[field_idx].src_offset +
575 				pf->fdir.flex_set[field_idx].size;
576 
577 	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
578 		/* set the non-used register obeying register's constrain */
579 		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
580 			   NONUSE_FLX_PIT_DEST_OFF);
581 		I40E_WRITE_REG(hw,
582 			I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
583 			flx_pit);
584 		min_next_off++;
585 	}
586 }
587 
588 /*
589  * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
590  * @pf: board private structure
591  * @pctype: packet classify type
592  * @flex_masks: mask for flexible payload
593  */
594 static void
i40e_set_flex_mask_on_pctype(struct i40e_pf * pf,enum i40e_filter_pctype pctype,const struct rte_eth_fdir_flex_mask * mask_cfg)595 i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
596 		enum i40e_filter_pctype pctype,
597 		const struct rte_eth_fdir_flex_mask *mask_cfg)
598 {
599 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
600 	struct i40e_fdir_flex_mask *flex_mask;
601 	uint32_t flxinset, fd_mask;
602 	uint16_t mask_tmp;
603 	uint8_t i, nb_bitmask = 0;
604 
605 	flex_mask = &pf->fdir.flex_mask[pctype];
606 	memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
607 	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
608 		mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
609 		if (mask_tmp != 0x0) {
610 			flex_mask->word_mask |=
611 				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
612 			if (mask_tmp != UINT16_MAX) {
613 				/* set bit mask */
614 				flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
615 				flex_mask->bitmask[nb_bitmask].offset =
616 					i / sizeof(uint16_t);
617 				nb_bitmask++;
618 			}
619 		}
620 	}
621 	/* write mask to hw */
622 	flxinset = (flex_mask->word_mask <<
623 		I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
624 		I40E_PRTQF_FD_FLXINSET_INSET_MASK;
625 	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
626 
627 	for (i = 0; i < nb_bitmask; i++) {
628 		fd_mask = (flex_mask->bitmask[i].mask <<
629 			I40E_PRTQF_FD_MSK_MASK_SHIFT) &
630 			I40E_PRTQF_FD_MSK_MASK_MASK;
631 		fd_mask |= ((flex_mask->bitmask[i].offset +
632 			I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
633 			I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
634 			I40E_PRTQF_FD_MSK_OFFSET_MASK;
635 		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
636 	}
637 }
638 
639 /*
640  * Enable/disable flow director RX processing in vector routines.
641  */
642 void
i40e_fdir_rx_proc_enable(struct rte_eth_dev * dev,bool on)643 i40e_fdir_rx_proc_enable(struct rte_eth_dev *dev, bool on)
644 {
645 	int32_t i;
646 
647 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
648 		struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
649 		if (!rxq)
650 			continue;
651 		rxq->fdir_enabled = on;
652 	}
653 	PMD_DRV_LOG(DEBUG, "Flow Director processing on RX set to %d", on);
654 }
655 
656 /*
657  * Configure flow director related setting
658  */
659 int
i40e_fdir_configure(struct rte_eth_dev * dev)660 i40e_fdir_configure(struct rte_eth_dev *dev)
661 {
662 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
663 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
664 	struct rte_eth_fdir_flex_conf *conf;
665 	enum i40e_filter_pctype pctype;
666 	uint32_t val;
667 	uint8_t i;
668 	int ret = 0;
669 
670 	/*
671 	* configuration need to be done before
672 	* flow director filters are added
673 	* If filters exist, flush them.
674 	*/
675 	if (i40e_fdir_empty(hw) < 0) {
676 		ret = i40e_fdir_flush(dev);
677 		if (ret) {
678 			PMD_DRV_LOG(ERR, "failed to flush fdir table.");
679 			return ret;
680 		}
681 	}
682 
683 	/* enable FDIR filter */
684 	val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
685 	val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
686 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
687 
688 	i40e_init_flx_pld(pf); /* set flex config to default value */
689 
690 	conf = &dev->data->dev_conf.fdir_conf.flex_conf;
691 	ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
692 	if (ret < 0) {
693 		PMD_DRV_LOG(ERR, " invalid configuration arguments.");
694 		return -EINVAL;
695 	}
696 
697 	if (!pf->support_multi_driver) {
698 		/* configure flex payload */
699 		for (i = 0; i < conf->nb_payloads; i++)
700 			i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
701 		/* configure flex mask*/
702 		for (i = 0; i < conf->nb_flexmasks; i++) {
703 			if (hw->mac.type == I40E_MAC_X722) {
704 				/* get pctype value in fd pctype register */
705 				pctype = (enum i40e_filter_pctype)
706 					  i40e_read_rx_ctl(hw,
707 						I40E_GLQF_FD_PCTYPES(
708 						(int)i40e_flowtype_to_pctype(
709 						pf->adapter,
710 						conf->flex_mask[i].flow_type)));
711 			} else {
712 				pctype = i40e_flowtype_to_pctype(pf->adapter,
713 						  conf->flex_mask[i].flow_type);
714 			}
715 
716 			i40e_set_flex_mask_on_pctype(pf, pctype,
717 						     &conf->flex_mask[i]);
718 		}
719 	} else {
720 		PMD_DRV_LOG(ERR, "Not support flexible payload.");
721 	}
722 
723 	/* Enable FDIR processing in RX routines */
724 	i40e_fdir_rx_proc_enable(dev, 1);
725 
726 	return ret;
727 }
728 
729 
730 static struct i40e_customized_pctype *
i40e_flow_fdir_find_customized_pctype(struct i40e_pf * pf,uint8_t pctype)731 i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
732 {
733 	struct i40e_customized_pctype *cus_pctype;
734 	enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
735 
736 	for (; i < I40E_CUSTOMIZED_MAX; i++) {
737 		cus_pctype = &pf->customized_pctype[i];
738 		if (pctype == cus_pctype->pctype)
739 			return cus_pctype;
740 	}
741 	return NULL;
742 }
743 
744 static inline int
fill_ip6_head(const struct i40e_fdir_input * fdir_input,unsigned char * raw_pkt,uint8_t next_proto,uint8_t len,uint16_t * ether_type)745 fill_ip6_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt,
746 		uint8_t next_proto, uint8_t len, uint16_t *ether_type)
747 {
748 	struct rte_ipv6_hdr *ip6;
749 
750 	ip6 = (struct rte_ipv6_hdr *)raw_pkt;
751 
752 	*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
753 	ip6->vtc_flow = rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
754 		(fdir_input->flow.ipv6_flow.tc << I40E_FDIR_IPv6_TC_OFFSET));
755 	ip6->payload_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
756 	ip6->proto = fdir_input->flow.ipv6_flow.proto ?
757 		fdir_input->flow.ipv6_flow.proto : next_proto;
758 	ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
759 		fdir_input->flow.ipv6_flow.hop_limits :
760 		I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
761 	/**
762 	 * The source and destination fields in the transmitted packet
763 	 * need to be presented in a reversed order with respect
764 	 * to the expected received packets.
765 	 */
766 	rte_memcpy(&ip6->src_addr, &fdir_input->flow.ipv6_flow.dst_ip,
767 		IPV6_ADDR_LEN);
768 	rte_memcpy(&ip6->dst_addr, &fdir_input->flow.ipv6_flow.src_ip,
769 		IPV6_ADDR_LEN);
770 	len += sizeof(struct rte_ipv6_hdr);
771 
772 	return len;
773 }
774 
775 static inline int
fill_ip4_head(const struct i40e_fdir_input * fdir_input,unsigned char * raw_pkt,uint8_t next_proto,uint8_t len,uint16_t * ether_type)776 fill_ip4_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt,
777 		uint8_t next_proto, uint8_t len, uint16_t *ether_type)
778 {
779 	struct rte_ipv4_hdr *ip4;
780 
781 	ip4 = (struct rte_ipv4_hdr *)raw_pkt;
782 
783 	*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
784 	ip4->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
785 	/* set len to by default */
786 	ip4->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
787 	ip4->time_to_live = fdir_input->flow.ip4_flow.ttl ?
788 		fdir_input->flow.ip4_flow.ttl :
789 		I40E_FDIR_IP_DEFAULT_TTL;
790 	ip4->type_of_service = fdir_input->flow.ip4_flow.tos;
791 	ip4->next_proto_id = fdir_input->flow.ip4_flow.proto ?
792 		fdir_input->flow.ip4_flow.proto : next_proto;
793 	/**
794 	 * The source and destination fields in the transmitted packet
795 	 * need to be presented in a reversed order with respect
796 	 * to the expected received packets.
797 	 */
798 	ip4->src_addr = fdir_input->flow.ip4_flow.dst_ip;
799 	ip4->dst_addr = fdir_input->flow.ip4_flow.src_ip;
800 	len += sizeof(struct rte_ipv4_hdr);
801 
802 	return len;
803 }
804 
805 static inline int
i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf * pf,const struct i40e_fdir_input * fdir_input,unsigned char * raw_pkt,bool vlan)806 i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
807 				const struct i40e_fdir_input *fdir_input,
808 				unsigned char *raw_pkt,
809 				bool vlan)
810 {
811 	struct i40e_customized_pctype *cus_pctype = NULL;
812 	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
813 	uint16_t *ether_type;
814 	uint8_t len = 2 * sizeof(struct rte_ether_addr);
815 	uint8_t pctype = fdir_input->pctype;
816 	bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
817 	static const uint8_t next_proto[] = {
818 		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
819 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
820 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
821 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
822 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
823 		[I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
824 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
825 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
826 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
827 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
828 	};
829 
830 	rte_memcpy(raw_pkt, &fdir_input->flow.l2_flow.dst,
831 		sizeof(struct rte_ether_addr));
832 	rte_memcpy(raw_pkt + sizeof(struct rte_ether_addr),
833 		&fdir_input->flow.l2_flow.src,
834 		sizeof(struct rte_ether_addr));
835 	raw_pkt += 2 * sizeof(struct rte_ether_addr);
836 
837 	if (vlan && fdir_input->flow_ext.vlan_tci) {
838 		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
839 		rte_memcpy(raw_pkt + sizeof(uint16_t),
840 			   &fdir_input->flow_ext.vlan_tci,
841 			   sizeof(uint16_t));
842 		raw_pkt += sizeof(vlan_frame);
843 		len += sizeof(vlan_frame);
844 	}
845 	ether_type = (uint16_t *)raw_pkt;
846 	raw_pkt += sizeof(uint16_t);
847 	len += sizeof(uint16_t);
848 
849 	if (is_customized_pctype) {
850 		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
851 		if (!cus_pctype) {
852 			PMD_DRV_LOG(ERR, "unknown pctype %u.",
853 				    fdir_input->pctype);
854 			return -1;
855 		}
856 	}
857 
858 	if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
859 		*ether_type = fdir_input->flow.l2_flow.ether_type;
860 	else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
861 		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
862 		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
863 		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
864 		 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
865 		 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
866 		 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
867 		 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
868 		 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
869 		 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6 ||
870 		 is_customized_pctype) {
871 		if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
872 			pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
873 			pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
874 			pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
875 			pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
876 			len = fill_ip4_head(fdir_input, raw_pkt,
877 					next_proto[pctype], len, ether_type);
878 		} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
879 			pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
880 			pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
881 			pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
882 			pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
883 			len = fill_ip6_head(fdir_input, raw_pkt,
884 					next_proto[pctype], len,
885 					ether_type);
886 		} else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
887 			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
888 			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
889 			 cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
890 			len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
891 					len, ether_type);
892 		} else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3) {
893 			len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_L2TP,
894 					len, ether_type);
895 		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
896 			len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_ESP,
897 					len, ether_type);
898 		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP) {
899 			len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
900 					len, ether_type);
901 		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP) {
902 			len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
903 					len, ether_type);
904 		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6)
905 			len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_ESP,
906 					len, ether_type);
907 		else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP)
908 			len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_UDP,
909 					len, ether_type);
910 		else if (cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3)
911 			len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_L2TP,
912 					len, ether_type);
913 	} else {
914 		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
915 		return -1;
916 	}
917 
918 	return len;
919 }
920 
921 /**
922  * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
923  * @pf: board private structure
924  * @fdir_input: input set of the flow director entry
925  * @raw_pkt: a packet to be constructed
926  */
927 static int
i40e_flow_fdir_construct_pkt(struct i40e_pf * pf,const struct i40e_fdir_input * fdir_input,unsigned char * raw_pkt)928 i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
929 			     const struct i40e_fdir_input *fdir_input,
930 			     unsigned char *raw_pkt)
931 {
932 	unsigned char *payload = NULL;
933 	unsigned char *ptr;
934 	struct rte_udp_hdr *udp;
935 	struct rte_tcp_hdr *tcp;
936 	struct rte_sctp_hdr *sctp;
937 	struct rte_flow_item_gtp *gtp;
938 	struct rte_ipv4_hdr *gtp_ipv4;
939 	struct rte_ipv6_hdr *gtp_ipv6;
940 	struct rte_flow_item_l2tpv3oip *l2tpv3oip;
941 	struct rte_flow_item_esp *esp;
942 	struct rte_ipv4_hdr *esp_ipv4;
943 	struct rte_ipv6_hdr *esp_ipv6;
944 
945 	uint8_t size, dst = 0;
946 	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
947 	int len;
948 	uint8_t pctype = fdir_input->pctype;
949 	struct i40e_customized_pctype *cus_pctype;
950 
951 	/* raw packet template - just copy contents of the raw packet */
952 	if (fdir_input->flow_ext.pkt_template) {
953 		memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
954 		       fdir_input->flow.raw_flow.length);
955 		return 0;
956 	}
957 
958 	/* fill the ethernet and IP head */
959 	len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
960 					      !!fdir_input->flow_ext.vlan_tci);
961 	if (len < 0)
962 		return -EINVAL;
963 
964 	/* fill the L4 head */
965 	if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
966 		udp = (struct rte_udp_hdr *)(raw_pkt + len);
967 		payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
968 		/**
969 		 * The source and destination fields in the transmitted packet
970 		 * need to be presented in a reversed order with respect
971 		 * to the expected received packets.
972 		 */
973 		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
974 		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
975 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
976 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
977 		tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
978 		payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
979 		/**
980 		 * The source and destination fields in the transmitted packet
981 		 * need to be presented in a reversed order with respect
982 		 * to the expected received packets.
983 		 */
984 		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
985 		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
986 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
987 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
988 		sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
989 		payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
990 		/**
991 		 * The source and destination fields in the transmitted packet
992 		 * need to be presented in a reversed order with respect
993 		 * to the expected received packets.
994 		 */
995 		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
996 		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
997 		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
998 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
999 		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
1000 		payload = raw_pkt + len;
1001 		set_idx = I40E_FLXPLD_L3_IDX;
1002 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
1003 		udp = (struct rte_udp_hdr *)(raw_pkt + len);
1004 		payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
1005 		/**
1006 		 * The source and destination fields in the transmitted packet
1007 		 * need to be presented in a reversed order with respect
1008 		 * to the expected received packets.
1009 		 */
1010 		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
1011 		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
1012 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1013 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
1014 		tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
1015 		payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
1016 		/**
1017 		 * The source and destination fields in the transmitted packet
1018 		 * need to be presented in a reversed order with respect
1019 		 * to the expected received packets.
1020 		 */
1021 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1022 		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
1023 		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
1024 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
1025 		sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
1026 		payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
1027 		/**
1028 		 * The source and destination fields in the transmitted packet
1029 		 * need to be presented in a reversed order with respect
1030 		 * to the expected received packets.
1031 		 */
1032 		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
1033 		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
1034 		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
1035 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1036 		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1037 		payload = raw_pkt + len;
1038 		set_idx = I40E_FLXPLD_L3_IDX;
1039 	} else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1040 		payload = raw_pkt + len;
1041 		/**
1042 		 * ARP packet is a special case on which the payload
1043 		 * starts after the whole ARP header
1044 		 */
1045 		if (fdir_input->flow.l2_flow.ether_type ==
1046 				rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
1047 			payload += sizeof(struct rte_arp_hdr);
1048 		set_idx = I40E_FLXPLD_L2_IDX;
1049 	} else if (fdir_input->flow_ext.customized_pctype) {
1050 		/* If customized pctype is used */
1051 		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1052 		if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1053 		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1054 		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1055 		    cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
1056 			udp = (struct rte_udp_hdr *)(raw_pkt + len);
1057 			udp->dgram_len =
1058 				rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1059 
1060 			gtp = (struct rte_flow_item_gtp *)
1061 				((unsigned char *)udp +
1062 					sizeof(struct rte_udp_hdr));
1063 			gtp->msg_len =
1064 				rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
1065 			gtp->teid = fdir_input->flow.gtp_flow.teid;
1066 			gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
1067 
1068 			/* GTP-C message type is not supported. */
1069 			if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
1070 				udp->dst_port =
1071 				      rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
1072 				gtp->v_pt_rsv_flags =
1073 					I40E_FDIR_GTP_VER_FLAG_0X32;
1074 			} else {
1075 				udp->dst_port =
1076 				      rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
1077 				gtp->v_pt_rsv_flags =
1078 					I40E_FDIR_GTP_VER_FLAG_0X30;
1079 			}
1080 
1081 			if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
1082 				gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1083 				gtp_ipv4 = (struct rte_ipv4_hdr *)
1084 					((unsigned char *)gtp +
1085 					 sizeof(struct rte_flow_item_gtp));
1086 				gtp_ipv4->version_ihl =
1087 					I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1088 				gtp_ipv4->next_proto_id = IPPROTO_IP;
1089 				gtp_ipv4->total_length =
1090 					rte_cpu_to_be_16(
1091 						I40E_FDIR_INNER_IP_DEFAULT_LEN);
1092 				payload = (unsigned char *)gtp_ipv4 +
1093 					sizeof(struct rte_ipv4_hdr);
1094 			} else if (cus_pctype->index ==
1095 				   I40E_CUSTOMIZED_GTPU_IPV6) {
1096 				gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1097 				gtp_ipv6 = (struct rte_ipv6_hdr *)
1098 					((unsigned char *)gtp +
1099 					 sizeof(struct rte_flow_item_gtp));
1100 				gtp_ipv6->vtc_flow =
1101 					rte_cpu_to_be_32(
1102 					       I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1103 					       (0 << I40E_FDIR_IPv6_TC_OFFSET));
1104 				gtp_ipv6->proto = IPPROTO_NONE;
1105 				gtp_ipv6->payload_len =
1106 					rte_cpu_to_be_16(
1107 					      I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
1108 				gtp_ipv6->hop_limits =
1109 					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1110 				payload = (unsigned char *)gtp_ipv6 +
1111 					sizeof(struct rte_ipv6_hdr);
1112 			} else
1113 				payload = (unsigned char *)gtp +
1114 					sizeof(struct rte_flow_item_gtp);
1115 		} else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3 ||
1116 			   cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3) {
1117 			l2tpv3oip = (struct rte_flow_item_l2tpv3oip *)(raw_pkt
1118 								       + len);
1119 
1120 			if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3)
1121 				l2tpv3oip->session_id =
1122 				 fdir_input->flow.ip4_l2tpv3oip_flow.session_id;
1123 			else
1124 				l2tpv3oip->session_id =
1125 				 fdir_input->flow.ip6_l2tpv3oip_flow.session_id;
1126 			payload = (unsigned char *)l2tpv3oip +
1127 				sizeof(struct rte_flow_item_l2tpv3oip);
1128 		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4 ||
1129 			cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6 ||
1130 			cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP ||
1131 			cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP) {
1132 			if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
1133 				esp_ipv4 = (struct rte_ipv4_hdr *)
1134 					(raw_pkt + len);
1135 				esp = (struct rte_flow_item_esp *)esp_ipv4;
1136 				esp->hdr.spi =
1137 					fdir_input->flow.esp_ipv4_flow.spi;
1138 				payload = (unsigned char *)esp +
1139 					sizeof(struct rte_esp_hdr);
1140 				len += sizeof(struct rte_esp_hdr);
1141 			} else if (cus_pctype->index ==
1142 					I40E_CUSTOMIZED_ESP_IPV4_UDP) {
1143 				esp_ipv4 = (struct rte_ipv4_hdr *)
1144 					(raw_pkt + len);
1145 				udp = (struct rte_udp_hdr *)esp_ipv4;
1146 				udp->dst_port = rte_cpu_to_be_16
1147 					(I40E_FDIR_ESP_DST_PORT);
1148 
1149 				udp->dgram_len = rte_cpu_to_be_16
1150 						(I40E_FDIR_UDP_DEFAULT_LEN);
1151 				esp = (struct rte_flow_item_esp *)
1152 					((unsigned char *)esp_ipv4 +
1153 						sizeof(struct rte_udp_hdr));
1154 				esp->hdr.spi =
1155 					fdir_input->flow.esp_ipv4_udp_flow.spi;
1156 				payload = (unsigned char *)esp +
1157 					sizeof(struct rte_esp_hdr);
1158 				len += sizeof(struct rte_udp_hdr) +
1159 						sizeof(struct rte_esp_hdr);
1160 			} else if (cus_pctype->index ==
1161 					I40E_CUSTOMIZED_ESP_IPV6) {
1162 				esp_ipv6 = (struct rte_ipv6_hdr *)
1163 					(raw_pkt + len);
1164 				esp = (struct rte_flow_item_esp *)esp_ipv6;
1165 				esp->hdr.spi =
1166 					fdir_input->flow.esp_ipv6_flow.spi;
1167 				payload = (unsigned char *)esp +
1168 					sizeof(struct rte_esp_hdr);
1169 				len += sizeof(struct rte_esp_hdr);
1170 			} else if (cus_pctype->index ==
1171 					I40E_CUSTOMIZED_ESP_IPV6_UDP) {
1172 				esp_ipv6 = (struct rte_ipv6_hdr *)
1173 					(raw_pkt + len);
1174 				udp = (struct rte_udp_hdr *)esp_ipv6;
1175 				udp->dst_port =	rte_cpu_to_be_16
1176 					(I40E_FDIR_ESP_DST_PORT);
1177 
1178 				udp->dgram_len = rte_cpu_to_be_16
1179 					(I40E_FDIR_UDP_DEFAULT_LEN);
1180 				esp = (struct rte_flow_item_esp *)
1181 					((unsigned char *)esp_ipv6 +
1182 						sizeof(struct rte_udp_hdr));
1183 				esp->hdr.spi =
1184 					fdir_input->flow.esp_ipv6_udp_flow.spi;
1185 				payload = (unsigned char *)esp +
1186 					sizeof(struct rte_esp_hdr);
1187 				len += sizeof(struct rte_udp_hdr) +
1188 						sizeof(struct rte_esp_hdr);
1189 			}
1190 		}
1191 	} else {
1192 		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
1193 		return -1;
1194 	}
1195 
1196 	/* fill the flexbytes to payload */
1197 	for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1198 		pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
1199 		size = pf->fdir.flex_set[pit_idx].size;
1200 		if (size == 0)
1201 			continue;
1202 		dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
1203 		ptr = payload +
1204 		      pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
1205 		(void)rte_memcpy(ptr,
1206 				 &fdir_input->flow_ext.flexbytes[dst],
1207 				 size * sizeof(uint16_t));
1208 	}
1209 
1210 	return 0;
1211 }
1212 
1213 /* Construct the tx flags */
1214 static inline uint64_t
i40e_build_ctob(uint32_t td_cmd,uint32_t td_offset,unsigned int size,uint32_t td_tag)1215 i40e_build_ctob(uint32_t td_cmd,
1216 		uint32_t td_offset,
1217 		unsigned int size,
1218 		uint32_t td_tag)
1219 {
1220 	return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
1221 			((uint64_t)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
1222 			((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
1223 			((uint64_t)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
1224 			((uint64_t)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
1225 }
1226 
1227 /*
1228  * check the programming status descriptor in rx queue.
1229  * done after Programming Flow Director is programmed on
1230  * tx queue
1231  */
1232 static inline int
i40e_check_fdir_programming_status(struct i40e_rx_queue * rxq)1233 i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
1234 {
1235 	volatile union i40e_rx_desc *rxdp;
1236 	uint64_t qword1;
1237 	uint32_t rx_status;
1238 	uint32_t len, id;
1239 	uint32_t error;
1240 	int ret = 0;
1241 
1242 	rxdp = &rxq->rx_ring[rxq->rx_tail];
1243 	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1244 	rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
1245 			>> I40E_RXD_QW1_STATUS_SHIFT;
1246 
1247 	if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
1248 		len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
1249 		id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1250 			    I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1251 
1252 		if (len  == I40E_RX_PROG_STATUS_DESC_LENGTH &&
1253 		    id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
1254 			error = (qword1 &
1255 				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
1256 				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
1257 			if (error == (0x1 <<
1258 				I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
1259 				PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
1260 					    " (FD_ID %u): programming status"
1261 					    " reported.",
1262 					    rxdp->wb.qword0.hi_dword.fd_id);
1263 				ret = -1;
1264 			} else if (error == (0x1 <<
1265 				I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
1266 				PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
1267 					    " (FD_ID %u): programming status"
1268 					    " reported.",
1269 					    rxdp->wb.qword0.hi_dword.fd_id);
1270 				ret = -1;
1271 			} else
1272 				PMD_DRV_LOG(ERR, "invalid programming status"
1273 					    " reported, error = %u.", error);
1274 		} else
1275 			PMD_DRV_LOG(INFO, "unknown programming status"
1276 				    " reported, len = %d, id = %u.", len, id);
1277 		rxdp->wb.qword1.status_error_len = 0;
1278 		rxq->rx_tail++;
1279 		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
1280 			rxq->rx_tail = 0;
1281 		if (rxq->rx_tail == 0)
1282 			I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1283 		else
1284 			I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
1285 	}
1286 
1287 	return ret;
1288 }
1289 
1290 static inline void
i40e_fdir_programming_status_cleanup(struct i40e_rx_queue * rxq)1291 i40e_fdir_programming_status_cleanup(struct i40e_rx_queue *rxq)
1292 {
1293 	uint16_t retry_count = 0;
1294 
1295 	/* capture the previous error report(if any) from rx ring */
1296 	while ((i40e_check_fdir_programming_status(rxq) < 0) &&
1297 			(++retry_count < I40E_FDIR_NUM_RX_DESC))
1298 		PMD_DRV_LOG(INFO, "error report captured.");
1299 }
1300 
1301 static int
i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf * input,struct i40e_fdir_filter * filter)1302 i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
1303 			 struct i40e_fdir_filter *filter)
1304 {
1305 	rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
1306 	if (input->input.flow_ext.pkt_template) {
1307 		filter->fdir.input.flow.raw_flow.packet = NULL;
1308 		filter->fdir.input.flow.raw_flow.length =
1309 			rte_hash_crc(input->input.flow.raw_flow.packet,
1310 				     input->input.flow.raw_flow.length,
1311 				     input->input.flow.raw_flow.pctype);
1312 	}
1313 	return 0;
1314 }
1315 
1316 /* Check if there exists the flow director filter */
1317 static struct i40e_fdir_filter *
i40e_sw_fdir_filter_lookup(struct i40e_fdir_info * fdir_info,const struct i40e_fdir_input * input)1318 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
1319 			const struct i40e_fdir_input *input)
1320 {
1321 	int ret;
1322 
1323 	if (input->flow_ext.pkt_template)
1324 		ret = rte_hash_lookup_with_hash(fdir_info->hash_table,
1325 						(const void *)input,
1326 						input->flow.raw_flow.length);
1327 	else
1328 		ret = rte_hash_lookup(fdir_info->hash_table,
1329 				      (const void *)input);
1330 	if (ret < 0)
1331 		return NULL;
1332 
1333 	return fdir_info->hash_map[ret];
1334 }
1335 
1336 /* Add a flow director filter into the SW list */
1337 static int
i40e_sw_fdir_filter_insert(struct i40e_pf * pf,struct i40e_fdir_filter * filter)1338 i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
1339 {
1340 	struct i40e_fdir_info *fdir_info = &pf->fdir;
1341 	struct i40e_fdir_filter *hash_filter;
1342 	int ret;
1343 
1344 	if (filter->fdir.input.flow_ext.pkt_template)
1345 		ret = rte_hash_add_key_with_hash(fdir_info->hash_table,
1346 				 &filter->fdir.input,
1347 				 filter->fdir.input.flow.raw_flow.length);
1348 	else
1349 		ret = rte_hash_add_key(fdir_info->hash_table,
1350 				       &filter->fdir.input);
1351 	if (ret < 0) {
1352 		PMD_DRV_LOG(ERR,
1353 			    "Failed to insert fdir filter to hash table %d!",
1354 			    ret);
1355 		return ret;
1356 	}
1357 
1358 	if (fdir_info->hash_map[ret])
1359 		return -1;
1360 
1361 	hash_filter = &fdir_info->fdir_filter_array[ret];
1362 	rte_memcpy(hash_filter, filter, sizeof(*filter));
1363 	fdir_info->hash_map[ret] = hash_filter;
1364 	TAILQ_INSERT_TAIL(&fdir_info->fdir_list, hash_filter, rules);
1365 
1366 	return 0;
1367 }
1368 
1369 /* Delete a flow director filter from the SW list */
1370 int
i40e_sw_fdir_filter_del(struct i40e_pf * pf,struct i40e_fdir_input * input)1371 i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
1372 {
1373 	struct i40e_fdir_info *fdir_info = &pf->fdir;
1374 	struct i40e_fdir_filter *filter;
1375 	int ret;
1376 
1377 	if (input->flow_ext.pkt_template)
1378 		ret = rte_hash_del_key_with_hash(fdir_info->hash_table,
1379 						 input,
1380 						 input->flow.raw_flow.length);
1381 	else
1382 		ret = rte_hash_del_key(fdir_info->hash_table, input);
1383 	if (ret < 0) {
1384 		PMD_DRV_LOG(ERR,
1385 			    "Failed to delete fdir filter to hash table %d!",
1386 			    ret);
1387 		return ret;
1388 	}
1389 	filter = fdir_info->hash_map[ret];
1390 	fdir_info->hash_map[ret] = NULL;
1391 
1392 	TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
1393 
1394 	return 0;
1395 }
1396 
1397 struct rte_flow *
i40e_fdir_entry_pool_get(struct i40e_fdir_info * fdir_info)1398 i40e_fdir_entry_pool_get(struct i40e_fdir_info *fdir_info)
1399 {
1400 	struct rte_flow *flow = NULL;
1401 	uint64_t slab = 0;
1402 	uint32_t pos = 0;
1403 	uint32_t i = 0;
1404 	int ret;
1405 
1406 	if (fdir_info->fdir_actual_cnt >=
1407 			fdir_info->fdir_space_size) {
1408 		PMD_DRV_LOG(ERR, "Fdir space full");
1409 		return NULL;
1410 	}
1411 
1412 	ret = rte_bitmap_scan(fdir_info->fdir_flow_pool.bitmap, &pos,
1413 			&slab);
1414 
1415 	/* normally this won't happen as the fdir_actual_cnt should be
1416 	 * same with the number of the set bits in fdir_flow_pool,
1417 	 * but anyway handle this error condition here for safe
1418 	 */
1419 	if (ret == 0) {
1420 		PMD_DRV_LOG(ERR, "fdir_actual_cnt out of sync");
1421 		return NULL;
1422 	}
1423 
1424 	i = rte_bsf64(slab);
1425 	pos += i;
1426 	rte_bitmap_clear(fdir_info->fdir_flow_pool.bitmap, pos);
1427 	flow = &fdir_info->fdir_flow_pool.pool[pos].flow;
1428 
1429 	memset(flow, 0, sizeof(struct rte_flow));
1430 
1431 	return flow;
1432 }
1433 
1434 void
i40e_fdir_entry_pool_put(struct i40e_fdir_info * fdir_info,struct rte_flow * flow)1435 i40e_fdir_entry_pool_put(struct i40e_fdir_info *fdir_info,
1436 		struct rte_flow *flow)
1437 {
1438 	struct i40e_fdir_entry *f;
1439 
1440 	f = FLOW_TO_FLOW_BITMAP(flow);
1441 	rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, f->idx);
1442 }
1443 
1444 static int
i40e_flow_store_flex_pit(struct i40e_pf * pf,struct i40e_fdir_flex_pit * flex_pit,enum i40e_flxpld_layer_idx layer_idx,uint8_t raw_id)1445 i40e_flow_store_flex_pit(struct i40e_pf *pf,
1446 			 struct i40e_fdir_flex_pit *flex_pit,
1447 			 enum i40e_flxpld_layer_idx layer_idx,
1448 			 uint8_t raw_id)
1449 {
1450 	uint8_t field_idx;
1451 
1452 	field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
1453 	/* Check if the configuration is conflicted */
1454 	if (pf->fdir.flex_pit_flag[layer_idx] &&
1455 	    (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
1456 	     pf->fdir.flex_set[field_idx].size != flex_pit->size ||
1457 	     pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
1458 		return -1;
1459 
1460 	/* Check if the configuration exists. */
1461 	if (pf->fdir.flex_pit_flag[layer_idx] &&
1462 	    (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
1463 	     pf->fdir.flex_set[field_idx].size == flex_pit->size &&
1464 	     pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
1465 		return 1;
1466 
1467 	pf->fdir.flex_set[field_idx].src_offset =
1468 		flex_pit->src_offset;
1469 	pf->fdir.flex_set[field_idx].size =
1470 		flex_pit->size;
1471 	pf->fdir.flex_set[field_idx].dst_offset =
1472 		flex_pit->dst_offset;
1473 
1474 	return 0;
1475 }
1476 
1477 static void
i40e_flow_set_fdir_flex_pit(struct i40e_pf * pf,enum i40e_flxpld_layer_idx layer_idx,uint8_t raw_id)1478 i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
1479 			    enum i40e_flxpld_layer_idx layer_idx,
1480 			    uint8_t raw_id)
1481 {
1482 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1483 	uint32_t flx_pit, flx_ort;
1484 	uint16_t min_next_off = 0;
1485 	uint8_t field_idx;
1486 	uint8_t i;
1487 
1488 	if (raw_id) {
1489 		flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
1490 			  (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
1491 			  (layer_idx * I40E_MAX_FLXPLD_FIED);
1492 		I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
1493 	}
1494 
1495 	/* Set flex pit */
1496 	for (i = 0; i < raw_id; i++) {
1497 		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
1498 		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
1499 				     pf->fdir.flex_set[field_idx].size,
1500 				     pf->fdir.flex_set[field_idx].dst_offset);
1501 
1502 		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
1503 		min_next_off = pf->fdir.flex_set[field_idx].src_offset +
1504 			pf->fdir.flex_set[field_idx].size;
1505 	}
1506 
1507 	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
1508 		/* set the non-used register obeying register's constrain */
1509 		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
1510 		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
1511 				     NONUSE_FLX_PIT_DEST_OFF);
1512 		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
1513 		min_next_off++;
1514 	}
1515 }
1516 
1517 static int
i40e_flow_store_flex_mask(struct i40e_pf * pf,enum i40e_filter_pctype pctype,uint8_t * mask)1518 i40e_flow_store_flex_mask(struct i40e_pf *pf,
1519 			  enum i40e_filter_pctype pctype,
1520 			  uint8_t *mask)
1521 {
1522 	struct i40e_fdir_flex_mask flex_mask;
1523 	uint8_t nb_bitmask = 0;
1524 	uint16_t mask_tmp;
1525 	uint8_t i;
1526 
1527 	memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
1528 	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
1529 		mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
1530 		if (mask_tmp) {
1531 			flex_mask.word_mask |=
1532 				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
1533 			if (mask_tmp != UINT16_MAX) {
1534 				flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
1535 				flex_mask.bitmask[nb_bitmask].offset =
1536 					i / sizeof(uint16_t);
1537 				nb_bitmask++;
1538 				if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
1539 					return -1;
1540 			}
1541 		}
1542 	}
1543 	flex_mask.nb_bitmask = nb_bitmask;
1544 
1545 	if (pf->fdir.flex_mask_flag[pctype] &&
1546 	    (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
1547 		    sizeof(struct i40e_fdir_flex_mask))))
1548 		return -2;
1549 	else if (pf->fdir.flex_mask_flag[pctype] &&
1550 		 !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
1551 			  sizeof(struct i40e_fdir_flex_mask))))
1552 		return 1;
1553 
1554 	memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
1555 	       sizeof(struct i40e_fdir_flex_mask));
1556 	return 0;
1557 }
1558 
1559 static void
i40e_flow_set_fdir_flex_msk(struct i40e_pf * pf,enum i40e_filter_pctype pctype)1560 i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
1561 			    enum i40e_filter_pctype pctype)
1562 {
1563 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1564 	struct i40e_fdir_flex_mask *flex_mask;
1565 	uint32_t flxinset, fd_mask;
1566 	uint8_t i;
1567 
1568 	/* Set flex mask */
1569 	flex_mask = &pf->fdir.flex_mask[pctype];
1570 	flxinset = (flex_mask->word_mask <<
1571 		    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
1572 		I40E_PRTQF_FD_FLXINSET_INSET_MASK;
1573 	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
1574 
1575 	for (i = 0; i < flex_mask->nb_bitmask; i++) {
1576 		fd_mask = (flex_mask->bitmask[i].mask <<
1577 			   I40E_PRTQF_FD_MSK_MASK_SHIFT) &
1578 			   I40E_PRTQF_FD_MSK_MASK_MASK;
1579 		fd_mask |= ((flex_mask->bitmask[i].offset +
1580 			     I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
1581 			    I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
1582 				I40E_PRTQF_FD_MSK_OFFSET_MASK;
1583 		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
1584 	}
1585 
1586 	pf->fdir.flex_mask_flag[pctype] = 1;
1587 }
1588 
1589 static int
i40e_flow_set_fdir_inset(struct i40e_pf * pf,enum i40e_filter_pctype pctype,uint64_t input_set)1590 i40e_flow_set_fdir_inset(struct i40e_pf *pf,
1591 			 enum i40e_filter_pctype pctype,
1592 			 uint64_t input_set)
1593 {
1594 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
1595 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1596 	uint64_t inset_reg = 0;
1597 	int i, num;
1598 
1599 	/* Check if the input set is valid */
1600 	if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
1601 				    input_set) != 0) {
1602 		PMD_DRV_LOG(ERR, "Invalid input set");
1603 		return -EINVAL;
1604 	}
1605 
1606 	/* Check if the configuration is conflicted */
1607 	if (pf->fdir.flow_count[pctype] &&
1608 	    memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t))) {
1609 		PMD_DRV_LOG(ERR, "Conflict with the first rule's input set.");
1610 		return -EINVAL;
1611 	}
1612 
1613 	if (pf->fdir.flow_count[pctype] &&
1614 	    !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
1615 		return 0;
1616 
1617 	num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
1618 						 I40E_INSET_MASK_NUM_REG);
1619 	if (num < 0) {
1620 		PMD_DRV_LOG(ERR, "Invalid pattern mask.");
1621 		return -EINVAL;
1622 	}
1623 
1624 	if (pf->support_multi_driver) {
1625 		for (i = 0; i < num; i++)
1626 			if (i40e_read_rx_ctl(hw,
1627 					I40E_GLQF_FD_MSK(i, pctype)) !=
1628 					mask_reg[i]) {
1629 				PMD_DRV_LOG(ERR, "Input set setting is not"
1630 						" supported with"
1631 						" `support-multi-driver`"
1632 						" enabled!");
1633 				return -EPERM;
1634 			}
1635 		for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
1636 			if (i40e_read_rx_ctl(hw,
1637 					I40E_GLQF_FD_MSK(i, pctype)) != 0) {
1638 				PMD_DRV_LOG(ERR, "Input set setting is not"
1639 						" supported with"
1640 						" `support-multi-driver`"
1641 						" enabled!");
1642 				return -EPERM;
1643 			}
1644 
1645 	} else {
1646 		for (i = 0; i < num; i++)
1647 			i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
1648 				mask_reg[i]);
1649 		/*clear unused mask registers of the pctype */
1650 		for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
1651 			i40e_check_write_reg(hw,
1652 					I40E_GLQF_FD_MSK(i, pctype), 0);
1653 	}
1654 
1655 	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
1656 
1657 	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
1658 			     (uint32_t)(inset_reg & UINT32_MAX));
1659 	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
1660 			     (uint32_t)((inset_reg >>
1661 					 I40E_32_BIT_WIDTH) & UINT32_MAX));
1662 
1663 	I40E_WRITE_FLUSH(hw);
1664 
1665 	pf->fdir.input_set[pctype] = input_set;
1666 	return 0;
1667 }
1668 
1669 static inline unsigned char *
i40e_find_available_buffer(struct rte_eth_dev * dev)1670 i40e_find_available_buffer(struct rte_eth_dev *dev)
1671 {
1672 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1673 	struct i40e_fdir_info *fdir_info = &pf->fdir;
1674 	struct i40e_tx_queue *txq = pf->fdir.txq;
1675 
1676 	/* no available buffer
1677 	 * search for more available buffers from the current
1678 	 * descriptor, until an unavailable one
1679 	 */
1680 	if (fdir_info->txq_available_buf_count <= 0) {
1681 		uint16_t tmp_tail;
1682 		volatile struct i40e_tx_desc *tmp_txdp;
1683 
1684 		tmp_tail = txq->tx_tail;
1685 		tmp_txdp = &txq->tx_ring[tmp_tail + 1];
1686 
1687 		do {
1688 			if ((tmp_txdp->cmd_type_offset_bsz &
1689 					rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1690 					rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1691 				fdir_info->txq_available_buf_count++;
1692 			else
1693 				break;
1694 
1695 			tmp_tail += 2;
1696 			if (tmp_tail >= txq->nb_tx_desc)
1697 				tmp_tail = 0;
1698 		} while (tmp_tail != txq->tx_tail);
1699 	}
1700 
1701 	if (fdir_info->txq_available_buf_count > 0)
1702 		fdir_info->txq_available_buf_count--;
1703 	else
1704 		return NULL;
1705 	return (unsigned char *)fdir_info->prg_pkt[txq->tx_tail >> 1];
1706 }
1707 
1708 /**
1709  * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
1710  * @pf: board private structure
1711  * @filter: fdir filter entry
1712  * @add: 0 - delete, 1 - add
1713  */
1714 int
i40e_flow_add_del_fdir_filter(struct rte_eth_dev * dev,const struct i40e_fdir_filter_conf * filter,bool add)1715 i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
1716 			      const struct i40e_fdir_filter_conf *filter,
1717 			      bool add)
1718 {
1719 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1720 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1721 	enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
1722 	struct i40e_fdir_info *fdir_info = &pf->fdir;
1723 	uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
1724 	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
1725 	struct i40e_fdir_flex_pit flex_pit;
1726 	enum i40e_filter_pctype pctype;
1727 	struct i40e_fdir_filter *node;
1728 	unsigned char *pkt = NULL;
1729 	bool cfg_flex_pit = true;
1730 	bool wait_status = true;
1731 	uint8_t field_idx;
1732 	int ret = 0;
1733 	int i;
1734 
1735 	if (pf->fdir.fdir_vsi == NULL) {
1736 		PMD_DRV_LOG(ERR, "FDIR is not enabled");
1737 		return -ENOTSUP;
1738 	}
1739 
1740 	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1741 		PMD_DRV_LOG(ERR, "Invalid queue ID");
1742 		return -EINVAL;
1743 	}
1744 	if (filter->input.flow_ext.is_vf &&
1745 	    filter->input.flow_ext.dst_id >= pf->vf_num) {
1746 		PMD_DRV_LOG(ERR, "Invalid VF ID");
1747 		return -EINVAL;
1748 	}
1749 	if (filter->input.flow_ext.pkt_template) {
1750 		if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN ||
1751 		    !filter->input.flow.raw_flow.packet) {
1752 			PMD_DRV_LOG(ERR, "Invalid raw packet template"
1753 				" flow filter parameters!");
1754 			return -EINVAL;
1755 		}
1756 		pctype = filter->input.flow.raw_flow.pctype;
1757 	} else {
1758 		pctype = filter->input.pctype;
1759 	}
1760 
1761 	/* Check if there is the filter in SW list */
1762 	memset(&check_filter, 0, sizeof(check_filter));
1763 	i40e_fdir_filter_convert(filter, &check_filter);
1764 
1765 	if (add) {
1766 		/* configure the input set for common PCTYPEs*/
1767 		if (!filter->input.flow_ext.customized_pctype &&
1768 		    !filter->input.flow_ext.pkt_template) {
1769 			ret = i40e_flow_set_fdir_inset(pf, pctype,
1770 					filter->input.flow_ext.input_set);
1771 			if (ret < 0)
1772 				return ret;
1773 		}
1774 
1775 		if (filter->input.flow_ext.is_flex_flow) {
1776 			for (i = 0; i < filter->input.flow_ext.raw_id; i++) {
1777 				layer_idx = filter->input.flow_ext.layer_idx;
1778 				field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
1779 				flex_pit = filter->input.flow_ext.flex_pit[field_idx];
1780 
1781 				/* Store flex pit to SW */
1782 				ret = i40e_flow_store_flex_pit(pf, &flex_pit,
1783 							       layer_idx, i);
1784 				if (ret < 0) {
1785 					PMD_DRV_LOG(ERR, "Conflict with the"
1786 						    " first flexible rule.");
1787 					return -EINVAL;
1788 				} else if (ret > 0) {
1789 					cfg_flex_pit = false;
1790 				}
1791 			}
1792 
1793 			if (cfg_flex_pit)
1794 				i40e_flow_set_fdir_flex_pit(pf, layer_idx,
1795 						filter->input.flow_ext.raw_id);
1796 
1797 			/* Store flex mask to SW */
1798 			for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++)
1799 				flex_mask[i] =
1800 					filter->input.flow_ext.flex_mask[i];
1801 
1802 			ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
1803 			if (ret == -1) {
1804 				PMD_DRV_LOG(ERR, "Exceed maximal"
1805 					    " number of bitmasks");
1806 				return -EINVAL;
1807 			} else if (ret == -2) {
1808 				PMD_DRV_LOG(ERR, "Conflict with the"
1809 					    " first flexible rule");
1810 				return -EINVAL;
1811 			} else if (ret == 0) {
1812 				i40e_flow_set_fdir_flex_msk(pf, pctype);
1813 			}
1814 		}
1815 
1816 		ret = i40e_sw_fdir_filter_insert(pf, &check_filter);
1817 		if (ret < 0) {
1818 			PMD_DRV_LOG(ERR,
1819 				    "Conflict with existing flow director rules!");
1820 			return -EINVAL;
1821 		}
1822 
1823 		if (fdir_info->fdir_invalprio == 1 &&
1824 				fdir_info->fdir_guarantee_free_space > 0)
1825 			wait_status = false;
1826 	} else {
1827 		if (filter->input.flow_ext.is_flex_flow)
1828 			layer_idx = filter->input.flow_ext.layer_idx;
1829 
1830 		node = i40e_sw_fdir_filter_lookup(fdir_info,
1831 				&check_filter.fdir.input);
1832 		if (!node) {
1833 			PMD_DRV_LOG(ERR,
1834 				    "There's no corresponding flow director filter!");
1835 			return -EINVAL;
1836 		}
1837 
1838 		ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
1839 		if (ret < 0) {
1840 			PMD_DRV_LOG(ERR,
1841 					"Error deleting fdir rule from hash table!");
1842 			return -EINVAL;
1843 		}
1844 
1845 		pf->fdir.flex_mask_flag[pctype] = 0;
1846 
1847 		if (fdir_info->fdir_invalprio == 1)
1848 			wait_status = false;
1849 	}
1850 
1851 	/* find a buffer to store the pkt */
1852 	pkt = i40e_find_available_buffer(dev);
1853 	if (pkt == NULL)
1854 		goto error_op;
1855 
1856 	memset(pkt, 0, I40E_FDIR_PKT_LEN);
1857 	ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
1858 	if (ret < 0) {
1859 		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1860 		goto error_op;
1861 	}
1862 
1863 	if (hw->mac.type == I40E_MAC_X722) {
1864 		/* get translated pctype value in fd pctype register */
1865 		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1866 			hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1867 	}
1868 
1869 	ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add,
1870 			wait_status);
1871 	if (ret < 0) {
1872 		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1873 			    pctype);
1874 		goto error_op;
1875 	}
1876 
1877 	if (filter->input.flow_ext.is_flex_flow) {
1878 		if (add) {
1879 			fdir_info->flex_flow_count[layer_idx]++;
1880 			pf->fdir.flex_pit_flag[layer_idx] = 1;
1881 		} else {
1882 			fdir_info->flex_flow_count[layer_idx]--;
1883 			if (!fdir_info->flex_flow_count[layer_idx])
1884 				pf->fdir.flex_pit_flag[layer_idx] = 0;
1885 		}
1886 	}
1887 
1888 	if (add) {
1889 		fdir_info->flow_count[pctype]++;
1890 		fdir_info->fdir_actual_cnt++;
1891 		if (fdir_info->fdir_invalprio == 1 &&
1892 				fdir_info->fdir_guarantee_free_space > 0)
1893 			fdir_info->fdir_guarantee_free_space--;
1894 	} else {
1895 		fdir_info->flow_count[pctype]--;
1896 		fdir_info->fdir_actual_cnt--;
1897 		if (fdir_info->fdir_invalprio == 1 &&
1898 				fdir_info->fdir_guarantee_free_space <
1899 				fdir_info->fdir_guarantee_total_space)
1900 			fdir_info->fdir_guarantee_free_space++;
1901 	}
1902 
1903 	return ret;
1904 
1905 error_op:
1906 	/* roll back */
1907 	if (add)
1908 		i40e_sw_fdir_filter_del(pf, &check_filter.fdir.input);
1909 	else
1910 		i40e_sw_fdir_filter_insert(pf, &check_filter);
1911 
1912 	return ret;
1913 }
1914 
1915 /*
1916  * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
1917  * Is done by Flow Director Programming Descriptor followed by packet
1918  * structure that contains the filter fields need to match.
1919  * @pf: board private structure
1920  * @pctype: pctype
1921  * @filter: fdir filter entry
1922  * @add: 0 - delete, 1 - add
1923  */
1924 static int
i40e_flow_fdir_filter_programming(struct i40e_pf * pf,enum i40e_filter_pctype pctype,const struct i40e_fdir_filter_conf * filter,bool add,bool wait_status)1925 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
1926 				  enum i40e_filter_pctype pctype,
1927 				  const struct i40e_fdir_filter_conf *filter,
1928 				  bool add, bool wait_status)
1929 {
1930 	struct i40e_tx_queue *txq = pf->fdir.txq;
1931 	struct i40e_rx_queue *rxq = pf->fdir.rxq;
1932 	const struct i40e_fdir_action *fdir_action = &filter->action;
1933 	volatile struct i40e_tx_desc *txdp;
1934 	volatile struct i40e_filter_program_desc *fdirdp;
1935 	uint32_t td_cmd;
1936 	uint16_t vsi_id;
1937 	uint8_t dest;
1938 	uint32_t i;
1939 
1940 	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
1941 	fdirdp = (volatile struct i40e_filter_program_desc *)
1942 				(&txq->tx_ring[txq->tx_tail]);
1943 
1944 	fdirdp->qindex_flex_ptype_vsi =
1945 			rte_cpu_to_le_32((fdir_action->rx_queue <<
1946 					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1947 					  I40E_TXD_FLTR_QW0_QINDEX_MASK);
1948 
1949 	fdirdp->qindex_flex_ptype_vsi |=
1950 			rte_cpu_to_le_32((fdir_action->flex_off <<
1951 					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
1952 					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
1953 
1954 	fdirdp->qindex_flex_ptype_vsi |=
1955 			rte_cpu_to_le_32((pctype <<
1956 					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
1957 					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);
1958 
1959 	if (filter->input.flow_ext.is_vf)
1960 		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
1961 	else
1962 		/* Use LAN VSI Id by default */
1963 		vsi_id = pf->main_vsi->vsi_id;
1964 	fdirdp->qindex_flex_ptype_vsi |=
1965 		rte_cpu_to_le_32(((uint32_t)vsi_id <<
1966 				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
1967 				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
1968 
1969 	fdirdp->dtype_cmd_cntindex =
1970 			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
1971 
1972 	if (add)
1973 		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1974 				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1975 				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1976 	else
1977 		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1978 				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1979 				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1980 
1981 	if (fdir_action->behavior == I40E_FDIR_REJECT)
1982 		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1983 	else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
1984 		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1985 	else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
1986 		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
1987 	else {
1988 		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
1989 		return -EINVAL;
1990 	}
1991 
1992 	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
1993 				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
1994 				I40E_TXD_FLTR_QW1_DEST_MASK);
1995 
1996 	fdirdp->dtype_cmd_cntindex |=
1997 		rte_cpu_to_le_32((fdir_action->report_status <<
1998 				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
1999 				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
2000 
2001 	fdirdp->dtype_cmd_cntindex |=
2002 			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
2003 	fdirdp->dtype_cmd_cntindex |=
2004 			rte_cpu_to_le_32(
2005 			((uint32_t)pf->fdir.match_counter_index <<
2006 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2007 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
2008 
2009 	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
2010 
2011 	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
2012 	txdp = &txq->tx_ring[txq->tx_tail + 1];
2013 	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr[txq->tx_tail >> 1]);
2014 
2015 	td_cmd = I40E_TX_DESC_CMD_EOP |
2016 		 I40E_TX_DESC_CMD_RS  |
2017 		 I40E_TX_DESC_CMD_DUMMY;
2018 
2019 	txdp->cmd_type_offset_bsz =
2020 		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
2021 
2022 	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
2023 	if (txq->tx_tail >= txq->nb_tx_desc)
2024 		txq->tx_tail = 0;
2025 	/* Update the tx tail register */
2026 	rte_wmb();
2027 
2028 	/* fdir program rx queue cleanup */
2029 	i40e_fdir_programming_status_cleanup(rxq);
2030 
2031 	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
2032 
2033 	if (wait_status) {
2034 		for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
2035 			if ((txdp->cmd_type_offset_bsz &
2036 					rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
2037 					rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
2038 				break;
2039 			rte_delay_us(1);
2040 		}
2041 		if (i >= I40E_FDIR_MAX_WAIT_US) {
2042 			PMD_DRV_LOG(ERR,
2043 			    "Failed to program FDIR filter: time out to get DD on tx queue.");
2044 			return -ETIMEDOUT;
2045 		}
2046 		/* totally delay 10 ms to check programming status*/
2047 		rte_delay_us(I40E_FDIR_MAX_WAIT_US);
2048 		if (i40e_check_fdir_programming_status(rxq) < 0) {
2049 			PMD_DRV_LOG(ERR,
2050 			    "Failed to program FDIR filter: programming status reported.");
2051 			return -ETIMEDOUT;
2052 		}
2053 	}
2054 
2055 	return 0;
2056 }
2057 
2058 /*
2059  * i40e_fdir_flush - clear all filters of Flow Director table
2060  * @pf: board private structure
2061  */
2062 int
i40e_fdir_flush(struct rte_eth_dev * dev)2063 i40e_fdir_flush(struct rte_eth_dev *dev)
2064 {
2065 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2066 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2067 	uint32_t reg;
2068 	uint16_t guarant_cnt, best_cnt;
2069 	uint16_t i;
2070 
2071 	I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
2072 	I40E_WRITE_FLUSH(hw);
2073 
2074 	for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
2075 		rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
2076 		reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
2077 		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
2078 			break;
2079 	}
2080 	if (i >= I40E_FDIR_FLUSH_RETRY) {
2081 		PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
2082 		return -ETIMEDOUT;
2083 	}
2084 	guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
2085 				I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2086 				I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2087 	best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
2088 				I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2089 				I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2090 	if (guarant_cnt != 0 || best_cnt != 0) {
2091 		PMD_DRV_LOG(ERR, "Failed to flush FD table.");
2092 		return -ENOSYS;
2093 	} else
2094 		PMD_DRV_LOG(INFO, "FD table Flush success.");
2095 	return 0;
2096 }
2097 
2098 static inline void
i40e_fdir_info_get_flex_set(struct i40e_pf * pf,struct rte_eth_flex_payload_cfg * flex_set,uint16_t * num)2099 i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
2100 			struct rte_eth_flex_payload_cfg *flex_set,
2101 			uint16_t *num)
2102 {
2103 	struct i40e_fdir_flex_pit *flex_pit;
2104 	struct rte_eth_flex_payload_cfg *ptr = flex_set;
2105 	uint16_t src, dst, size, j, k;
2106 	uint8_t i, layer_idx;
2107 
2108 	for (layer_idx = I40E_FLXPLD_L2_IDX;
2109 	     layer_idx <= I40E_FLXPLD_L4_IDX;
2110 	     layer_idx++) {
2111 		if (layer_idx == I40E_FLXPLD_L2_IDX)
2112 			ptr->type = RTE_ETH_L2_PAYLOAD;
2113 		else if (layer_idx == I40E_FLXPLD_L3_IDX)
2114 			ptr->type = RTE_ETH_L3_PAYLOAD;
2115 		else if (layer_idx == I40E_FLXPLD_L4_IDX)
2116 			ptr->type = RTE_ETH_L4_PAYLOAD;
2117 
2118 		for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
2119 			flex_pit = &pf->fdir.flex_set[layer_idx *
2120 				I40E_MAX_FLXPLD_FIED + i];
2121 			if (flex_pit->size == 0)
2122 				continue;
2123 			src = flex_pit->src_offset * sizeof(uint16_t);
2124 			dst = flex_pit->dst_offset * sizeof(uint16_t);
2125 			size = flex_pit->size * sizeof(uint16_t);
2126 			for (j = src, k = dst; j < src + size; j++, k++)
2127 				ptr->src_offset[k] = j;
2128 		}
2129 		(*num)++;
2130 		ptr++;
2131 	}
2132 }
2133 
2134 static inline void
i40e_fdir_info_get_flex_mask(struct i40e_pf * pf,struct rte_eth_fdir_flex_mask * flex_mask,uint16_t * num)2135 i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
2136 			struct rte_eth_fdir_flex_mask *flex_mask,
2137 			uint16_t *num)
2138 {
2139 	struct i40e_fdir_flex_mask *mask;
2140 	struct rte_eth_fdir_flex_mask *ptr = flex_mask;
2141 	uint16_t flow_type;
2142 	uint8_t i, j;
2143 	uint16_t off_bytes, mask_tmp;
2144 
2145 	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2146 	     i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
2147 	     i++) {
2148 		mask =  &pf->fdir.flex_mask[i];
2149 		flow_type = i40e_pctype_to_flowtype(pf->adapter,
2150 						    (enum i40e_filter_pctype)i);
2151 		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
2152 			continue;
2153 
2154 		for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
2155 			if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
2156 				ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
2157 				ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
2158 			} else {
2159 				ptr->mask[j * sizeof(uint16_t)] = 0x0;
2160 				ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
2161 			}
2162 		}
2163 		for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
2164 			off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
2165 			mask_tmp = ~mask->bitmask[j].mask;
2166 			ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
2167 			ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
2168 		}
2169 		ptr->flow_type = flow_type;
2170 		ptr++;
2171 		(*num)++;
2172 	}
2173 }
2174 
2175 /*
2176  * i40e_fdir_info_get - get information of Flow Director
2177  * @pf: ethernet device to get info from
2178  * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with
2179  *    the flow director information.
2180  */
2181 void
i40e_fdir_info_get(struct rte_eth_dev * dev,struct rte_eth_fdir_info * fdir)2182 i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
2183 {
2184 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2185 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2186 	uint16_t num_flex_set = 0;
2187 	uint16_t num_flex_mask = 0;
2188 	uint16_t i;
2189 
2190 	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
2191 		fdir->mode = RTE_FDIR_MODE_PERFECT;
2192 	else
2193 		fdir->mode = RTE_FDIR_MODE_NONE;
2194 
2195 	fdir->guarant_spc =
2196 		(uint32_t)hw->func_caps.fd_filters_guaranteed;
2197 	fdir->best_spc =
2198 		(uint32_t)hw->func_caps.fd_filters_best_effort;
2199 	fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
2200 	fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
2201 	for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
2202 		fdir->flow_types_mask[i] = 0ULL;
2203 	fdir->flex_payload_unit = sizeof(uint16_t);
2204 	fdir->flex_bitmask_unit = sizeof(uint16_t);
2205 	fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
2206 	fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
2207 	fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;
2208 
2209 	i40e_fdir_info_get_flex_set(pf,
2210 				fdir->flex_conf.flex_set,
2211 				&num_flex_set);
2212 	i40e_fdir_info_get_flex_mask(pf,
2213 				fdir->flex_conf.flex_mask,
2214 				&num_flex_mask);
2215 
2216 	fdir->flex_conf.nb_payloads = num_flex_set;
2217 	fdir->flex_conf.nb_flexmasks = num_flex_mask;
2218 }
2219 
2220 /*
2221  * i40e_fdir_stat_get - get statistics of Flow Director
2222  * @pf: ethernet device to get info from
2223  * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with
2224  *    the flow director statistics.
2225  */
2226 void
i40e_fdir_stats_get(struct rte_eth_dev * dev,struct rte_eth_fdir_stats * stat)2227 i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
2228 {
2229 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2230 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2231 	uint32_t fdstat;
2232 
2233 	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2234 	stat->guarant_cnt =
2235 		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2236 			    I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2237 	stat->best_cnt =
2238 		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2239 			    I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2240 }
2241 
2242 /* Restore flow director filter */
2243 void
i40e_fdir_filter_restore(struct i40e_pf * pf)2244 i40e_fdir_filter_restore(struct i40e_pf *pf)
2245 {
2246 	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
2247 	struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
2248 	struct i40e_fdir_filter *f;
2249 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2250 	uint32_t fdstat;
2251 	uint32_t guarant_cnt;  /**< Number of filters in guaranteed spaces. */
2252 	uint32_t best_cnt;     /**< Number of filters in best effort spaces. */
2253 
2254 	TAILQ_FOREACH(f, fdir_list, rules)
2255 		i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
2256 
2257 	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2258 	guarant_cnt =
2259 		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2260 			   I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2261 	best_cnt =
2262 		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2263 			   I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2264 
2265 	PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d,  Best count: %d",
2266 		    guarant_cnt, best_cnt);
2267 }
2268