xref: /f-stack/dpdk/drivers/net/i40e/i40e_fdir.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2015 Intel Corporation
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_memzone.h>
17 #include <rte_malloc.h>
18 #include <rte_arp.h>
19 #include <rte_ip.h>
20 #include <rte_udp.h>
21 #include <rte_tcp.h>
22 #include <rte_sctp.h>
23 #include <rte_hash_crc.h>
24 #include <rte_bitmap.h>
25 
26 #include "i40e_logs.h"
27 #include "base/i40e_type.h"
28 #include "base/i40e_prototype.h"
29 #include "i40e_ethdev.h"
30 #include "i40e_rxtx.h"
31 
32 #define I40E_FDIR_MZ_NAME          "FDIR_MEMZONE"
33 #ifndef IPV6_ADDR_LEN
34 #define IPV6_ADDR_LEN              16
35 #endif
36 
37 #ifndef IPPROTO_L2TP
38 #define IPPROTO_L2TP		  115
39 #endif
40 
41 #define I40E_FDIR_PKT_LEN                   512
42 #define I40E_FDIR_IP_DEFAULT_LEN            420
43 #define I40E_FDIR_IP_DEFAULT_TTL            0x40
44 #define I40E_FDIR_IP_DEFAULT_VERSION_IHL    0x45
45 #define I40E_FDIR_TCP_DEFAULT_DATAOFF       0x50
46 #define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW     0x60000000
47 
48 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS   0xFF
49 #define I40E_FDIR_IPv6_PAYLOAD_LEN          380
50 #define I40E_FDIR_UDP_DEFAULT_LEN           400
51 #define I40E_FDIR_GTP_DEFAULT_LEN           384
52 #define I40E_FDIR_INNER_IP_DEFAULT_LEN      384
53 #define I40E_FDIR_INNER_IPV6_DEFAULT_LEN    344
54 
55 #define I40E_FDIR_GTPC_DST_PORT             2123
56 #define I40E_FDIR_GTPU_DST_PORT             2152
57 #define I40E_FDIR_GTP_VER_FLAG_0X30         0x30
58 #define I40E_FDIR_GTP_VER_FLAG_0X32         0x32
59 #define I40E_FDIR_GTP_MSG_TYPE_0X01         0x01
60 #define I40E_FDIR_GTP_MSG_TYPE_0XFF         0xFF
61 
62 #define I40E_FDIR_ESP_DST_PORT              4500
63 
64 /* Wait time for fdir filter programming */
65 #define I40E_FDIR_MAX_WAIT_US 10000
66 
67 /* Wait count and interval for fdir filter flush */
68 #define I40E_FDIR_FLUSH_RETRY       50
69 #define I40E_FDIR_FLUSH_INTERVAL_MS 5
70 
71 #define I40E_COUNTER_PF           2
72 /* Statistic counter index for one pf */
73 #define I40E_COUNTER_INDEX_FDIR(pf_id)   (0 + (pf_id) * I40E_COUNTER_PF)
74 
75 #define I40E_FDIR_FLOWS ( \
76 	(1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \
77 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
78 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
79 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
80 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
81 	(1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \
82 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
83 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
84 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
85 	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
86 	(1ULL << RTE_ETH_FLOW_L2_PAYLOAD))
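
/*
 * Note: I40E_FDIR_FLOWS is the bitmap of RTE_ETH_FLOW_* types that this
 * flow director implementation can match on.  A minimal sketch of how such
 * a mask can be tested (the helper name below is illustrative only and is
 * not part of this file):
 *
 *	static inline bool
 *	i40e_fdir_flow_type_supported(uint16_t flow_type)
 *	{
 *		return (I40E_FDIR_FLOWS & (1ULL << flow_type)) != 0;
 *	}
 */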
87 
88 static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
89 			 struct i40e_fdir_filter *filter);
90 static struct i40e_fdir_filter *
91 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
92 			const struct i40e_fdir_input *input);
93 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
94 				   struct i40e_fdir_filter *filter);
95 static int
96 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
97 				  enum i40e_filter_pctype pctype,
98 				  const struct i40e_fdir_filter_conf *filter,
99 				  bool add, bool wait_status);
100 
101 static int
102 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
103 {
104 	struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
105 	struct i40e_hmc_obj_rxq rx_ctx;
106 	int err = I40E_SUCCESS;
107 
108 	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
109 	/* Init the RX queue in hardware */
110 	rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
111 	rx_ctx.hbuff = 0;
112 	rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
113 	rx_ctx.qlen = rxq->nb_rx_desc;
114 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
115 	rx_ctx.dsize = 1;
116 #endif
117 	rx_ctx.dtype = i40e_header_split_none;
118 	rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
119 	rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
120 	rx_ctx.tphrdesc_ena = 1;
121 	rx_ctx.tphwdesc_ena = 1;
122 	rx_ctx.tphdata_ena = 1;
123 	rx_ctx.tphhead_ena = 1;
124 	rx_ctx.lrxqthresh = 2;
125 	rx_ctx.crcstrip = 0;
126 	rx_ctx.l2tsel = 1;
127 	rx_ctx.showiv = 0;
128 	rx_ctx.prefena = 1;
129 
130 	err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
131 	if (err != I40E_SUCCESS) {
132 		PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
133 		return err;
134 	}
135 	err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
136 	if (err != I40E_SUCCESS) {
137 		PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
138 		return err;
139 	}
140 	rxq->qrx_tail = hw->hw_addr +
141 		I40E_QRX_TAIL(rxq->vsi->base_queue);
142 
143 	rte_wmb();
144 	/* Init the RX tail register. */
145 	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
146 
147 	return err;
148 }
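
/*
 * Note: the FDIR RX queue configured above never carries regular traffic.
 * It is used only to receive the programming status write-backs that the
 * hardware produces after a filter programming packet has been sent on the
 * FDIR TX queue (see i40e_check_fdir_programming_status() below).
 */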
149 
150 /*
151  * i40e_fdir_setup - reserve and initialize the Flow Director resources
152  * @pf: board private structure
153  */
154 int
155 i40e_fdir_setup(struct i40e_pf *pf)
156 {
157 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
158 	struct i40e_vsi *vsi;
159 	int err = I40E_SUCCESS;
160 	char z_name[RTE_MEMZONE_NAMESIZE];
161 	const struct rte_memzone *mz = NULL;
162 	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
163 	uint16_t i;
164 
165 	if ((pf->flags & I40E_FLAG_FDIR) == 0) {
166 		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
167 		return I40E_NOT_SUPPORTED;
168 	}
169 
170 	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
171 			" num_filters_best_effort = %u.",
172 			hw->func_caps.fd_filters_guaranteed,
173 			hw->func_caps.fd_filters_best_effort);
174 
175 	vsi = pf->fdir.fdir_vsi;
176 	if (vsi) {
177 		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
178 		return I40E_SUCCESS;
179 	}
180 
181 	/* make new FDIR VSI */
182 	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
183 	if (!vsi) {
184 		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
185 		return I40E_ERR_NO_AVAILABLE_VSI;
186 	}
187 	pf->fdir.fdir_vsi = vsi;
188 
189 	/* FDIR TX queue setup */
190 	err = i40e_fdir_setup_tx_resources(pf);
191 	if (err) {
192 		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
193 		goto fail_setup_tx;
194 	}
195 
196 	/* FDIR RX queue setup */
197 	err = i40e_fdir_setup_rx_resources(pf);
198 	if (err) {
199 		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
200 		goto fail_setup_rx;
201 	}
202 
203 	err = i40e_tx_queue_init(pf->fdir.txq);
204 	if (err) {
205 		PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
206 		goto fail_mem;
207 	}
208 
209 	/* need to switch on before dev start */
210 	err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
211 	if (err) {
212 		PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
213 		goto fail_mem;
214 	}
215 
216 	/* Init the rx queue in hardware */
217 	err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
218 	if (err) {
219 		PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
220 		goto fail_mem;
221 	}
222 
223 	/* switch on rx queue */
224 	err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
225 	if (err) {
226 		PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
227 		goto fail_mem;
228 	}
229 
230 	/* enable FDIR MSIX interrupt */
231 	vsi->nb_used_qps = 1;
232 	i40e_vsi_queues_bind_intr(vsi, I40E_ITR_INDEX_NONE);
233 	i40e_vsi_enable_queues_intr(vsi);
234 
235 	/* reserve memory for the fdir programming packet */
236 	snprintf(z_name, sizeof(z_name), "%s_%s_%d",
237 			eth_dev->device->driver->name,
238 			I40E_FDIR_MZ_NAME,
239 			eth_dev->data->port_id);
240 	mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN *
241 			I40E_FDIR_PRG_PKT_CNT, SOCKET_ID_ANY);
242 	if (!mz) {
243 		PMD_DRV_LOG(ERR, "Cannot init memzone for "
244 				 "flow director program packet.");
245 		err = I40E_ERR_NO_MEMORY;
246 		goto fail_mem;
247 	}
248 
249 	for (i = 0; i < I40E_FDIR_PRG_PKT_CNT; i++) {
250 		pf->fdir.prg_pkt[i] = (uint8_t *)mz->addr +
251 			I40E_FDIR_PKT_LEN * i;
252 		pf->fdir.dma_addr[i] = mz->iova +
253 			I40E_FDIR_PKT_LEN * i;
254 	}
255 
256 	pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
257 	pf->fdir.fdir_actual_cnt = 0;
258 	pf->fdir.fdir_guarantee_free_space =
259 		pf->fdir.fdir_guarantee_total_space;
260 
261 	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
262 		    vsi->base_queue);
263 	return I40E_SUCCESS;
264 
265 fail_mem:
266 	i40e_dev_rx_queue_release(pf->fdir.rxq);
267 	pf->fdir.rxq = NULL;
268 fail_setup_rx:
269 	i40e_dev_tx_queue_release(pf->fdir.txq);
270 	pf->fdir.txq = NULL;
271 fail_setup_tx:
272 	i40e_vsi_release(vsi);
273 	pf->fdir.fdir_vsi = NULL;
274 	return err;
275 }
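
/*
 * Note: the memzone reserved above is carved into I40E_FDIR_PRG_PKT_CNT
 * fixed-size slots of I40E_FDIR_PKT_LEN bytes each; prg_pkt[i] holds the
 * virtual address of slot i and dma_addr[i] the matching IOVA.  A sketch of
 * the slot addressing, equivalent to the loop above:
 *
 *	buf  = (uint8_t *)mz->addr + i * I40E_FDIR_PKT_LEN;
 *	iova = mz->iova + i * I40E_FDIR_PKT_LEN;
 */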
276 
277 /*
278  * i40e_fdir_teardown - release the Flow Director resources
279  * @pf: board private structure
280  */
281 void
282 i40e_fdir_teardown(struct i40e_pf *pf)
283 {
284 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
285 	struct i40e_vsi *vsi;
286 	struct rte_eth_dev *dev = pf->adapter->eth_dev;
287 
288 	vsi = pf->fdir.fdir_vsi;
289 	if (!vsi)
290 		return;
291 
292 	/* disable FDIR MSIX interrupt */
293 	i40e_vsi_queues_unbind_intr(vsi);
294 	i40e_vsi_disable_queues_intr(vsi);
295 
296 	int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
297 	if (err)
298 		PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
299 	err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
300 	if (err)
301 		PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
302 
303 	i40e_dev_rx_queue_release(pf->fdir.rxq);
304 	rte_eth_dma_zone_free(dev, "fdir_rx_ring", pf->fdir.rxq->queue_id);
305 	pf->fdir.rxq = NULL;
306 	i40e_dev_tx_queue_release(pf->fdir.txq);
307 	rte_eth_dma_zone_free(dev, "fdir_tx_ring", pf->fdir.txq->queue_id);
308 	pf->fdir.txq = NULL;
309 	i40e_vsi_release(vsi);
310 	pf->fdir.fdir_vsi = NULL;
311 }
312 
313 /* check whether the flow director table is empty */
314 static inline int
315 i40e_fdir_empty(struct i40e_hw *hw)
316 {
317 	uint32_t guarant_cnt, best_cnt;
318 
319 	guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
320 				 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
321 				 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
322 	best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
323 			      I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
324 			      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
325 	if (best_cnt + guarant_cnt > 0)
326 		return -1;
327 
328 	return 0;
329 }
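
/*
 * Note: I40E_PFQF_FDSTAT exposes two usage counters, the number of filters
 * consumed from this PF's guaranteed space and from the shared best-effort
 * space.  The table is treated as non-empty as soon as either counter is
 * non-zero.
 */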
330 
331 /*
332  * Initialize the configuration of the byte stream extracted as flexible
333  * payload and the mask settings
334  */
335 static inline void
336 i40e_init_flx_pld(struct i40e_pf *pf)
337 {
338 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
339 	uint8_t pctype;
340 	int i, index;
341 	uint16_t flow_type;
342 
343 	/*
344 	 * Define the byte stream extracted as flexible payload in the
345 	 * field vector. By default, select 8 words from the beginning
346 	 * of the payload as flexible payload.
347 	 */
348 	for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
349 		index = i * I40E_MAX_FLXPLD_FIED;
350 		pf->fdir.flex_set[index].src_offset = 0;
351 		pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
352 		pf->fdir.flex_set[index].dst_offset = 0;
353 		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
354 		I40E_WRITE_REG(hw,
355 			I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/* not used */
356 		I40E_WRITE_REG(hw,
357 			I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/* not used */
358 	}
359 
360 	/* initialize the masks */
361 	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
362 	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
363 		flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
364 
365 		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
366 			continue;
367 		pf->fdir.flex_mask[pctype].word_mask = 0;
368 		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
369 		for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
370 			pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
371 			pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
372 			i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
373 		}
374 	}
375 }
376 
377 #define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
378 	if ((flex_pit2).src_offset < \
379 		(flex_pit1).src_offset + (flex_pit1).size) { \
380 		PMD_DRV_LOG(ERR, "src_offset should be not" \
381 			" less than than previous offset" \
382 			" + previous FSIZE."); \
383 		return -EINVAL; \
384 	} \
385 } while (0)
386 
387 /*
388  * i40e_srcoff_to_flx_pit - transform the src_offset array into flex_pit
389  * structures, sorted by their src_offset values
390  */
391 static inline uint16_t
392 i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
393 			struct i40e_fdir_flex_pit *flex_pit)
394 {
395 	uint16_t src_tmp, size, num = 0;
396 	uint16_t i, k, j = 0;
397 
398 	while (j < I40E_FDIR_MAX_FLEX_LEN) {
399 		size = 1;
400 		for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
401 			if (src_offset[j + 1] == src_offset[j] + 1)
402 				size++;
403 			else
404 				break;
405 		}
406 		src_tmp = src_offset[j] + 1 - size;
407 		/* the flex_pit entries need to be sorted by src_offset */
408 		for (i = 0; i < num; i++) {
409 			if (src_tmp < flex_pit[i].src_offset)
410 				break;
411 		}
412 		/* if an insertion is required, shift the following entries back */
413 		for (k = num; k > i; k--)
414 			flex_pit[k] = flex_pit[k - 1];
415 		/* insert */
416 		flex_pit[i].dst_offset = j + 1 - size;
417 		flex_pit[i].src_offset = src_tmp;
418 		flex_pit[i].size = size;
419 		j++;
420 		num++;
421 	}
422 	return num;
423 }
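
/*
 * Note: a worked example of the conversion above.  Assuming the first four
 * flex bytes come from payload offsets 4..7 and the next entries continue
 * from offset 10:
 *
 *	src_offset[] = { 4, 5, 6, 7, 10, 11, ... }
 *
 * the first run yields { src_offset = 4, size = 4, dst_offset = 0 } and the
 * second { src_offset = 10, size = ..., dst_offset = 4 }, kept sorted by
 * src_offset.  Offsets and sizes are still in bytes here; callers such as
 * i40e_set_flx_pld_cfg() convert them to words before programming FLX_PIT.
 */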
424 
425 /* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
426 static inline int
427 i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
428 {
429 	struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
430 	uint16_t num, i;
431 
432 	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
433 		if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
434 			PMD_DRV_LOG(ERR, "exceeds maxmial payload limit.");
435 			return -EINVAL;
436 		}
437 	}
438 
439 	memset(flex_pit, 0, sizeof(flex_pit));
440 	num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
441 	if (num > I40E_MAX_FLXPLD_FIED) {
442 		PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields.");
443 		return -EINVAL;
444 	}
445 	for (i = 0; i < num; i++) {
446 		if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
447 			flex_pit[i].src_offset & 0x01) {
448 			PMD_DRV_LOG(ERR, "flexpayload should be measured"
449 				" in word");
450 			return -EINVAL;
451 		}
452 		if (i != num - 1)
453 			I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
454 	}
455 	return 0;
456 }
457 
458 /*
459  * i40e_check_fdir_flex_conf - check whether the flex payload and mask configuration
460  * arguments are valid
461  */
462 static int
463 i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
464 			  const struct rte_eth_fdir_flex_conf *conf)
465 {
466 	const struct rte_eth_flex_payload_cfg *flex_cfg;
467 	const struct rte_eth_fdir_flex_mask *flex_mask;
468 	uint16_t mask_tmp;
469 	uint8_t nb_bitmask;
470 	uint16_t i, j;
471 	int ret = 0;
472 	enum i40e_filter_pctype pctype;
473 
474 	if (conf == NULL) {
475 		PMD_DRV_LOG(INFO, "NULL pointer.");
476 		return -EINVAL;
477 	}
478 	/* check flexible payload setting configuration */
479 	if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
480 		PMD_DRV_LOG(ERR, "invalid number of payload setting.");
481 		return -EINVAL;
482 	}
483 	for (i = 0; i < conf->nb_payloads; i++) {
484 		flex_cfg = &conf->flex_set[i];
485 		if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
486 			PMD_DRV_LOG(ERR, "invalid payload type.");
487 			return -EINVAL;
488 		}
489 		ret = i40e_check_fdir_flex_payload(flex_cfg);
490 		if (ret < 0) {
491 			PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
492 			return -EINVAL;
493 		}
494 	}
495 
496 	/* check flex mask setting configuration */
497 	if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
498 		PMD_DRV_LOG(ERR, "invalid number of flex masks.");
499 		return -EINVAL;
500 	}
501 	for (i = 0; i < conf->nb_flexmasks; i++) {
502 		flex_mask = &conf->flex_mask[i];
503 		pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
504 		if (pctype == I40E_FILTER_PCTYPE_INVALID) {
505 			PMD_DRV_LOG(WARNING, "invalid flow type.");
506 			return -EINVAL;
507 		}
508 		nb_bitmask = 0;
509 		for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
510 			mask_tmp = I40E_WORD(flex_mask->mask[j],
511 					     flex_mask->mask[j + 1]);
512 			if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
513 				nb_bitmask++;
514 				if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
515 					PMD_DRV_LOG(ERR, "exceeds maximal"
516 						" number of bitmasks.");
517 					return -EINVAL;
518 				}
519 			}
520 		}
521 	}
522 	return 0;
523 }
524 
525 /*
526  * i40e_set_flx_pld_cfg - configure how the byte stream is extracted as flexible payload
527  * @pf: board private structure
528  * @cfg: the rule describing how the byte stream is extracted as flexible payload
529  */
530 static void
531 i40e_set_flx_pld_cfg(struct i40e_pf *pf,
532 			 const struct rte_eth_flex_payload_cfg *cfg)
533 {
534 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
535 	struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
536 	uint32_t flx_pit, flx_ort;
537 	uint16_t num, min_next_off;  /* in words */
538 	uint8_t field_idx = 0;
539 	uint8_t layer_idx = 0;
540 	uint16_t i;
541 
542 	if (cfg->type == RTE_ETH_L2_PAYLOAD)
543 		layer_idx = I40E_FLXPLD_L2_IDX;
544 	else if (cfg->type == RTE_ETH_L3_PAYLOAD)
545 		layer_idx = I40E_FLXPLD_L3_IDX;
546 	else if (cfg->type == RTE_ETH_L4_PAYLOAD)
547 		layer_idx = I40E_FLXPLD_L4_IDX;
548 
549 	memset(flex_pit, 0, sizeof(flex_pit));
550 	num = RTE_MIN(i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit),
551 		      RTE_DIM(flex_pit));
552 
553 	if (num) {
554 		flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
555 			  (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
556 			  (layer_idx * I40E_MAX_FLXPLD_FIED);
557 		I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
558 	}
559 
560 	for (i = 0; i < num; i++) {
561 		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
562 		/* record the info in fdir structure */
563 		pf->fdir.flex_set[field_idx].src_offset =
564 			flex_pit[i].src_offset / sizeof(uint16_t);
565 		pf->fdir.flex_set[field_idx].size =
566 			flex_pit[i].size / sizeof(uint16_t);
567 		pf->fdir.flex_set[field_idx].dst_offset =
568 			flex_pit[i].dst_offset / sizeof(uint16_t);
569 		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
570 				pf->fdir.flex_set[field_idx].size,
571 				pf->fdir.flex_set[field_idx].dst_offset);
572 
573 		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
574 	}
575 	min_next_off = pf->fdir.flex_set[field_idx].src_offset +
576 				pf->fdir.flex_set[field_idx].size;
577 
578 	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
579 		/* program the unused registers, obeying the register constraints */
580 		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
581 			   NONUSE_FLX_PIT_DEST_OFF);
582 		I40E_WRITE_REG(hw,
583 			I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
584 			flx_pit);
585 		min_next_off++;
586 	}
587 }
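
/*
 * Note: the trailing loop above programs the unused FLX_PIT entries of the
 * layer with dummy, strictly increasing source offsets (min_next_off++),
 * which the hardware requires even for fields that are never extracted.
 */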
588 
589 /*
590  * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
591  * @pf: board private structure
592  * @pctype: packet classification type
593  * @mask_cfg: mask for the flexible payload
594  */
595 static void
596 i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
597 		enum i40e_filter_pctype pctype,
598 		const struct rte_eth_fdir_flex_mask *mask_cfg)
599 {
600 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
601 	struct i40e_fdir_flex_mask *flex_mask;
602 	uint32_t flxinset, fd_mask;
603 	uint16_t mask_tmp;
604 	uint8_t i, nb_bitmask = 0;
605 
606 	flex_mask = &pf->fdir.flex_mask[pctype];
607 	memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
608 	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
609 		mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
610 		if (mask_tmp != 0x0) {
611 			flex_mask->word_mask |=
612 				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
613 			if (mask_tmp != UINT16_MAX) {
614 				/* set bit mask */
615 				flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
616 				flex_mask->bitmask[nb_bitmask].offset =
617 					i / sizeof(uint16_t);
618 				nb_bitmask++;
619 			}
620 		}
621 	}
622 	/* write mask to hw */
623 	flxinset = (flex_mask->word_mask <<
624 		I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
625 		I40E_PRTQF_FD_FLXINSET_INSET_MASK;
626 	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
627 
628 	for (i = 0; i < nb_bitmask; i++) {
629 		fd_mask = (flex_mask->bitmask[i].mask <<
630 			I40E_PRTQF_FD_MSK_MASK_SHIFT) &
631 			I40E_PRTQF_FD_MSK_MASK_MASK;
632 		fd_mask |= ((flex_mask->bitmask[i].offset +
633 			I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
634 			I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
635 			I40E_PRTQF_FD_MSK_OFFSET_MASK;
636 		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
637 	}
638 }
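
/*
 * Note: a worked example of the mask translation above.  Assuming the
 * second flex word (word offset 1, i.e. mask_cfg->mask[2]/mask[3]) should
 * be matched with mask 0xFFF0:
 *
 *	word_mask        |= I40E_FLEX_WORD_MASK(1);
 *	bitmask[0].offset = 1;
 *	bitmask[0].mask   = (uint16_t)~0xFFF0;	-- stored inverted: 0x000F
 *
 * A fully set word mask (0xFFFF) only sets word_mask; an all-zero word mask
 * sets nothing.
 */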
639 
640 /*
641  * Enable/disable flow director RX processing in vector routines.
642  */
643 void
644 i40e_fdir_rx_proc_enable(struct rte_eth_dev *dev, bool on)
645 {
646 	int32_t i;
647 
648 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
649 		struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
650 		if (!rxq)
651 			continue;
652 		rxq->fdir_enabled = on;
653 	}
654 	PMD_DRV_LOG(DEBUG, "Flow Director processing on RX set to %d", on);
655 }
656 
657 /*
658  * Configure flow director related setting
659  */
660 int
661 i40e_fdir_configure(struct rte_eth_dev *dev)
662 {
663 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
664 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
665 	struct rte_eth_fdir_flex_conf *conf;
666 	enum i40e_filter_pctype pctype;
667 	uint32_t val;
668 	uint8_t i;
669 	int ret = 0;
670 
671 	/*
672 	 * Configuration needs to be done before
673 	 * flow director filters are added.
674 	 * If filters exist, flush them.
675 	 */
676 	if (i40e_fdir_empty(hw) < 0) {
677 		ret = i40e_fdir_flush(dev);
678 		if (ret) {
679 			PMD_DRV_LOG(ERR, "failed to flush fdir table.");
680 			return ret;
681 		}
682 	}
683 
684 	/* enable FDIR filter */
685 	val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
686 	val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
687 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
688 
689 	i40e_init_flx_pld(pf); /* set flex config to default value */
690 
691 	conf = &dev->data->dev_conf.fdir_conf.flex_conf;
692 	ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
693 	if (ret < 0) {
694 		PMD_DRV_LOG(ERR, " invalid configuration arguments.");
695 		return -EINVAL;
696 	}
697 
698 	if (!pf->support_multi_driver) {
699 		/* configure flex payload */
700 		for (i = 0; i < conf->nb_payloads; i++)
701 			i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
702 		/* configure flex mask*/
703 		for (i = 0; i < conf->nb_flexmasks; i++) {
704 			if (hw->mac.type == I40E_MAC_X722) {
705 				/* get pctype value in fd pctype register */
706 				pctype = (enum i40e_filter_pctype)
707 					  i40e_read_rx_ctl(hw,
708 						I40E_GLQF_FD_PCTYPES(
709 						(int)i40e_flowtype_to_pctype(
710 						pf->adapter,
711 						conf->flex_mask[i].flow_type)));
712 			} else {
713 				pctype = i40e_flowtype_to_pctype(pf->adapter,
714 						  conf->flex_mask[i].flow_type);
715 			}
716 
717 			i40e_set_flex_mask_on_pctype(pf, pctype,
718 						     &conf->flex_mask[i]);
719 		}
720 	} else {
721 		PMD_DRV_LOG(ERR, "Not support flexible payload.");
722 	}
723 
724 	/* Enable FDIR processing in RX routines */
725 	i40e_fdir_rx_proc_enable(dev, 1);
726 
727 	return ret;
728 }
729 
730 
731 static struct i40e_customized_pctype *
732 i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
733 {
734 	struct i40e_customized_pctype *cus_pctype;
735 	enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
736 
737 	for (; i < I40E_CUSTOMIZED_MAX; i++) {
738 		cus_pctype = &pf->customized_pctype[i];
739 		if (pctype == cus_pctype->pctype)
740 			return cus_pctype;
741 	}
742 	return NULL;
743 }
744 
745 static inline int
746 fill_ip6_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt,
747 		uint8_t next_proto, uint8_t len, uint16_t *ether_type)
748 {
749 	struct rte_ipv6_hdr *ip6;
750 
751 	ip6 = (struct rte_ipv6_hdr *)raw_pkt;
752 
753 	*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
754 	ip6->vtc_flow = rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
755 		(fdir_input->flow.ipv6_flow.tc << I40E_FDIR_IPv6_TC_OFFSET));
756 	ip6->payload_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
757 	ip6->proto = fdir_input->flow.ipv6_flow.proto ?
758 		fdir_input->flow.ipv6_flow.proto : next_proto;
759 	ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
760 		fdir_input->flow.ipv6_flow.hop_limits :
761 		I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
762 	/**
763 	 * The source and destination fields in the transmitted packet
764 	 * need to be presented in a reversed order with respect
765 	 * to the expected received packets.
766 	 */
767 	rte_memcpy(&ip6->src_addr, &fdir_input->flow.ipv6_flow.dst_ip,
768 		IPV6_ADDR_LEN);
769 	rte_memcpy(&ip6->dst_addr, &fdir_input->flow.ipv6_flow.src_ip,
770 		IPV6_ADDR_LEN);
771 	len += sizeof(struct rte_ipv6_hdr);
772 
773 	return len;
774 }
775 
776 static inline int
777 fill_ip4_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt,
778 		uint8_t next_proto, uint8_t len, uint16_t *ether_type)
779 {
780 	struct rte_ipv4_hdr *ip4;
781 
782 	ip4 = (struct rte_ipv4_hdr *)raw_pkt;
783 
784 	*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
785 	ip4->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
786 	/* set total length to the default value */
787 	ip4->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
788 	ip4->time_to_live = fdir_input->flow.ip4_flow.ttl ?
789 		fdir_input->flow.ip4_flow.ttl :
790 		I40E_FDIR_IP_DEFAULT_TTL;
791 	ip4->type_of_service = fdir_input->flow.ip4_flow.tos;
792 	ip4->next_proto_id = fdir_input->flow.ip4_flow.proto ?
793 		fdir_input->flow.ip4_flow.proto : next_proto;
794 	/**
795 	 * The source and destination fields in the transmitted packet
796 	 * need to be presented in a reversed order with respect
797 	 * to the expected received packets.
798 	 */
799 	ip4->src_addr = fdir_input->flow.ip4_flow.dst_ip;
800 	ip4->dst_addr = fdir_input->flow.ip4_flow.src_ip;
801 	len += sizeof(struct rte_ipv4_hdr);
802 
803 	return len;
804 }
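
/*
 * Note: both fill_ip4_head() and fill_ip6_head() deliberately swap source
 * and destination.  The packet built here is a programming packet sent to
 * the hardware, and it must carry the address fields in reversed order with
 * respect to the packets the resulting filter is expected to match: a rule
 * meant to match received traffic with src=A, dst=B is programmed with a
 * packet carrying src=B, dst=A.
 */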
805 
806 static inline int
807 i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
808 				const struct i40e_fdir_input *fdir_input,
809 				unsigned char *raw_pkt,
810 				bool vlan)
811 {
812 	struct i40e_customized_pctype *cus_pctype = NULL;
813 	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
814 	uint16_t *ether_type;
815 	uint8_t len = 2 * sizeof(struct rte_ether_addr);
816 	uint8_t pctype = fdir_input->pctype;
817 	bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
818 	static const uint8_t next_proto[] = {
819 		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
820 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
821 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
822 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
823 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
824 		[I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
825 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
826 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
827 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
828 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
829 	};
830 
831 	rte_memcpy(raw_pkt, &fdir_input->flow.l2_flow.dst,
832 		sizeof(struct rte_ether_addr));
833 	rte_memcpy(raw_pkt + sizeof(struct rte_ether_addr),
834 		&fdir_input->flow.l2_flow.src,
835 		sizeof(struct rte_ether_addr));
836 	raw_pkt += 2 * sizeof(struct rte_ether_addr);
837 
838 	if (vlan && fdir_input->flow_ext.vlan_tci) {
839 		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
840 		rte_memcpy(raw_pkt + sizeof(uint16_t),
841 			   &fdir_input->flow_ext.vlan_tci,
842 			   sizeof(uint16_t));
843 		raw_pkt += sizeof(vlan_frame);
844 		len += sizeof(vlan_frame);
845 	}
846 	ether_type = (uint16_t *)raw_pkt;
847 	raw_pkt += sizeof(uint16_t);
848 	len += sizeof(uint16_t);
849 
850 	if (is_customized_pctype) {
851 		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
852 		if (!cus_pctype) {
853 			PMD_DRV_LOG(ERR, "unknown pctype %u.",
854 				    fdir_input->pctype);
855 			return -1;
856 		}
857 	}
858 
859 	if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
860 		*ether_type = fdir_input->flow.l2_flow.ether_type;
861 	else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
862 		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
863 		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
864 		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
865 		 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
866 		 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
867 		 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
868 		 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
869 		 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
870 		 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6 ||
871 		 is_customized_pctype) {
872 		if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
873 			pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
874 			pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
875 			pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
876 			pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
877 			len = fill_ip4_head(fdir_input, raw_pkt,
878 					next_proto[pctype], len, ether_type);
879 		} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
880 			pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
881 			pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
882 			pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
883 			pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
884 			len = fill_ip6_head(fdir_input, raw_pkt,
885 					next_proto[pctype], len,
886 					ether_type);
887 		} else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
888 			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
889 			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
890 			 cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
891 			len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
892 					len, ether_type);
893 		} else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3) {
894 			len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_L2TP,
895 					len, ether_type);
896 		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
897 			len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_ESP,
898 					len, ether_type);
899 		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP) {
900 			len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
901 					len, ether_type);
905 		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6)
906 			len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_ESP,
907 					len, ether_type);
908 		else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP)
909 			len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_UDP,
910 					len, ether_type);
911 		else if (cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3)
912 			len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_L2TP,
913 					len, ether_type);
914 	} else {
915 		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
916 		return -1;
917 	}
918 
919 	return len;
920 }
921 
922 /**
923  * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
924  * @pf: board private structure
925  * @fdir_input: input set of the flow director entry
926  * @raw_pkt: a packet to be constructed
927  */
928 static int
929 i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
930 			     const struct i40e_fdir_input *fdir_input,
931 			     unsigned char *raw_pkt)
932 {
933 	unsigned char *payload = NULL;
934 	unsigned char *ptr;
935 	struct rte_udp_hdr *udp;
936 	struct rte_tcp_hdr *tcp;
937 	struct rte_sctp_hdr *sctp;
938 	struct rte_flow_item_gtp *gtp;
939 	struct rte_ipv4_hdr *gtp_ipv4;
940 	struct rte_ipv6_hdr *gtp_ipv6;
941 	struct rte_flow_item_l2tpv3oip *l2tpv3oip;
942 	struct rte_flow_item_esp *esp;
943 	struct rte_ipv4_hdr *esp_ipv4;
944 	struct rte_ipv6_hdr *esp_ipv6;
945 
946 	uint8_t size, dst = 0;
947 	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
948 	int len;
949 	uint8_t pctype = fdir_input->pctype;
950 	struct i40e_customized_pctype *cus_pctype;
951 
952 	/* raw packet template - just copy the contents of the raw packet */
953 	if (fdir_input->flow_ext.pkt_template) {
954 		memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
955 		       fdir_input->flow.raw_flow.length);
956 		return 0;
957 	}
958 
959 	/* fill the ethernet and IP head */
960 	len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
961 					      !!fdir_input->flow_ext.vlan_tci);
962 	if (len < 0)
963 		return -EINVAL;
964 
965 	/* fill the L4 head */
966 	if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
967 		udp = (struct rte_udp_hdr *)(raw_pkt + len);
968 		payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
969 		/**
970 		 * The source and destination fields in the transmitted packet
971 		 * need to be presented in a reversed order with respect
972 		 * to the expected received packets.
973 		 */
974 		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
975 		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
976 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
977 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
978 		tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
979 		payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
980 		/**
981 		 * The source and destination fields in the transmitted packet
982 		 * need to be presented in a reversed order with respect
983 		 * to the expected received packets.
984 		 */
985 		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
986 		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
987 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
988 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
989 		sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
990 		payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
991 		/**
992 		 * The source and destination fields in the transmitted packet
993 		 * need to be presented in a reversed order with respect
994 		 * to the expected received packets.
995 		 */
996 		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
997 		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
998 		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
999 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1000 		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
1001 		payload = raw_pkt + len;
1002 		set_idx = I40E_FLXPLD_L3_IDX;
1003 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
1004 		udp = (struct rte_udp_hdr *)(raw_pkt + len);
1005 		payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
1006 		/**
1007 		 * The source and destination fields in the transmitted packet
1008 		 * need to be presented in a reversed order with respect
1009 		 * to the expected received packets.
1010 		 */
1011 		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
1012 		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
1013 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1014 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
1015 		tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
1016 		payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
1017 		/**
1018 		 * The source and destination fields in the transmitted packet
1019 		 * need to be presented in a reversed order with respect
1020 		 * to the expected received packets.
1021 		 */
1022 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1023 		tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
1024 		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
1025 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
1026 		sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
1027 		payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
1028 		/**
1029 		 * The source and destination fields in the transmitted packet
1030 		 * need to be presented in a reversed order with respect
1031 		 * to the expected received packets.
1032 		 */
1033 		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
1034 		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
1035 		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
1036 	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1037 		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1038 		payload = raw_pkt + len;
1039 		set_idx = I40E_FLXPLD_L3_IDX;
1040 	} else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1041 		payload = raw_pkt + len;
1042 		/**
1043 		 * ARP packet is a special case on which the payload
1044 		 * starts after the whole ARP header
1045 		 */
1046 		if (fdir_input->flow.l2_flow.ether_type ==
1047 				rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
1048 			payload += sizeof(struct rte_arp_hdr);
1049 		set_idx = I40E_FLXPLD_L2_IDX;
1050 	} else if (fdir_input->flow_ext.customized_pctype) {
1051 		/* If customized pctype is used */
1052 		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1053 		if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1054 		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1055 		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1056 		    cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
1057 			udp = (struct rte_udp_hdr *)(raw_pkt + len);
1058 			udp->dgram_len =
1059 				rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1060 
1061 			gtp = (struct rte_flow_item_gtp *)
1062 				((unsigned char *)udp +
1063 					sizeof(struct rte_udp_hdr));
1064 			gtp->msg_len =
1065 				rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
1066 			gtp->teid = fdir_input->flow.gtp_flow.teid;
1067 			gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
1068 
1069 			/* GTP-C message type is not supported. */
1070 			if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
1071 				udp->dst_port =
1072 				      rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
1073 				gtp->v_pt_rsv_flags =
1074 					I40E_FDIR_GTP_VER_FLAG_0X32;
1075 			} else {
1076 				udp->dst_port =
1077 				      rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
1078 				gtp->v_pt_rsv_flags =
1079 					I40E_FDIR_GTP_VER_FLAG_0X30;
1080 			}
1081 
1082 			if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
1083 				gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1084 				gtp_ipv4 = (struct rte_ipv4_hdr *)
1085 					((unsigned char *)gtp +
1086 					 sizeof(struct rte_flow_item_gtp));
1087 				gtp_ipv4->version_ihl =
1088 					I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1089 				gtp_ipv4->next_proto_id = IPPROTO_IP;
1090 				gtp_ipv4->total_length =
1091 					rte_cpu_to_be_16(
1092 						I40E_FDIR_INNER_IP_DEFAULT_LEN);
1093 				payload = (unsigned char *)gtp_ipv4 +
1094 					sizeof(struct rte_ipv4_hdr);
1095 			} else if (cus_pctype->index ==
1096 				   I40E_CUSTOMIZED_GTPU_IPV6) {
1097 				gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1098 				gtp_ipv6 = (struct rte_ipv6_hdr *)
1099 					((unsigned char *)gtp +
1100 					 sizeof(struct rte_flow_item_gtp));
1101 				gtp_ipv6->vtc_flow =
1102 					rte_cpu_to_be_32(
1103 					       I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1104 					       (0 << I40E_FDIR_IPv6_TC_OFFSET));
1105 				gtp_ipv6->proto = IPPROTO_NONE;
1106 				gtp_ipv6->payload_len =
1107 					rte_cpu_to_be_16(
1108 					      I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
1109 				gtp_ipv6->hop_limits =
1110 					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1111 				payload = (unsigned char *)gtp_ipv6 +
1112 					sizeof(struct rte_ipv6_hdr);
1113 			} else
1114 				payload = (unsigned char *)gtp +
1115 					sizeof(struct rte_flow_item_gtp);
1116 		} else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3 ||
1117 			   cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3) {
1118 			l2tpv3oip = (struct rte_flow_item_l2tpv3oip *)(raw_pkt
1119 								       + len);
1120 
1121 			if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3)
1122 				l2tpv3oip->session_id =
1123 				 fdir_input->flow.ip4_l2tpv3oip_flow.session_id;
1124 			else
1125 				l2tpv3oip->session_id =
1126 				 fdir_input->flow.ip6_l2tpv3oip_flow.session_id;
1127 			payload = (unsigned char *)l2tpv3oip +
1128 				sizeof(struct rte_flow_item_l2tpv3oip);
1129 		} else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4 ||
1130 			cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6 ||
1131 			cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP ||
1132 			cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP) {
1133 			if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
1134 				esp_ipv4 = (struct rte_ipv4_hdr *)
1135 					(raw_pkt + len);
1136 				esp = (struct rte_flow_item_esp *)esp_ipv4;
1137 				esp->hdr.spi =
1138 					fdir_input->flow.esp_ipv4_flow.spi;
1139 				payload = (unsigned char *)esp +
1140 					sizeof(struct rte_esp_hdr);
1141 				len += sizeof(struct rte_esp_hdr);
1142 			} else if (cus_pctype->index ==
1143 					I40E_CUSTOMIZED_ESP_IPV4_UDP) {
1144 				esp_ipv4 = (struct rte_ipv4_hdr *)
1145 					(raw_pkt + len);
1146 				udp = (struct rte_udp_hdr *)esp_ipv4;
1147 				udp->dst_port = rte_cpu_to_be_16
1148 					(I40E_FDIR_ESP_DST_PORT);
1149 
1150 				udp->dgram_len = rte_cpu_to_be_16
1151 						(I40E_FDIR_UDP_DEFAULT_LEN);
1152 				esp = (struct rte_flow_item_esp *)
1153 					((unsigned char *)esp_ipv4 +
1154 						sizeof(struct rte_udp_hdr));
1155 				esp->hdr.spi =
1156 					fdir_input->flow.esp_ipv4_udp_flow.spi;
1157 				payload = (unsigned char *)esp +
1158 					sizeof(struct rte_esp_hdr);
1159 				len += sizeof(struct rte_udp_hdr) +
1160 						sizeof(struct rte_esp_hdr);
1161 			} else if (cus_pctype->index ==
1162 					I40E_CUSTOMIZED_ESP_IPV6) {
1163 				esp_ipv6 = (struct rte_ipv6_hdr *)
1164 					(raw_pkt + len);
1165 				esp = (struct rte_flow_item_esp *)esp_ipv6;
1166 				esp->hdr.spi =
1167 					fdir_input->flow.esp_ipv6_flow.spi;
1168 				payload = (unsigned char *)esp +
1169 					sizeof(struct rte_esp_hdr);
1170 				len += sizeof(struct rte_esp_hdr);
1171 			} else if (cus_pctype->index ==
1172 					I40E_CUSTOMIZED_ESP_IPV6_UDP) {
1173 				esp_ipv6 = (struct rte_ipv6_hdr *)
1174 					(raw_pkt + len);
1175 				udp = (struct rte_udp_hdr *)esp_ipv6;
1176 				udp->dst_port =	rte_cpu_to_be_16
1177 					(I40E_FDIR_ESP_DST_PORT);
1178 
1179 				udp->dgram_len = rte_cpu_to_be_16
1180 					(I40E_FDIR_UDP_DEFAULT_LEN);
1181 				esp = (struct rte_flow_item_esp *)
1182 					((unsigned char *)esp_ipv6 +
1183 						sizeof(struct rte_udp_hdr));
1184 				esp->hdr.spi =
1185 					fdir_input->flow.esp_ipv6_udp_flow.spi;
1186 				payload = (unsigned char *)esp +
1187 					sizeof(struct rte_esp_hdr);
1188 				len += sizeof(struct rte_udp_hdr) +
1189 						sizeof(struct rte_esp_hdr);
1190 			}
1191 		}
1192 	} else {
1193 		PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
1194 		return -1;
1195 	}
1196 
1197 	/* fill the flexbytes to payload */
1198 	for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1199 		pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
1200 		size = pf->fdir.flex_set[pit_idx].size;
1201 		if (size == 0)
1202 			continue;
1203 		dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
1204 		ptr = payload +
1205 		      pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
1206 		(void)rte_memcpy(ptr,
1207 				 &fdir_input->flow_ext.flexbytes[dst],
1208 				 size * sizeof(uint16_t));
1209 	}
1210 
1211 	return 0;
1212 }
1213 
1214 /* Construct the tx flags */
1215 static inline uint64_t
1216 i40e_build_ctob(uint32_t td_cmd,
1217 		uint32_t td_offset,
1218 		unsigned int size,
1219 		uint32_t td_tag)
1220 {
1221 	return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
1222 			((uint64_t)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
1223 			((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
1224 			((uint64_t)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
1225 			((uint64_t)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
1226 }
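
/*
 * Note: i40e_build_ctob() packs the command, offset, buffer size and L2 tag
 * fields into the second quadword (QW1) of a TX data descriptor.  A sketch
 * of how the FDIR programming path can use it (the exact command flags are
 * illustrative here):
 *
 *	txdp->cmd_type_offset_bsz = i40e_build_ctob(
 *		I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS,
 *		0, I40E_FDIR_PKT_LEN, 0);
 */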
1227 
1228 /*
1229  * Check the programming status descriptor in the RX queue.
1230  * This is done after a Flow Director filter has been programmed
1231  * on the TX queue.
1232  */
1233 static inline int
1234 i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
1235 {
1236 	volatile union i40e_rx_desc *rxdp;
1237 	uint64_t qword1;
1238 	uint32_t rx_status;
1239 	uint32_t len, id;
1240 	uint32_t error;
1241 	int ret = 0;
1242 
1243 	rxdp = &rxq->rx_ring[rxq->rx_tail];
1244 	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1245 	rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
1246 			>> I40E_RXD_QW1_STATUS_SHIFT;
1247 
1248 	if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
1249 		len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
1250 		id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1251 			    I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1252 
1253 		if (len  == I40E_RX_PROG_STATUS_DESC_LENGTH &&
1254 		    id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
1255 			error = (qword1 &
1256 				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
1257 				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
1258 			if (error == (0x1 <<
1259 				I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
1260 				PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
1261 					    " (FD_ID %u): programming status"
1262 					    " reported.",
1263 					    rxdp->wb.qword0.hi_dword.fd_id);
1264 				ret = -1;
1265 			} else if (error == (0x1 <<
1266 				I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
1267 				PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
1268 					    " (FD_ID %u): programming status"
1269 					    " reported.",
1270 					    rxdp->wb.qword0.hi_dword.fd_id);
1271 				ret = -1;
1272 			} else
1273 				PMD_DRV_LOG(ERR, "invalid programming status"
1274 					    " reported, error = %u.", error);
1275 		} else
1276 			PMD_DRV_LOG(INFO, "unknown programming status"
1277 				    " reported, len = %d, id = %u.", len, id);
1278 		rxdp->wb.qword1.status_error_len = 0;
1279 		rxq->rx_tail++;
1280 		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
1281 			rxq->rx_tail = 0;
1282 		if (rxq->rx_tail == 0)
1283 			I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1284 		else
1285 			I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
1286 	}
1287 
1288 	return ret;
1289 }
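
/*
 * Note: the hardware reports the outcome of every programming packet as a
 * special write-back descriptor on the FDIR RX ring.  The DD bit marks a
 * completed write-back, the PROGID field identifies it as a flow director
 * status, and the error field distinguishes "table full" (add failed) from
 * "no entry" (delete failed).  The tail register is then advanced to one
 * position behind the software read index.
 */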
1290 
1291 static inline void
1292 i40e_fdir_programming_status_cleanup(struct i40e_rx_queue *rxq)
1293 {
1294 	uint16_t retry_count = 0;
1295 
1296 	/* capture the previous error report (if any) from the RX ring */
1297 	while ((i40e_check_fdir_programming_status(rxq) < 0) &&
1298 			(++retry_count < I40E_FDIR_NUM_RX_DESC))
1299 		PMD_DRV_LOG(INFO, "error report captured.");
1300 }
1301 
1302 static int
1303 i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
1304 			 struct i40e_fdir_filter *filter)
1305 {
1306 	rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
1307 	if (input->input.flow_ext.pkt_template) {
1308 		filter->fdir.input.flow.raw_flow.packet = NULL;
1309 		filter->fdir.input.flow.raw_flow.length =
1310 			rte_hash_crc(input->input.flow.raw_flow.packet,
1311 				     input->input.flow.raw_flow.length,
1312 				     input->input.flow.raw_flow.pctype);
1313 	}
1314 	return 0;
1315 }
1316 
1317 /* Check whether the flow director filter already exists */
1318 static struct i40e_fdir_filter *
1319 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
1320 			const struct i40e_fdir_input *input)
1321 {
1322 	int ret;
1323 
1324 	if (input->flow_ext.pkt_template)
1325 		ret = rte_hash_lookup_with_hash(fdir_info->hash_table,
1326 						(const void *)input,
1327 						input->flow.raw_flow.length);
1328 	else
1329 		ret = rte_hash_lookup(fdir_info->hash_table,
1330 				      (const void *)input);
1331 	if (ret < 0)
1332 		return NULL;
1333 
1334 	return fdir_info->hash_map[ret];
1335 }
1336 
1337 /* Add a flow director filter into the SW list */
1338 static int
1339 i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
1340 {
1341 	struct i40e_fdir_info *fdir_info = &pf->fdir;
1342 	struct i40e_fdir_filter *hash_filter;
1343 	int ret;
1344 
1345 	if (filter->fdir.input.flow_ext.pkt_template)
1346 		ret = rte_hash_add_key_with_hash(fdir_info->hash_table,
1347 				 &filter->fdir.input,
1348 				 filter->fdir.input.flow.raw_flow.length);
1349 	else
1350 		ret = rte_hash_add_key(fdir_info->hash_table,
1351 				       &filter->fdir.input);
1352 	if (ret < 0) {
1353 		PMD_DRV_LOG(ERR,
1354 			    "Failed to insert fdir filter to hash table %d!",
1355 			    ret);
1356 		return ret;
1357 	}
1358 
1359 	if (fdir_info->hash_map[ret])
1360 		return -1;
1361 
1362 	hash_filter = &fdir_info->fdir_filter_array[ret];
1363 	rte_memcpy(hash_filter, filter, sizeof(*filter));
1364 	fdir_info->hash_map[ret] = hash_filter;
1365 	TAILQ_INSERT_TAIL(&fdir_info->fdir_list, hash_filter, rules);
1366 
1367 	return 0;
1368 }
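
/*
 * Note: software filters are tracked in three structures at once: rte_hash
 * maps the filter input to an index, fdir_filter_array[] owns the filter
 * memory at that index, and fdir_list chains the active entries for
 * iteration.  A sketch of the lookup path that mirrors the insert above:
 *
 *	ret = rte_hash_lookup(fdir_info->hash_table, &input);
 *	if (ret >= 0)
 *		filter = fdir_info->hash_map[ret];
 */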
1369 
1370 /* Delete a flow director filter from the SW list */
1371 int
1372 i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
1373 {
1374 	struct i40e_fdir_info *fdir_info = &pf->fdir;
1375 	struct i40e_fdir_filter *filter;
1376 	int ret;
1377 
1378 	if (input->flow_ext.pkt_template)
1379 		ret = rte_hash_del_key_with_hash(fdir_info->hash_table,
1380 						 input,
1381 						 input->flow.raw_flow.length);
1382 	else
1383 		ret = rte_hash_del_key(fdir_info->hash_table, input);
1384 	if (ret < 0) {
1385 		PMD_DRV_LOG(ERR,
1386 			    "Failed to delete fdir filter to hash table %d!",
1387 			    ret);
1388 		return ret;
1389 	}
1390 	filter = fdir_info->hash_map[ret];
1391 	fdir_info->hash_map[ret] = NULL;
1392 
1393 	TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
1394 
1395 	return 0;
1396 }
1397 
1398 struct rte_flow *
1399 i40e_fdir_entry_pool_get(struct i40e_fdir_info *fdir_info)
1400 {
1401 	struct rte_flow *flow = NULL;
1402 	uint64_t slab = 0;
1403 	uint32_t pos = 0;
1404 	uint32_t i = 0;
1405 	int ret;
1406 
1407 	if (fdir_info->fdir_actual_cnt >=
1408 			fdir_info->fdir_space_size) {
1409 		PMD_DRV_LOG(ERR, "Fdir space full");
1410 		return NULL;
1411 	}
1412 
1413 	ret = rte_bitmap_scan(fdir_info->fdir_flow_pool.bitmap, &pos,
1414 			&slab);
1415 
1416 	/* normally this won't happen, as fdir_actual_cnt should match
1417 	 * the number of set bits in fdir_flow_pool, but handle this
1418 	 * error condition here to be safe
1419 	 */
1420 	if (ret == 0) {
1421 		PMD_DRV_LOG(ERR, "fdir_actual_cnt out of sync");
1422 		return NULL;
1423 	}
1424 
1425 	i = rte_bsf64(slab);
1426 	pos += i;
1427 	rte_bitmap_clear(fdir_info->fdir_flow_pool.bitmap, pos);
1428 	flow = &fdir_info->fdir_flow_pool.pool[pos].flow;
1429 
1430 	memset(flow, 0, sizeof(struct rte_flow));
1431 
1432 	return flow;
1433 }
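
/*
 * Note: the flow entry pool is a simple bitmap allocator in which a set bit
 * means the slot is free.  rte_bitmap_scan() returns a 64-bit slab that
 * contains at least one free slot, rte_bsf64() picks the first free bit in
 * that slab, and clearing the bit marks the slot as in use.
 * i40e_fdir_entry_pool_put() below sets the bit again to return the slot.
 */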
1434 
1435 void
1436 i40e_fdir_entry_pool_put(struct i40e_fdir_info *fdir_info,
1437 		struct rte_flow *flow)
1438 {
1439 	struct i40e_fdir_entry *f;
1440 
1441 	f = FLOW_TO_FLOW_BITMAP(flow);
1442 	rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, f->idx);
1443 }
1444 
1445 static int
1446 i40e_flow_store_flex_pit(struct i40e_pf *pf,
1447 			 struct i40e_fdir_flex_pit *flex_pit,
1448 			 enum i40e_flxpld_layer_idx layer_idx,
1449 			 uint8_t raw_id)
1450 {
1451 	uint8_t field_idx;
1452 
1453 	field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
1454 	/* Check whether the configuration conflicts with the existing one */
1455 	if (pf->fdir.flex_pit_flag[layer_idx] &&
1456 	    (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
1457 	     pf->fdir.flex_set[field_idx].size != flex_pit->size ||
1458 	     pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
1459 		return -1;
1460 
1461 	/* Check if the configuration exists. */
1462 	if (pf->fdir.flex_pit_flag[layer_idx] &&
1463 	    (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
1464 	     pf->fdir.flex_set[field_idx].size == flex_pit->size &&
1465 	     pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
1466 		return 1;
1467 
1468 	pf->fdir.flex_set[field_idx].src_offset =
1469 		flex_pit->src_offset;
1470 	pf->fdir.flex_set[field_idx].size =
1471 		flex_pit->size;
1472 	pf->fdir.flex_set[field_idx].dst_offset =
1473 		flex_pit->dst_offset;
1474 
1475 	return 0;
1476 }
1477 
1478 static void
1479 i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
1480 			    enum i40e_flxpld_layer_idx layer_idx,
1481 			    uint8_t raw_id)
1482 {
1483 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1484 	uint32_t flx_pit, flx_ort;
1485 	uint16_t min_next_off = 0;
1486 	uint8_t field_idx;
1487 	uint8_t i;
1488 
1489 	if (raw_id) {
1490 		flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
1491 			  (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
1492 			  (layer_idx * I40E_MAX_FLXPLD_FIED);
1493 		I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
1494 	}
1495 
1496 	/* Set flex pit */
1497 	for (i = 0; i < raw_id; i++) {
1498 		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
1499 		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
1500 				     pf->fdir.flex_set[field_idx].size,
1501 				     pf->fdir.flex_set[field_idx].dst_offset);
1502 
1503 		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
1504 		min_next_off = pf->fdir.flex_set[field_idx].src_offset +
1505 			pf->fdir.flex_set[field_idx].size;
1506 	}
1507 
1508 	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
1509 		/* program the unused registers, obeying the register constraints */
1510 		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
1511 		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
1512 				     NONUSE_FLX_PIT_DEST_OFF);
1513 		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
1514 		min_next_off++;
1515 	}
1516 
1517 	pf->fdir.flex_pit_flag[layer_idx] = 1;
1518 }
1519 
1520 static int
1521 i40e_flow_store_flex_mask(struct i40e_pf *pf,
1522 			  enum i40e_filter_pctype pctype,
1523 			  uint8_t *mask)
1524 {
1525 	struct i40e_fdir_flex_mask flex_mask;
1526 	uint8_t nb_bitmask = 0;
1527 	uint16_t mask_tmp;
1528 	uint8_t i;
1529 
1530 	memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
1531 	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
1532 		mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
1533 		if (mask_tmp) {
1534 			flex_mask.word_mask |=
1535 				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
1536 			if (mask_tmp != UINT16_MAX) {
1537 				flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
1538 				flex_mask.bitmask[nb_bitmask].offset =
1539 					i / sizeof(uint16_t);
1540 				nb_bitmask++;
1541 				if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
1542 					return -1;
1543 			}
1544 		}
1545 	}
1546 	flex_mask.nb_bitmask = nb_bitmask;
1547 
1548 	if (pf->fdir.flex_mask_flag[pctype] &&
1549 	    (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
1550 		    sizeof(struct i40e_fdir_flex_mask))))
1551 		return -2;
1552 	else if (pf->fdir.flex_mask_flag[pctype] &&
1553 		 !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
1554 			  sizeof(struct i40e_fdir_flex_mask))))
1555 		return 1;
1556 
1557 	memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
1558 	       sizeof(struct i40e_fdir_flex_mask));
1559 	return 0;
1560 }
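
/*
 * Editor's note (illustrative worked example, not part of the upstream file):
 * each 16-bit word of the user-supplied flex mask is classified above. A
 * fully-set word (0xFFFF) only sets the matching bit in word_mask; a
 * partially-set word additionally records an inverted per-bit mask. Assuming
 * I40E_WORD() packs its first argument into the high byte, mask bytes
 * {0xFF, 0x00} at word 0 give mask_tmp = 0xFF00, set bit 0 of word_mask, and
 * store bitmask[0] = { .offset = 0, .mask = (uint16_t)~0xFF00 = 0x00FF }.
 */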
1561 
1562 static void
1563 i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
1564 			    enum i40e_filter_pctype pctype)
1565 {
1566 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1567 	struct i40e_fdir_flex_mask *flex_mask;
1568 	uint32_t flxinset, fd_mask;
1569 	uint8_t i;
1570 
1571 	/* Set flex mask */
1572 	flex_mask = &pf->fdir.flex_mask[pctype];
1573 	flxinset = (flex_mask->word_mask <<
1574 		    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
1575 		I40E_PRTQF_FD_FLXINSET_INSET_MASK;
1576 	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
1577 
1578 	for (i = 0; i < flex_mask->nb_bitmask; i++) {
1579 		fd_mask = (flex_mask->bitmask[i].mask <<
1580 			   I40E_PRTQF_FD_MSK_MASK_SHIFT) &
1581 			   I40E_PRTQF_FD_MSK_MASK_MASK;
1582 		fd_mask |= ((flex_mask->bitmask[i].offset +
1583 			     I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
1584 			    I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
1585 				I40E_PRTQF_FD_MSK_OFFSET_MASK;
1586 		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
1587 	}
1588 
1589 	pf->fdir.flex_mask_flag[pctype] = 1;
1590 }
1591 
1592 static inline unsigned char *
1593 i40e_find_available_buffer(struct rte_eth_dev *dev)
1594 {
1595 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1596 	struct i40e_fdir_info *fdir_info = &pf->fdir;
1597 	struct i40e_tx_queue *txq = pf->fdir.txq;
1598 
1599 	/* No buffer is known to be available: scan forward from the
1600 	 * current descriptor and count completed (DD) descriptors until
1601 	 * the first one that is not yet done.
1602 	 */
1603 	if (fdir_info->txq_available_buf_count <= 0) {
1604 		uint16_t tmp_tail;
1605 		volatile struct i40e_tx_desc *tmp_txdp;
1606 
1607 		tmp_tail = txq->tx_tail;
1608 		tmp_txdp = &txq->tx_ring[tmp_tail + 1];
1609 
1610 		do {
1611 			if ((tmp_txdp->cmd_type_offset_bsz &
1612 					rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1613 					rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1614 				fdir_info->txq_available_buf_count++;
1615 			else
1616 				break;
1617 
1618 			tmp_tail += 2;
1619 			if (tmp_tail >= txq->nb_tx_desc)
1620 				tmp_tail = 0;
1621 		} while (tmp_tail != txq->tx_tail);
1622 	}
1623 
1624 	if (fdir_info->txq_available_buf_count > 0)
1625 		fdir_info->txq_available_buf_count--;
1626 	else
1627 		return NULL;
1628 	return (unsigned char *)fdir_info->prg_pkt[txq->tx_tail >> 1];
1629 }
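
/*
 * Editor's note (not part of the upstream file): descriptors in the FDIR TX
 * ring are consumed in pairs (programming descriptor followed by a packet
 * descriptor), so the scan above inspects the data descriptor at tmp_tail + 1,
 * advances by 2, and the raw packet buffer for the next operation is
 * prg_pkt[tx_tail >> 1]. txq_available_buf_count simply caches how many
 * completed pairs were found on the last scan.
 */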
1630 
1631 /**
1632  * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
1633  * @dev: pointer to the ethernet device
1634  * @filter: fdir filter entry
1635  * @add: 0 - delete, 1 - add
1636  */
1637 int
1638 i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
1639 			      const struct i40e_fdir_filter_conf *filter,
1640 			      bool add)
1641 {
1642 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1643 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1644 	enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
1645 	struct i40e_fdir_info *fdir_info = &pf->fdir;
1646 	uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
1647 	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
1648 	struct i40e_fdir_flex_pit flex_pit;
1649 	enum i40e_filter_pctype pctype;
1650 	struct i40e_fdir_filter *node;
1651 	unsigned char *pkt = NULL;
1652 	bool cfg_flex_pit = true;
1653 	bool wait_status = true;
1654 	uint8_t field_idx;
1655 	int ret = 0;
1656 	int i;
1657 
1658 	if (pf->fdir.fdir_vsi == NULL) {
1659 		PMD_DRV_LOG(ERR, "FDIR is not enabled");
1660 		return -ENOTSUP;
1661 	}
1662 
1663 	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1664 		PMD_DRV_LOG(ERR, "Invalid queue ID");
1665 		return -EINVAL;
1666 	}
1667 	if (filter->input.flow_ext.is_vf &&
1668 	    filter->input.flow_ext.dst_id >= pf->vf_num) {
1669 		PMD_DRV_LOG(ERR, "Invalid VF ID");
1670 		return -EINVAL;
1671 	}
1672 	if (filter->input.flow_ext.pkt_template) {
1673 		if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN ||
1674 		    !filter->input.flow.raw_flow.packet) {
1675 			PMD_DRV_LOG(ERR, "Invalid raw packet template"
1676 				" flow filter parameters!");
1677 			return -EINVAL;
1678 		}
1679 		pctype = filter->input.flow.raw_flow.pctype;
1680 	} else {
1681 		pctype = filter->input.pctype;
1682 	}
1683 
1684 	/* Check if there is the filter in SW list */
1685 	memset(&check_filter, 0, sizeof(check_filter));
1686 	i40e_fdir_filter_convert(filter, &check_filter);
1687 
1688 	if (add) {
1689 		if (!filter->input.flow_ext.customized_pctype) {
1690 			for (i = 0; i < filter->input.flow_ext.raw_id; i++) {
1691 				layer_idx = filter->input.flow_ext.layer_idx;
1692 				field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
1693 				flex_pit = filter->input.flow_ext.flex_pit[field_idx];
1694 
1695 				/* Store flex pit to SW */
1696 				ret = i40e_flow_store_flex_pit(pf, &flex_pit,
1697 							       layer_idx, i);
1698 				if (ret < 0) {
1699 					PMD_DRV_LOG(ERR, "Conflict with the"
1700 						    " first flexible rule.");
1701 					return -EINVAL;
1702 				} else if (ret > 0) {
1703 					cfg_flex_pit = false;
1704 				}
1705 			}
1706 
1707 			if (cfg_flex_pit)
1708 				i40e_flow_set_fdir_flex_pit(pf, layer_idx,
1709 						filter->input.flow_ext.raw_id);
1710 
1711 			/* Store flex mask to SW */
1712 			for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++)
1713 				flex_mask[i] =
1714 					filter->input.flow_ext.flex_mask[i];
1715 
1716 			ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
1717 			if (ret == -1) {
1718 				PMD_DRV_LOG(ERR, "Exceed maximal"
1719 					    " number of bitmasks");
1720 				return -EINVAL;
1721 			} else if (ret == -2) {
1722 				PMD_DRV_LOG(ERR, "Conflict with the"
1723 					    " first flexible rule");
1724 				return -EINVAL;
1725 			} else if (ret == 0) {
1726 				i40e_flow_set_fdir_flex_msk(pf, pctype);
1727 			}
1728 		}
1729 
1730 		ret = i40e_sw_fdir_filter_insert(pf, &check_filter);
1731 		if (ret < 0) {
1732 			PMD_DRV_LOG(ERR,
1733 				    "Conflict with existing flow director rules!");
1734 			return -EINVAL;
1735 		}
1736 
1737 		if (fdir_info->fdir_invalprio == 1 &&
1738 				fdir_info->fdir_guarantee_free_space > 0)
1739 			wait_status = false;
1740 	} else {
1741 		node = i40e_sw_fdir_filter_lookup(fdir_info,
1742 				&check_filter.fdir.input);
1743 		if (!node) {
1744 			PMD_DRV_LOG(ERR,
1745 				    "There's no corresponding flow director filter!");
1746 			return -EINVAL;
1747 		}
1748 
1749 		ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
1750 		if (ret < 0) {
1751 			PMD_DRV_LOG(ERR,
1752 					"Error deleting fdir rule from hash table!");
1753 			return -EINVAL;
1754 		}
1755 
1756 		pf->fdir.flex_mask_flag[pctype] = 0;
1757 
1758 		if (fdir_info->fdir_invalprio == 1)
1759 			wait_status = false;
1760 	}
1761 
1762 	/* find a buffer to store the pkt */
1763 	pkt = i40e_find_available_buffer(dev);
1764 	if (pkt == NULL)
1765 		goto error_op;
1766 
1767 	memset(pkt, 0, I40E_FDIR_PKT_LEN);
1768 	ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
1769 	if (ret < 0) {
1770 		PMD_DRV_LOG(ERR, "Failed to construct packet for FDIR.");
1771 		goto error_op;
1772 	}
1773 
1774 	if (hw->mac.type == I40E_MAC_X722) {
1775 		/* get translated pctype value in fd pctype register */
1776 		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1777 			hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1778 	}
1779 
1780 	ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add,
1781 			wait_status);
1782 	if (ret < 0) {
1783 		PMD_DRV_LOG(ERR, "FDIR programming failed for PCTYPE(%u).",
1784 			    pctype);
1785 		goto error_op;
1786 	}
1787 
1788 	if (add) {
1789 		fdir_info->fdir_actual_cnt++;
1790 		if (fdir_info->fdir_invalprio == 1 &&
1791 				fdir_info->fdir_guarantee_free_space > 0)
1792 			fdir_info->fdir_guarantee_free_space--;
1793 	} else {
1794 		fdir_info->fdir_actual_cnt--;
1795 		if (fdir_info->fdir_invalprio == 1 &&
1796 				fdir_info->fdir_guarantee_free_space <
1797 				fdir_info->fdir_guarantee_total_space)
1798 			fdir_info->fdir_guarantee_free_space++;
1799 	}
1800 
1801 	return ret;
1802 
1803 error_op:
1804 	/* roll back */
1805 	if (add)
1806 		i40e_sw_fdir_filter_del(pf, &check_filter.fdir.input);
1807 	else
1808 		i40e_sw_fdir_filter_insert(pf, &check_filter);
1809 
1810 	return ret;
1811 }
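
/*
 * Editor's sketch (not part of the upstream source): a minimal caller of
 * i40e_flow_add_del_fdir_filter(). The match fields inside conf.input.flow
 * are left zeroed for brevity; a real caller would fill them for the chosen
 * pctype. The helper name example_fdir_redirect() is hypothetical.
 */
static int
example_fdir_redirect(struct rte_eth_dev *dev, uint16_t rx_queue, bool enable)
{
	struct i40e_fdir_filter_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.soft_id = 1;                       /* rule id written into fd_id */
	conf.input.pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	conf.action.behavior = I40E_FDIR_ACCEPT; /* direct matches to a queue */
	conf.action.rx_queue = rx_queue;

	/* add == true programs the rule, false removes the same rule */
	return i40e_flow_add_del_fdir_filter(dev, &conf, enable);
}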
1812 
1813 /*
1814  * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
1815  * This is done with a Flow Director Programming Descriptor followed by a
1816  * packet structure that contains the filter fields that need to match.
1817  * @pf: board private structure
1818  * @pctype: packet classification type
1819  * @filter: fdir filter entry
1820  * @add: 0 - delete, 1 - add
1821  */
1822 static int
1823 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
1824 				  enum i40e_filter_pctype pctype,
1825 				  const struct i40e_fdir_filter_conf *filter,
1826 				  bool add, bool wait_status)
1827 {
1828 	struct i40e_tx_queue *txq = pf->fdir.txq;
1829 	struct i40e_rx_queue *rxq = pf->fdir.rxq;
1830 	const struct i40e_fdir_action *fdir_action = &filter->action;
1831 	volatile struct i40e_tx_desc *txdp;
1832 	volatile struct i40e_filter_program_desc *fdirdp;
1833 	uint32_t td_cmd;
1834 	uint16_t vsi_id;
1835 	uint8_t dest;
1836 	uint32_t i;
1837 
1838 	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
1839 	fdirdp = (volatile struct i40e_filter_program_desc *)
1840 				(&txq->tx_ring[txq->tx_tail]);
1841 
1842 	fdirdp->qindex_flex_ptype_vsi =
1843 			rte_cpu_to_le_32((fdir_action->rx_queue <<
1844 					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1845 					  I40E_TXD_FLTR_QW0_QINDEX_MASK);
1846 
1847 	fdirdp->qindex_flex_ptype_vsi |=
1848 			rte_cpu_to_le_32((fdir_action->flex_off <<
1849 					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
1850 					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
1851 
1852 	fdirdp->qindex_flex_ptype_vsi |=
1853 			rte_cpu_to_le_32((pctype <<
1854 					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
1855 					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);
1856 
1857 	if (filter->input.flow_ext.is_vf)
1858 		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
1859 	else
1860 		/* Use LAN VSI Id by default */
1861 		vsi_id = pf->main_vsi->vsi_id;
1862 	fdirdp->qindex_flex_ptype_vsi |=
1863 		rte_cpu_to_le_32(((uint32_t)vsi_id <<
1864 				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
1865 				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
1866 
1867 	fdirdp->dtype_cmd_cntindex =
1868 			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
1869 
1870 	if (add)
1871 		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1872 				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1873 				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1874 	else
1875 		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1876 				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1877 				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1878 
1879 	if (fdir_action->behavior == I40E_FDIR_REJECT)
1880 		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1881 	else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
1882 		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1883 	else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
1884 		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
1885 	else {
1886 		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
1887 		return -EINVAL;
1888 	}
1889 
1890 	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
1891 				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
1892 				I40E_TXD_FLTR_QW1_DEST_MASK);
1893 
1894 	fdirdp->dtype_cmd_cntindex |=
1895 		rte_cpu_to_le_32((fdir_action->report_status <<
1896 				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
1897 				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
1898 
1899 	fdirdp->dtype_cmd_cntindex |=
1900 			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
1901 	fdirdp->dtype_cmd_cntindex |=
1902 			rte_cpu_to_le_32(
1903 			((uint32_t)pf->fdir.match_counter_index <<
1904 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1905 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
1906 
1907 	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
1908 
1909 	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
1910 	txdp = &txq->tx_ring[txq->tx_tail + 1];
1911 	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr[txq->tx_tail >> 1]);
1912 
1913 	td_cmd = I40E_TX_DESC_CMD_EOP |
1914 		 I40E_TX_DESC_CMD_RS  |
1915 		 I40E_TX_DESC_CMD_DUMMY;
1916 
1917 	txdp->cmd_type_offset_bsz =
1918 		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
1919 
1920 	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
1921 	if (txq->tx_tail >= txq->nb_tx_desc)
1922 		txq->tx_tail = 0;
1923 	/* Update the tx tail register */
1924 	rte_wmb();
1925 
1926 	/* fdir program rx queue cleanup */
1927 	i40e_fdir_programming_status_cleanup(rxq);
1928 
1929 	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
1930 
1931 	if (wait_status) {
1932 		for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
1933 			if ((txdp->cmd_type_offset_bsz &
1934 					rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1935 					rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1936 				break;
1937 			rte_delay_us(1);
1938 		}
1939 		if (i >= I40E_FDIR_MAX_WAIT_US) {
1940 			PMD_DRV_LOG(ERR,
1941 			    "Failed to program FDIR filter: timed out waiting for DD on the tx queue.");
1942 			return -ETIMEDOUT;
1943 		}
1944 		/* delay a further 10 ms in total before checking the programming status */
1945 		rte_delay_us(I40E_FDIR_MAX_WAIT_US);
1946 		if (i40e_check_fdir_programming_status(rxq) < 0) {
1947 			PMD_DRV_LOG(ERR,
1948 			    "Failed to program FDIR filter: programming status reported.");
1949 			return -ETIMEDOUT;
1950 		}
1951 	}
1952 
1953 	return 0;
1954 }
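
/*
 * Editor's note (not part of the upstream file): the routine above fills the
 * descriptor pair at tx_tail (the programming descriptor) and tx_tail + 1
 * (a dummy data descriptor pointing at the template packet), advances the
 * tail by 2 and, when wait_status is set, polls the data descriptor for the
 * DD bit and then checks the programming status reported on the dedicated
 * FDIR RX queue.
 */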
1955 
1956 /*
1957  * i40e_fdir_flush - clear all filters of Flow Director table
1958  * @pf: board private structure
1959  */
1960 int
1961 i40e_fdir_flush(struct rte_eth_dev *dev)
1962 {
1963 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1964 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1965 	uint32_t reg;
1966 	uint16_t guarant_cnt, best_cnt;
1967 	uint16_t i;
1968 
1969 	I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
1970 	I40E_WRITE_FLUSH(hw);
1971 
1972 	for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
1973 		rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
1974 		reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
1975 		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
1976 			break;
1977 	}
1978 	if (i >= I40E_FDIR_FLUSH_RETRY) {
1979 		PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
1980 		return -ETIMEDOUT;
1981 	}
1982 	guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
1983 				I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
1984 				I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
1985 	best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
1986 				I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
1987 				I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
1988 	if (guarant_cnt != 0 || best_cnt != 0) {
1989 		PMD_DRV_LOG(ERR, "Failed to flush FD table.");
1990 		return -ENOSYS;
1991 	} else
1992 		PMD_DRV_LOG(INFO, "FD table flush succeeded.");
1993 	return 0;
1994 }
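
/*
 * Editor's sketch (not part of the upstream source): flushing the hardware
 * table does not touch the software list kept in pf->fdir, so a caller that
 * wants a clean restart could flush and then replay the stored rules. The
 * helper name example_fdir_reset() is hypothetical.
 */
static int
example_fdir_reset(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	ret = i40e_fdir_flush(dev);        /* clear all rules in the FD table */
	if (ret < 0)
		return ret;

	i40e_fdir_filter_restore(pf);      /* re-program rules from the SW list */
	return 0;
}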
1995 
1996 static inline void
1997 i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
1998 			struct rte_eth_flex_payload_cfg *flex_set,
1999 			uint16_t *num)
2000 {
2001 	struct i40e_fdir_flex_pit *flex_pit;
2002 	struct rte_eth_flex_payload_cfg *ptr = flex_set;
2003 	uint16_t src, dst, size, j, k;
2004 	uint8_t i, layer_idx;
2005 
2006 	for (layer_idx = I40E_FLXPLD_L2_IDX;
2007 	     layer_idx <= I40E_FLXPLD_L4_IDX;
2008 	     layer_idx++) {
2009 		if (layer_idx == I40E_FLXPLD_L2_IDX)
2010 			ptr->type = RTE_ETH_L2_PAYLOAD;
2011 		else if (layer_idx == I40E_FLXPLD_L3_IDX)
2012 			ptr->type = RTE_ETH_L3_PAYLOAD;
2013 		else if (layer_idx == I40E_FLXPLD_L4_IDX)
2014 			ptr->type = RTE_ETH_L4_PAYLOAD;
2015 
2016 		for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
2017 			flex_pit = &pf->fdir.flex_set[layer_idx *
2018 				I40E_MAX_FLXPLD_FIED + i];
2019 			if (flex_pit->size == 0)
2020 				continue;
2021 			src = flex_pit->src_offset * sizeof(uint16_t);
2022 			dst = flex_pit->dst_offset * sizeof(uint16_t);
2023 			size = flex_pit->size * sizeof(uint16_t);
2024 			for (j = src, k = dst; j < src + size; j++, k++)
2025 				ptr->src_offset[k] = j;
2026 		}
2027 		(*num)++;
2028 		ptr++;
2029 	}
2030 }
2031 
2032 static inline void
2033 i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
2034 			struct rte_eth_fdir_flex_mask *flex_mask,
2035 			uint16_t *num)
2036 {
2037 	struct i40e_fdir_flex_mask *mask;
2038 	struct rte_eth_fdir_flex_mask *ptr = flex_mask;
2039 	uint16_t flow_type;
2040 	uint8_t i, j;
2041 	uint16_t off_bytes, mask_tmp;
2042 
2043 	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2044 	     i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
2045 	     i++) {
2046 		mask =  &pf->fdir.flex_mask[i];
2047 		flow_type = i40e_pctype_to_flowtype(pf->adapter,
2048 						    (enum i40e_filter_pctype)i);
2049 		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
2050 			continue;
2051 
2052 		for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
2053 			if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
2054 				ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
2055 				ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
2056 			} else {
2057 				ptr->mask[j * sizeof(uint16_t)] = 0x0;
2058 				ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
2059 			}
2060 		}
2061 		for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
2062 			off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
2063 			mask_tmp = ~mask->bitmask[j].mask;
2064 			ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
2065 			ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
2066 		}
2067 		ptr->flow_type = flow_type;
2068 		ptr++;
2069 		(*num)++;
2070 	}
2071 }
2072 
2073 /*
2074  * i40e_fdir_info_get - get information of Flow Director
2075  * @dev: ethernet device to get info from
2076  * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with
2077  *    the flow director information.
2078  */
2079 void
2080 i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
2081 {
2082 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2083 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2084 	uint16_t num_flex_set = 0;
2085 	uint16_t num_flex_mask = 0;
2086 	uint16_t i;
2087 
2088 	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
2089 		fdir->mode = RTE_FDIR_MODE_PERFECT;
2090 	else
2091 		fdir->mode = RTE_FDIR_MODE_NONE;
2092 
2093 	fdir->guarant_spc =
2094 		(uint32_t)hw->func_caps.fd_filters_guaranteed;
2095 	fdir->best_spc =
2096 		(uint32_t)hw->func_caps.fd_filters_best_effort;
2097 	fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
2098 	fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
2099 	for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
2100 		fdir->flow_types_mask[i] = 0ULL;
2101 	fdir->flex_payload_unit = sizeof(uint16_t);
2102 	fdir->flex_bitmask_unit = sizeof(uint16_t);
2103 	fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
2104 	fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
2105 	fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;
2106 
2107 	i40e_fdir_info_get_flex_set(pf,
2108 				fdir->flex_conf.flex_set,
2109 				&num_flex_set);
2110 	i40e_fdir_info_get_flex_mask(pf,
2111 				fdir->flex_conf.flex_mask,
2112 				&num_flex_mask);
2113 
2114 	fdir->flex_conf.nb_payloads = num_flex_set;
2115 	fdir->flex_conf.nb_flexmasks = num_flex_mask;
2116 }
2117 
2118 /*
2119  * i40e_fdir_stats_get - get statistics of Flow Director
2120  * @dev: ethernet device to get statistics from
2121  * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with
2122  *    the flow director statistics.
2123  */
2124 void
2125 i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
2126 {
2127 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2128 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2129 	uint32_t fdstat;
2130 
2131 	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2132 	stat->guarant_cnt =
2133 		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2134 			    I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2135 	stat->best_cnt =
2136 		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2137 			    I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2138 }
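
/*
 * Editor's sketch (not part of the upstream source): how the two query
 * helpers above might be combined to report FDIR occupancy. The helper name
 * example_fdir_occupancy_log() is hypothetical.
 */
static void
example_fdir_occupancy_log(struct rte_eth_dev *dev)
{
	struct rte_eth_fdir_info info;
	struct rte_eth_fdir_stats stats;

	memset(&info, 0, sizeof(info));
	memset(&stats, 0, sizeof(stats));

	i40e_fdir_info_get(dev, &info);    /* capacity: guarant_spc / best_spc */
	i40e_fdir_stats_get(dev, &stats);  /* usage: guarant_cnt / best_cnt */

	PMD_DRV_LOG(INFO, "FDIR guaranteed %u/%u, best effort %u/%u",
		    stats.guarant_cnt, info.guarant_spc,
		    stats.best_cnt, info.best_spc);
}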
2139 
2140 /* Restore flow director filter */
2141 void
2142 i40e_fdir_filter_restore(struct i40e_pf *pf)
2143 {
2144 	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
2145 	struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
2146 	struct i40e_fdir_filter *f;
2147 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2148 	uint32_t fdstat;
2149 	uint32_t guarant_cnt;  /**< Number of filters in guaranteed spaces. */
2150 	uint32_t best_cnt;     /**< Number of filters in best effort spaces. */
2151 
2152 	TAILQ_FOREACH(f, fdir_list, rules)
2153 		i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
2154 
2155 	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2156 	guarant_cnt =
2157 		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2158 			   I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2159 	best_cnt =
2160 		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2161 			   I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2162 
2163 	PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d,  Best count: %d",
2164 		    guarant_cnt, best_cnt);
2165 }
2166