/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>
#include <rte_malloc.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"

#define TXGBE_DEFAULT_FLEXBYTES_OFFSET  12 /* default flexbytes offset in bytes */
#define TXGBE_MAX_FLX_SOURCE_OFF        62
#define TXGBE_FDIRCMD_CMD_INTERVAL_US   10

#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
	uint8_t ipv6_addr[16]; \
	uint8_t i; \
	rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr)); \
	(ipv6m) = 0; \
	for (i = 0; i < sizeof(ipv6_addr); i++) { \
		if (ipv6_addr[i] == UINT8_MAX) \
			(ipv6m) |= 1 << i; \
		else if (ipv6_addr[i] != 0) { \
			PMD_DRV_LOG(ERR, "invalid IPv6 address mask."); \
			return -EINVAL; \
		} \
	} \
} while (0)

#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
	uint8_t ipv6_addr[16]; \
	uint8_t i; \
	for (i = 0; i < sizeof(ipv6_addr); i++) { \
		if ((ipv6m) & (1 << i)) \
			ipv6_addr[i] = UINT8_MAX; \
		else \
			ipv6_addr[i] = 0; \
	} \
	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr)); \
} while (0)
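
/*
 * Worked example (illustrative only): for an address mask of
 * ff:ff:00:...:00 -- all-ones in bytes 0 and 1, zero elsewhere --
 * IPV6_ADDR_TO_MASK yields ipv6m = 0x0003; bit i of the 16-bit mask is
 * set iff byte i of the 16-byte address mask is 0xff. Any byte that is
 * neither 0x00 nor 0xff is rejected with -EINVAL. IPV6_MASK_TO_ADDR
 * performs the inverse expansion.
 */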

/**
 *  Initialize Flow Director control registers
 *  @hw: pointer to hardware structure
 *  @fdirctrl: value to write to flow director control register
 **/
static int
txgbe_fdir_enable(struct txgbe_hw *hw, uint32_t fdirctrl)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Prime the keys for hashing */
	wr32(hw, TXGBE_FDIRBKTHKEY, TXGBE_ATR_BUCKET_HASH_KEY);
	wr32(hw, TXGBE_FDIRSIGHKEY, TXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 filters are left
	 */
	fdirctrl |= TXGBE_FDIRCTL_MAXLEN(0xA) |
		    TXGBE_FDIRCTL_FULLTHR(4);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 *     Multiply these timings by 4 if under full Rx load
	 *
	 * So we'll poll for TXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	wr32(hw, TXGBE_FDIRCTL, fdirctrl);
	txgbe_flush(hw);
	for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (rd32(hw, TXGBE_FDIRCTL) & TXGBE_FDIRCTL_INITDONE)
			break;
		msec_delay(1);
	}

	if (i >= TXGBE_FDIR_INIT_DONE_POLL) {
		PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
		return -ETIMEDOUT;
	}
	return 0;
}

/*
 * Set appropriate bits in fdirctrl for: variable reporting levels, moving
 * flexbytes matching field, and drop queue (only for perfect matching mode).
 */
static inline int
configure_fdir_flags(const struct rte_eth_fdir_conf *conf,
		     uint32_t *fdirctrl, uint32_t *flex)
{
	*fdirctrl = 0;
	*flex = 0;

	switch (conf->pballoc) {
	case RTE_ETH_FDIR_PBALLOC_64K:
		/* 8k - 1 signature filters */
		*fdirctrl |= TXGBE_FDIRCTL_BUF_64K;
		break;
	case RTE_ETH_FDIR_PBALLOC_128K:
		/* 16k - 1 signature filters */
		*fdirctrl |= TXGBE_FDIRCTL_BUF_128K;
		break;
	case RTE_ETH_FDIR_PBALLOC_256K:
		/* 32k - 1 signature filters */
		*fdirctrl |= TXGBE_FDIRCTL_BUF_256K;
		break;
	default:
		/* bad value */
		PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
		return -EINVAL;
	}

	/* status flags: write hash & swindex in the rx descriptor */
	switch (conf->status) {
	case RTE_FDIR_NO_REPORT_STATUS:
		/* do nothing, default mode */
		break;
	case RTE_FDIR_REPORT_STATUS:
		/* report status when the packet matches a fdir rule */
		*fdirctrl |= TXGBE_FDIRCTL_REPORT_MATCH;
		break;
	case RTE_FDIR_REPORT_STATUS_ALWAYS:
		/* always report status */
		*fdirctrl |= TXGBE_FDIRCTL_REPORT_ALWAYS;
		break;
	default:
		/* bad value */
		PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
		return -EINVAL;
	}

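	/*
	 * Note (inferred from the /2 scaling below): the OFST field counts
	 * 2-byte words, so byte offsets are halved before being programmed.
	 */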
	*flex |= TXGBE_FDIRFLEXCFG_BASE_MAC;
	*flex |= TXGBE_FDIRFLEXCFG_OFST(TXGBE_DEFAULT_FLEXBYTES_OFFSET / 2);

	switch (conf->mode) {
	case RTE_FDIR_MODE_SIGNATURE:
		break;
	case RTE_FDIR_MODE_PERFECT:
		*fdirctrl |= TXGBE_FDIRCTL_PERFECT;
		*fdirctrl |= TXGBE_FDIRCTL_DROPQP(conf->drop_queue);
		break;
	default:
		/* bad value */
		PMD_INIT_LOG(ERR, "Invalid fdir_conf->mode value");
		return -EINVAL;
	}

	return 0;
}
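
/*
 * Example (illustrative only): with pballoc = RTE_ETH_FDIR_PBALLOC_64K,
 * status = RTE_FDIR_REPORT_STATUS, mode = RTE_FDIR_MODE_PERFECT and
 * drop_queue = 127, the helper above produces
 *
 *   *fdirctrl = TXGBE_FDIRCTL_BUF_64K | TXGBE_FDIRCTL_REPORT_MATCH |
 *               TXGBE_FDIRCTL_PERFECT | TXGBE_FDIRCTL_DROPQP(127);
 *   *flex     = TXGBE_FDIRFLEXCFG_BASE_MAC | TXGBE_FDIRFLEXCFG_OFST(6);
 */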

int
txgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
	/*
	 * Mask the VM pool and DIPv6 fields, since they are currently not
	 * supported. Also mask the FLEX byte; it will be set in flex_conf.
	 */
	uint32_t fdirm = TXGBE_FDIRMSK_POOL;
	uint32_t fdirtcpm;  /* TCP source and destination port masks. */
	uint32_t fdiripv6m; /* IPv6 source and destination masks. */

	PMD_INIT_FUNC_TRACE();

	if (mode != RTE_FDIR_MODE_SIGNATURE &&
	    mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DRV_LOG(ERR, "Unsupported fdir mode - %d!", mode);
		return -ENOTSUP;
	}

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field. Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 */
	if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0) {
		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
		fdirm |= TXGBE_FDIRMSK_L4P;
	}

	/* TBD: don't support encapsulation yet */
	wr32(hw, TXGBE_FDIRMSK, fdirm);

	/* store the TCP/UDP port masks */
	fdirtcpm = rte_be_to_cpu_16(info->mask.dst_port_mask) << 16;
	fdirtcpm |= rte_be_to_cpu_16(info->mask.src_port_mask);

	/* write all the same so that UDP, TCP and SCTP use the same mask
	 * (little-endian)
	 */
	wr32(hw, TXGBE_FDIRTCPMSK, ~fdirtcpm);
	wr32(hw, TXGBE_FDIRUDPMSK, ~fdirtcpm);
	wr32(hw, TXGBE_FDIRSCTPMSK, ~fdirtcpm);

	/* Store source and destination IPv4 masks (big-endian) */
	wr32(hw, TXGBE_FDIRSIP4MSK, ~info->mask.src_ipv4_mask);
	wr32(hw, TXGBE_FDIRDIP4MSK, ~info->mask.dst_ipv4_mask);

	if (mode == RTE_FDIR_MODE_SIGNATURE) {
		/*
		 * Store source and destination IPv6 masks (bit reversed)
		 */
		fdiripv6m = TXGBE_FDIRIP6MSK_DST(info->mask.dst_ipv6_mask) |
			    TXGBE_FDIRIP6MSK_SRC(info->mask.src_ipv6_mask);

		wr32(hw, TXGBE_FDIRIP6MSK, ~fdiripv6m);
	}

	return 0;
}

static int
txgbe_fdir_store_input_mask(struct rte_eth_dev *dev)
{
	struct rte_eth_fdir_masks *input_mask =
				&dev->data->dev_conf.fdir_conf.mask;
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
	struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
	uint16_t dst_ipv6m = 0;
	uint16_t src_ipv6m = 0;

	if (mode != RTE_FDIR_MODE_SIGNATURE &&
	    mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DRV_LOG(ERR, "Unsupported fdir mode - %d!", mode);
		return -ENOTSUP;
	}

	memset(&info->mask, 0, sizeof(struct txgbe_hw_fdir_mask));
	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
	info->mask.src_port_mask = input_mask->src_port_mask;
	info->mask.dst_port_mask = input_mask->dst_port_mask;
	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
	IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
	IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
	info->mask.src_ipv6_mask = src_ipv6m;
	info->mask.dst_ipv6_mask = dst_ipv6m;

	return 0;
}

int
txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
				uint16_t offset)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int i;

	for (i = 0; i < 64; i++) {
		uint32_t flexreg, flex;

		flexreg = rd32(hw, TXGBE_FDIRFLEXCFG(i / 4));
		flex = TXGBE_FDIRFLEXCFG_BASE_MAC;
		flex |= TXGBE_FDIRFLEXCFG_OFST(offset / 2);
		flexreg &= ~(TXGBE_FDIRFLEXCFG_ALL(~0UL, i % 4));
		flexreg |= TXGBE_FDIRFLEXCFG_ALL(flex, i % 4);
		wr32(hw, TXGBE_FDIRFLEXCFG(i / 4), flexreg);
	}

	txgbe_flush(hw);
	for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (rd32(hw, TXGBE_FDIRCTL) &
			TXGBE_FDIRCTL_INITDONE)
			break;
		msec_delay(1);
	}
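	/*
	 * Note: unlike txgbe_fdir_enable(), a poll timeout here is not
	 * reported; the function returns 0 regardless.
	 */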
	return 0;
}

/*
 * txgbe_set_fdir_flex_conf - check whether the flex payload and mask
 * configuration arguments are valid, and program them
 */
static int
txgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, uint32_t flex)
{
	const struct rte_eth_fdir_flex_conf *conf =
				&dev->data->dev_conf.fdir_conf.flex_conf;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
	const struct rte_eth_flex_payload_cfg *flex_cfg;
	const struct rte_eth_fdir_flex_mask *flex_mask;
	uint16_t flexbytes = 0;
	uint16_t i;

	if (conf == NULL) {
		PMD_DRV_LOG(ERR, "NULL pointer.");
		return -EINVAL;
	}

	flex |= TXGBE_FDIRFLEXCFG_DIA;

	for (i = 0; i < conf->nb_payloads; i++) {
		flex_cfg = &conf->flex_set[i];
		if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
			PMD_DRV_LOG(ERR, "unsupported payload type.");
			return -EINVAL;
		}
		if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
		    (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
		     flex_cfg->src_offset[0] <= TXGBE_MAX_FLX_SOURCE_OFF) {
			flex &= ~TXGBE_FDIRFLEXCFG_OFST_MASK;
			flex |=
			    TXGBE_FDIRFLEXCFG_OFST(flex_cfg->src_offset[0] / 2);
		} else {
			PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
			return -EINVAL;
		}
	}

	for (i = 0; i < conf->nb_flexmasks; i++) {
		flex_mask = &conf->flex_mask[i];
		if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
			PMD_DRV_LOG(ERR, "flexmask should be set globally.");
			return -EINVAL;
		}
		flexbytes = (uint16_t)(((flex_mask->mask[1] << 8) & 0xFF00) |
					((flex_mask->mask[0]) & 0xFF));
		if (flexbytes == UINT16_MAX) {
			flex &= ~TXGBE_FDIRFLEXCFG_DIA;
		} else if (flexbytes != 0) {
			/* TXGBE_FDIRFLEXCFG_DIA is set by default when the mask is set */
			PMD_DRV_LOG(ERR, "invalid flexbytes mask arguments.");
			return -EINVAL;
		}
	}

	info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
	info->flex_bytes_offset = (uint8_t)(TXGBD_FDIRFLEXCFG_OFST(flex) * 2);

	for (i = 0; i < 64; i++) {
		uint32_t flexreg;

		flexreg = rd32(hw, TXGBE_FDIRFLEXCFG(i / 4));
		flexreg &= ~(TXGBE_FDIRFLEXCFG_ALL(~0UL, i % 4));
		flexreg |= TXGBE_FDIRFLEXCFG_ALL(flex, i % 4);
		wr32(hw, TXGBE_FDIRFLEXCFG(i / 4), flexreg);
	}
	return 0;
}

int
txgbe_fdir_configure(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int err;
	uint32_t fdirctrl, flex, pbsize;
	int i;
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

	PMD_INIT_FUNC_TRACE();

	/* only the signature and perfect modes are supported */
	if (mode != RTE_FDIR_MODE_SIGNATURE &&
	    mode != RTE_FDIR_MODE_PERFECT)
		return -ENOSYS;

	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf,
				   &fdirctrl, &flex);
	if (err)
		return err;

	/*
	 * Before enabling Flow Director, the Rx Packet Buffer size
	 * must be reduced.  The new value is the current size minus
	 * flow director memory usage size.
	 */
	pbsize = rd32(hw, TXGBE_PBRXSIZE(0));
	pbsize -= TXGBD_FDIRCTL_BUF_BYTE(fdirctrl);
	wr32(hw, TXGBE_PBRXSIZE(0), pbsize);

	/*
	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non DCB mode otherwise actual total RX PB
	 * would be bigger than programmed and filter space would run into
	 * the PB 0 region.
	 */
	for (i = 1; i < 8; i++)
		wr32(hw, TXGBE_PBRXSIZE(i), 0);

	err = txgbe_fdir_store_input_mask(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Error on storing FD mask");
		return err;
	}

	err = txgbe_fdir_set_input_mask(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Error on setting FD mask");
		return err;
	}

	err = txgbe_set_fdir_flex_conf(dev, flex);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Error on setting FD flexible arguments.");
		return err;
	}

	err = txgbe_fdir_enable(hw, fdirctrl);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Error on enabling FD.");
		return err;
	}
	return 0;
}
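
/*
 * Minimal usage sketch (illustrative, not built): txgbe_fdir_configure()
 * consumes dev_conf.fdir_conf, which an application fills in before
 * rte_eth_dev_configure(). The field values below are arbitrary examples.
 */
#if 0
static void example_fdir_conf(struct rte_eth_conf *conf)
{
	conf->fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
	conf->fdir_conf.pballoc = RTE_ETH_FDIR_PBALLOC_64K;
	conf->fdir_conf.status = RTE_FDIR_REPORT_STATUS;
	conf->fdir_conf.drop_queue = 127;	/* queue used for DROP rules */
}
#endif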

/*
 * Compute the hashes for SW ATR. Note that the bkt_hash field in the
 * txgbe_atr_input structure is never set by this function.
 *  @atr_input: input bitstream to compute the hash on
 *  @key: 32-bit hash key
 **/
static uint32_t
txgbe_atr_compute_hash(struct txgbe_atr_input *atr_input,
				 uint32_t key)
{
	/*
	 * The algorithm is as follows:
	 *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
	 *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
	 *    and A[n] x B[n] is bitwise AND between same length strings
	 *
	 *    K[n] is 16 bits, defined as:
	 *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
	 *       for n modulo 32 < 15, K[n] =
	 *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
	 *
	 *    S[n] is 16 bits, defined as:
	 *       for n >= 15, S[n] = S[n:n - 15]
	 *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
	 *
	 *    To simplify for programming, the algorithm is implemented
	 *    in software this way:
	 *
	 *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
	 *
	 *    for (i = 0; i < 352; i+=32)
	 *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
	 *
	 *    lo_hash_dword[15:0]  ^= Stream[15:0];
	 *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
	 *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
	 *
	 *    hi_hash_dword[31:0]  ^= Stream[351:320];
	 *
	 *    if (key[0])
	 *        hash[15:0] ^= Stream[15:0];
	 *
	 *    for (i = 0; i < 16; i++) {
	 *        if (key[i])
	 *            hash[15:0] ^= lo_hash_dword[(i+15):i];
	 *        if (key[i + 16])
	 *            hash[15:0] ^= hi_hash_dword[(i+15):i];
	 *    }
	 *
	 */
	__be32 *dword_stream = (__be32 *)atr_input;
	__be32 common_hash_dword = 0;
	u32 hi_hash_dword, lo_hash_dword, flow_pool_ptid;
	u32 hash_result = 0;
	u8 i;

	/* record the flow/pool/packet-type bits as they are a key part of the hash */
	flow_pool_ptid = be_to_cpu32(dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 10; i++)
		common_hash_dword ^= dword_stream[i];

	hi_hash_dword = be_to_cpu32(common_hash_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply (Flow ID/VM Pool/Packet Type) bits to hash words */
	hi_hash_dword ^= flow_pool_ptid ^ (flow_pool_ptid >> 16);

	/* Process bits 0 and 16 */
	if (key & 0x0001)
		hash_result ^= lo_hash_dword;
	if (key & 0x00010000)
		hash_result ^= hi_hash_dword;

	/*
	 * Apply the flow ID/VM pool/VLAN ID bits to the lo hash dword. This
	 * had to be delayed because bit 0 of the stream must not be
	 * processed, so the VLAN bits are only added after bit 0 has been
	 * handled.
	 */
	lo_hash_dword ^= flow_pool_ptid ^ (flow_pool_ptid << 16);

	/* process the remaining 30 bits in the key 2 bits at a time */
	for (i = 15; i; i--) {
		if (key & (0x0001 << i))
			hash_result ^= lo_hash_dword >> i;
		if (key & (0x00010000 << i))
			hash_result ^= hi_hash_dword >> i;
	}

	return hash_result;
}

static uint32_t
atr_compute_perfect_hash(struct txgbe_atr_input *input,
		enum rte_eth_fdir_pballoc_type pballoc)
{
	uint32_t bucket_hash;

	bucket_hash = txgbe_atr_compute_hash(input,
				TXGBE_ATR_BUCKET_HASH_KEY);
	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
		bucket_hash &= PERFECT_BUCKET_256KB_HASH_MASK;
	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
		bucket_hash &= PERFECT_BUCKET_128KB_HASH_MASK;
	else
		bucket_hash &= PERFECT_BUCKET_64KB_HASH_MASK;

	return TXGBE_FDIRPIHASH_BKT(bucket_hash);
}

/**
 * txgbe_fdir_check_cmd_complete - poll to check whether FDIRPICMD is complete
 * @hw: pointer to hardware structure
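 * @fdircmd: output argument; receives the last FDIRPICMD value read
 *
 * Polls up to TXGBE_FDIRCMD_CMD_POLL times at intervals of
 * TXGBE_FDIRCMD_CMD_INTERVAL_US microseconds; returns -ETIMEDOUT if the
 * pending command never clears.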
 */
static inline int
txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, uint32_t *fdircmd)
{
	int i;

	for (i = 0; i < TXGBE_FDIRCMD_CMD_POLL; i++) {
		*fdircmd = rd32(hw, TXGBE_FDIRPICMD);
		if (!(*fdircmd & TXGBE_FDIRPICMD_OP_MASK))
			return 0;
		rte_delay_us(TXGBE_FDIRCMD_CMD_INTERVAL_US);
	}

	return -ETIMEDOUT;
}

/*
 * Calculate the hash value needed for signature-match filters. In the FreeBSD
 * driver, this is done by the optimised function
 * txgbe_atr_compute_sig_hash_raptor(). However, that can't be used here, as it
 * doesn't support calculating a hash for an IPv6 filter.
 */
static uint32_t
atr_compute_signature_hash(struct txgbe_atr_input *input,
		enum rte_eth_fdir_pballoc_type pballoc)
{
	uint32_t bucket_hash, sig_hash;

	bucket_hash = txgbe_atr_compute_hash(input,
				TXGBE_ATR_BUCKET_HASH_KEY);
	if (pballoc == RTE_ETH_FDIR_PBALLOC_256K)
		bucket_hash &= SIG_BUCKET_256KB_HASH_MASK;
	else if (pballoc == RTE_ETH_FDIR_PBALLOC_128K)
		bucket_hash &= SIG_BUCKET_128KB_HASH_MASK;
	else
		bucket_hash &= SIG_BUCKET_64KB_HASH_MASK;

	sig_hash = txgbe_atr_compute_hash(input,
				TXGBE_ATR_SIGNATURE_HASH_KEY);

	return TXGBE_FDIRPIHASH_SIG(sig_hash) |
	       TXGBE_FDIRPIHASH_BKT(bucket_hash);
}

/**
 * Write a perfect-match filter. This version can set extra flags in the
 * FDIRPICMD register and supports IPv6. The hash value must be
 * pre-calculated by the caller, since the pballoc value is needed to
 * compute it.
 */
static int
fdir_write_perfect_filter(struct txgbe_hw *hw,
			struct txgbe_atr_input *input, uint8_t queue,
			uint32_t fdircmd, uint32_t fdirhash,
			enum rte_fdir_mode mode)
{
	uint32_t fdirport, fdirflex;
	int err = 0;

	UNREFERENCED_PARAMETER(mode);

	/* record the IPv4 address; the registers are little-endian, so the
	 * big-endian value cannot be written with wr32() as-is
	 */
	wr32(hw, TXGBE_FDIRPISIP4, be_to_le32(input->src_ip[0]));
	wr32(hw, TXGBE_FDIRPIDIP4, be_to_le32(input->dst_ip[0]));

	/* record source and destination port (little-endian) */
	fdirport = TXGBE_FDIRPIPORT_DST(be_to_le16(input->dst_port));
	fdirport |= TXGBE_FDIRPIPORT_SRC(be_to_le16(input->src_port));
	wr32(hw, TXGBE_FDIRPIPORT, fdirport);

	/* record pkt_type (little-endian) and flex_bytes (big-endian) */
	fdirflex = TXGBE_FDIRPIFLEX_FLEX(be_to_npu16(input->flex_bytes));
	fdirflex |= TXGBE_FDIRPIFLEX_PTYPE(be_to_le16(input->pkt_type));
	wr32(hw, TXGBE_FDIRPIFLEX, fdirflex);

	/* configure FDIRHASH register */
	fdirhash |= TXGBE_FDIRPIHASH_VLD;
	wr32(hw, TXGBE_FDIRPIHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	txgbe_flush(hw);

	/* configure FDIRPICMD register */
	fdircmd |= TXGBE_FDIRPICMD_OP_ADD |
		   TXGBE_FDIRPICMD_UPD |
		   TXGBE_FDIRPICMD_LAST |
		   TXGBE_FDIRPICMD_QPENA;
	fdircmd |= TXGBE_FDIRPICMD_FT(input->flow_type);
	fdircmd |= TXGBE_FDIRPICMD_QP(queue);
	fdircmd |= TXGBE_FDIRPICMD_POOL(input->vm_pool);

	wr32(hw, TXGBE_FDIRPICMD, fdircmd);

	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

	err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

	return err;
}

/**
 * Add a signature hash filter. This version can set extra fields in the
 * FDIRPICMD register and no longer verifies the flow_type field. According
 * to the documentation, a flow type of 00 (i.e. not TCP, UDP, or SCTP) is
 * not supported; however, it appears to work OK...
 *  @hw: pointer to hardware structure
 *  @input: unique input dword
 *  @queue: queue index to direct traffic to
 *  @fdircmd: any extra flags to set in fdircmd register
 *  @fdirhash: pre-calculated hash value for the filter
 **/
static int
fdir_add_signature_filter(struct txgbe_hw *hw,
		struct txgbe_atr_input *input, uint8_t queue, uint32_t fdircmd,
		uint32_t fdirhash)
{
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	/* configure FDIRPICMD register */
	fdircmd |= TXGBE_FDIRPICMD_OP_ADD |
		   TXGBE_FDIRPICMD_UPD |
		   TXGBE_FDIRPICMD_LAST |
		   TXGBE_FDIRPICMD_QPENA;
	fdircmd |= TXGBE_FDIRPICMD_FT(input->flow_type);
	fdircmd |= TXGBE_FDIRPICMD_QP(queue);

	fdirhash |= TXGBE_FDIRPIHASH_VLD;
	wr32(hw, TXGBE_FDIRPIHASH, fdirhash);
	wr32(hw, TXGBE_FDIRPICMD, fdircmd);

	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

	err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

	return err;
}

/*
 * This version takes the hash as a parameter so that it can be used for
 * removing both signature and perfect filters.
 */
static int
fdir_erase_filter_raptor(struct txgbe_hw *hw, uint32_t fdirhash)
{
	uint32_t fdircmd = 0;
	int err = 0;

	wr32(hw, TXGBE_FDIRPIHASH, fdirhash);

	/* flush hash to HW */
	txgbe_flush(hw);

	/* Query if filter is present */
	wr32(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_OP_QRY);

	err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & TXGBE_FDIRPICMD_VLD) {
		wr32(hw, TXGBE_FDIRPIHASH, fdirhash);
		txgbe_flush(hw);
		wr32(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_OP_REM);
	}

	err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");

	return err;
}

static inline struct txgbe_fdir_filter *
txgbe_fdir_filter_lookup(struct txgbe_hw_fdir_info *fdir_info,
			 struct txgbe_atr_input *input)
{
	int ret;

	ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)input);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}

static inline int
txgbe_insert_fdir_filter(struct txgbe_hw_fdir_info *fdir_info,
			 struct txgbe_fdir_filter *fdir_filter)
{
	int ret;

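	/* rte_hash_add_key() returns the key's slot index on success;
	 * that index is reused below to address hash_map.
	 */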
	ret = rte_hash_add_key(fdir_info->hash_handle, &fdir_filter->input);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir filter into hash table: %d!",
			    ret);
		return ret;
	}

	fdir_info->hash_map[ret] = fdir_filter;

	TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);

	return 0;
}

static inline int
txgbe_remove_fdir_filter(struct txgbe_hw_fdir_info *fdir_info,
			 struct txgbe_atr_input *input)
{
	int ret;
	struct txgbe_fdir_filter *fdir_filter;

	ret = rte_hash_del_key(fdir_info->hash_handle, input);
	if (ret < 0)
		return ret;

	fdir_filter = fdir_info->hash_map[ret];
	fdir_info->hash_map[ret] = NULL;

	TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
	rte_free(fdir_filter);

	return 0;
}

int
txgbe_fdir_filter_program(struct rte_eth_dev *dev,
			  struct txgbe_fdir_rule *rule,
			  bool del,
			  bool update)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t fdirhash;
	uint8_t queue;
	bool is_perfect = FALSE;
	int err;
	struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
	struct txgbe_fdir_filter *node;

	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	if (fdir_mode >= RTE_FDIR_MODE_PERFECT)
		is_perfect = TRUE;

	if (is_perfect) {
		if (rule->input.flow_type & TXGBE_ATR_L3TYPE_IPV6) {
			PMD_DRV_LOG(ERR, "IPv6 is not supported in"
				    " perfect mode!");
			return -ENOTSUP;
		}
		fdirhash = atr_compute_perfect_hash(&rule->input,
				dev->data->dev_conf.fdir_conf.pballoc);
		fdirhash |= TXGBE_FDIRPIHASH_IDX(rule->soft_id);
	} else {
		fdirhash = atr_compute_signature_hash(&rule->input,
				dev->data->dev_conf.fdir_conf.pballoc);
	}

	if (del) {
		err = txgbe_remove_fdir_filter(info, &rule->input);
		if (err < 0) {
			PMD_DRV_LOG(ERR,
				"No such fdir filter to delete: %d!", err);
			return err;
		}

		err = fdir_erase_filter_raptor(hw, fdirhash);
		if (err < 0)
			PMD_DRV_LOG(ERR, "Failed to delete FDIR filter!");
		else
			PMD_DRV_LOG(DEBUG, "Deleted FDIR filter successfully!");
		return err;
	}

	/* add or update an fdir filter */
	if (rule->fdirflags & TXGBE_FDIRPICMD_DROP) {
		if (!is_perfect) {
			PMD_DRV_LOG(ERR, "Drop option is not supported in"
				    " signature mode.");
			return -EINVAL;
		}
		queue = dev->data->dev_conf.fdir_conf.drop_queue;
	} else if (rule->queue < TXGBE_MAX_RX_QUEUE_NUM) {
		queue = rule->queue;
	} else {
		return -EINVAL;
	}

	node = txgbe_fdir_filter_lookup(info, &rule->input);
	if (node) {
		if (!update) {
			PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
			return -EINVAL;
		}
		node->fdirflags = rule->fdirflags;
		node->fdirhash = fdirhash;
		node->queue = queue;
	} else {
		node = rte_zmalloc("txgbe_fdir",
				   sizeof(struct txgbe_fdir_filter), 0);
		if (!node)
			return -ENOMEM;
		rte_memcpy(&node->input, &rule->input,
			   sizeof(struct txgbe_atr_input));
		node->fdirflags = rule->fdirflags;
		node->fdirhash = fdirhash;
		node->queue = queue;

		err = txgbe_insert_fdir_filter(info, node);
		if (err < 0) {
			rte_free(node);
			return err;
		}
	}

	if (is_perfect)
		err = fdir_write_perfect_filter(hw, &node->input,
						node->queue, node->fdirflags,
						node->fdirhash, fdir_mode);
	else
		err = fdir_add_signature_filter(hw, &node->input,
						node->queue, node->fdirflags,
						node->fdirhash);
	if (err < 0) {
		PMD_DRV_LOG(ERR, "Failed to add FDIR filter!");
		txgbe_remove_fdir_filter(info, &rule->input);
	} else {
		PMD_DRV_LOG(DEBUG, "Added FDIR filter successfully");
	}

	return err;
}
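
/*
 * Note: txgbe_fdir_filter_program() is the single entry point for adding,
 * updating and deleting FDIR filters; in this driver it is typically
 * reached from the rte_flow layer (txgbe_flow.c) when a flow-director
 * style rule is created or destroyed.
 */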

static int
txgbe_fdir_flush(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
	int ret;

	ret = txgbe_reinit_fdir_tables(hw);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
		return ret;
	}

	info->f_add = 0;
	info->f_remove = 0;
	info->add = 0;
	info->remove = 0;

	return ret;
}

/* restore flow director filters */
void
txgbe_fdir_filter_restore(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
	struct txgbe_fdir_filter *node;
	bool is_perfect = FALSE;
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		is_perfect = TRUE;

	if (is_perfect) {
		TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
			(void)fdir_write_perfect_filter(hw,
							      &node->input,
							      node->queue,
							      node->fdirflags,
							      node->fdirhash,
							      fdir_mode);
		}
	} else {
		TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
			(void)fdir_add_signature_filter(hw,
							      &node->input,
							      node->queue,
							      node->fdirflags,
							      node->fdirhash);
		}
	}
}

/* remove all the flow director filters */
int
txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
{
	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
	struct txgbe_fdir_filter *fdir_filter;
	struct txgbe_fdir_filter *filter_flag;
	int ret = 0;

	/* flush flow director */
	rte_hash_reset(fdir_info->hash_handle);
	memset(fdir_info->hash_map, 0,
	       sizeof(struct txgbe_fdir_filter *) * TXGBE_MAX_FDIR_FILTER_NUM);
	filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
		TAILQ_REMOVE(&fdir_info->fdir_list,
			     fdir_filter,
			     entries);
		rte_free(fdir_filter);
	}

	if (filter_flag != NULL)
		ret = txgbe_fdir_flush(dev);

	return ret;
}
975