xref: /f-stack/dpdk/drivers/net/i40e/i40e_ethdev.c (revision 2d9fd380)
1d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2d30ea906Sjfb8856606  * Copyright(c) 2010-2017 Intel Corporation
3a9643ea8Slogwang  */
4a9643ea8Slogwang 
5a9643ea8Slogwang #include <stdio.h>
6a9643ea8Slogwang #include <errno.h>
7a9643ea8Slogwang #include <stdint.h>
8a9643ea8Slogwang #include <string.h>
9a9643ea8Slogwang #include <unistd.h>
10a9643ea8Slogwang #include <stdarg.h>
11a9643ea8Slogwang #include <inttypes.h>
12a9643ea8Slogwang #include <assert.h>
13a9643ea8Slogwang 
14d30ea906Sjfb8856606 #include <rte_common.h>
152bfe3f2eSlogwang #include <rte_eal.h>
16a9643ea8Slogwang #include <rte_string_fns.h>
17a9643ea8Slogwang #include <rte_pci.h>
182bfe3f2eSlogwang #include <rte_bus_pci.h>
19a9643ea8Slogwang #include <rte_ether.h>
20d30ea906Sjfb8856606 #include <rte_ethdev_driver.h>
212bfe3f2eSlogwang #include <rte_ethdev_pci.h>
22a9643ea8Slogwang #include <rte_memzone.h>
23a9643ea8Slogwang #include <rte_malloc.h>
24a9643ea8Slogwang #include <rte_memcpy.h>
25a9643ea8Slogwang #include <rte_alarm.h>
26a9643ea8Slogwang #include <rte_dev.h>
27a9643ea8Slogwang #include <rte_tailq.h>
282bfe3f2eSlogwang #include <rte_hash_crc.h>
29*2d9fd380Sjfb8856606 #include <rte_bitmap.h>
30a9643ea8Slogwang 
31a9643ea8Slogwang #include "i40e_logs.h"
32a9643ea8Slogwang #include "base/i40e_prototype.h"
33a9643ea8Slogwang #include "base/i40e_adminq_cmd.h"
34a9643ea8Slogwang #include "base/i40e_type.h"
35a9643ea8Slogwang #include "base/i40e_register.h"
36a9643ea8Slogwang #include "base/i40e_dcb.h"
37a9643ea8Slogwang #include "i40e_ethdev.h"
38a9643ea8Slogwang #include "i40e_rxtx.h"
39a9643ea8Slogwang #include "i40e_pf.h"
40a9643ea8Slogwang #include "i40e_regs.h"
412bfe3f2eSlogwang #include "rte_pmd_i40e.h"
42a9643ea8Slogwang 
43a9643ea8Slogwang #define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
44a9643ea8Slogwang #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
45d30ea906Sjfb8856606 #define ETH_I40E_SUPPORT_MULTI_DRIVER	"support-multi-driver"
46d30ea906Sjfb8856606 #define ETH_I40E_QUEUE_NUM_PER_VF_ARG	"queue-num-per-vf"
47d30ea906Sjfb8856606 #define ETH_I40E_USE_LATEST_VEC	"use-latest-supported-vec"
484418919fSjohnjiang #define ETH_I40E_VF_MSG_CFG		"vf_msg_cfg"
49a9643ea8Slogwang 
50a9643ea8Slogwang #define I40E_CLEAR_PXE_WAIT_MS     200
510c6bd470Sfengbojiang #define I40E_VSI_TSR_QINQ_STRIP		0x4010
520c6bd470Sfengbojiang #define I40E_VSI_TSR(_i)	(0x00050800 + ((_i) * 4))
53a9643ea8Slogwang 
54a9643ea8Slogwang /* Maximun number of capability elements */
55a9643ea8Slogwang #define I40E_MAX_CAP_ELE_NUM       128
56a9643ea8Slogwang 
572bfe3f2eSlogwang /* Wait count and interval */
58a9643ea8Slogwang #define I40E_CHK_Q_ENA_COUNT       1000
59a9643ea8Slogwang #define I40E_CHK_Q_ENA_INTERVAL_US 1000
60a9643ea8Slogwang 
61a9643ea8Slogwang /* Maximun number of VSI */
62a9643ea8Slogwang #define I40E_MAX_NUM_VSIS          (384UL)
63a9643ea8Slogwang 
64a9643ea8Slogwang #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
65a9643ea8Slogwang 
66a9643ea8Slogwang /* Flow control default timer */
67a9643ea8Slogwang #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
68a9643ea8Slogwang 
69a9643ea8Slogwang /* Flow control enable fwd bit */
70a9643ea8Slogwang #define I40E_PRTMAC_FWD_CTRL   0x00000001
71a9643ea8Slogwang 
72a9643ea8Slogwang /* Receive Packet Buffer size */
73a9643ea8Slogwang #define I40E_RXPBSIZE (968 * 1024)
74a9643ea8Slogwang 
75a9643ea8Slogwang /* Kilobytes shift */
76a9643ea8Slogwang #define I40E_KILOSHIFT 10
77a9643ea8Slogwang 
782bfe3f2eSlogwang /* Flow control default high water */
792bfe3f2eSlogwang #define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
802bfe3f2eSlogwang 
812bfe3f2eSlogwang /* Flow control default low water */
822bfe3f2eSlogwang #define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
832bfe3f2eSlogwang 
84a9643ea8Slogwang /* Receive Average Packet Size in Byte*/
85a9643ea8Slogwang #define I40E_PACKET_AVERAGE_SIZE 128
86a9643ea8Slogwang 
87a9643ea8Slogwang /* Mask of PF interrupt causes */
88a9643ea8Slogwang #define I40E_PFINT_ICR0_ENA_MASK ( \
89a9643ea8Slogwang 		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
90a9643ea8Slogwang 		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
91a9643ea8Slogwang 		I40E_PFINT_ICR0_ENA_GRST_MASK | \
92a9643ea8Slogwang 		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
93a9643ea8Slogwang 		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
94a9643ea8Slogwang 		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
95a9643ea8Slogwang 		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
96a9643ea8Slogwang 		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
97a9643ea8Slogwang 		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
98a9643ea8Slogwang 
99a9643ea8Slogwang #define I40E_FLOW_TYPES ( \
100a9643ea8Slogwang 	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
101a9643ea8Slogwang 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
102a9643ea8Slogwang 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
103a9643ea8Slogwang 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
104a9643ea8Slogwang 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
105a9643ea8Slogwang 	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
106a9643ea8Slogwang 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
107a9643ea8Slogwang 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
108a9643ea8Slogwang 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
109a9643ea8Slogwang 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
110a9643ea8Slogwang 	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))
111a9643ea8Slogwang 
112a9643ea8Slogwang /* Additional timesync values. */
113a9643ea8Slogwang #define I40E_PTP_40GB_INCVAL     0x0199999999ULL
114a9643ea8Slogwang #define I40E_PTP_10GB_INCVAL     0x0333333333ULL
115a9643ea8Slogwang #define I40E_PTP_1GB_INCVAL      0x2000000000ULL
116a9643ea8Slogwang #define I40E_PRTTSYN_TSYNENA     0x80000000
117a9643ea8Slogwang #define I40E_PRTTSYN_TSYNTYPE    0x0e000000
118a9643ea8Slogwang #define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
119a9643ea8Slogwang 
120a9643ea8Slogwang /**
121a9643ea8Slogwang  * Below are values for writing un-exposed registers suggested
122a9643ea8Slogwang  * by silicon experts
123a9643ea8Slogwang  */
124a9643ea8Slogwang /* Destination MAC address */
125a9643ea8Slogwang #define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
126a9643ea8Slogwang /* Source MAC address */
127a9643ea8Slogwang #define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
128a9643ea8Slogwang /* Outer (S-Tag) VLAN tag in the outer L2 header */
129a9643ea8Slogwang #define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
130a9643ea8Slogwang /* Inner (C-Tag) or single VLAN tag in the outer L2 header */
131a9643ea8Slogwang #define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
132a9643ea8Slogwang /* Single VLAN tag in the inner L2 header */
133a9643ea8Slogwang #define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
134a9643ea8Slogwang /* Source IPv4 address */
135a9643ea8Slogwang #define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
136a9643ea8Slogwang /* Destination IPv4 address */
137a9643ea8Slogwang #define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
138a9643ea8Slogwang /* Source IPv4 address for X722 */
139a9643ea8Slogwang #define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
140a9643ea8Slogwang /* Destination IPv4 address for X722 */
141a9643ea8Slogwang #define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
142a9643ea8Slogwang /* IPv4 Protocol for X722 */
143a9643ea8Slogwang #define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
144a9643ea8Slogwang /* IPv4 Time to Live for X722 */
145a9643ea8Slogwang #define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
146a9643ea8Slogwang /* IPv4 Type of Service (TOS) */
147a9643ea8Slogwang #define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
148a9643ea8Slogwang /* IPv4 Protocol */
149a9643ea8Slogwang #define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
150a9643ea8Slogwang /* IPv4 Time to Live */
151a9643ea8Slogwang #define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
152a9643ea8Slogwang /* Source IPv6 address */
153a9643ea8Slogwang #define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
154a9643ea8Slogwang /* Destination IPv6 address */
155a9643ea8Slogwang #define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
156a9643ea8Slogwang /* IPv6 Traffic Class (TC) */
157a9643ea8Slogwang #define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
158a9643ea8Slogwang /* IPv6 Next Header */
159a9643ea8Slogwang #define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
160a9643ea8Slogwang /* IPv6 Hop Limit */
161a9643ea8Slogwang #define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
162a9643ea8Slogwang /* Source L4 port */
163a9643ea8Slogwang #define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
164a9643ea8Slogwang /* Destination L4 port */
165a9643ea8Slogwang #define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
166a9643ea8Slogwang /* SCTP verification tag */
167a9643ea8Slogwang #define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
168a9643ea8Slogwang /* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE)*/
169a9643ea8Slogwang #define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
170a9643ea8Slogwang /* Source port of tunneling UDP */
171a9643ea8Slogwang #define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
172a9643ea8Slogwang /* Destination port of tunneling UDP */
173a9643ea8Slogwang #define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
174a9643ea8Slogwang /* UDP Tunneling ID, NVGRE/GRE key */
175a9643ea8Slogwang #define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
176a9643ea8Slogwang /* Last ether type */
177a9643ea8Slogwang #define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
178a9643ea8Slogwang /* Tunneling outer destination IPv4 address */
179a9643ea8Slogwang #define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
180a9643ea8Slogwang /* Tunneling outer destination IPv6 address */
181a9643ea8Slogwang #define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
182a9643ea8Slogwang /* 1st word of flex payload */
183a9643ea8Slogwang #define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
184a9643ea8Slogwang /* 2nd word of flex payload */
185a9643ea8Slogwang #define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
186a9643ea8Slogwang /* 3rd word of flex payload */
187a9643ea8Slogwang #define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
188a9643ea8Slogwang /* 4th word of flex payload */
189a9643ea8Slogwang #define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
190a9643ea8Slogwang /* 5th word of flex payload */
191a9643ea8Slogwang #define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
192a9643ea8Slogwang /* 6th word of flex payload */
193a9643ea8Slogwang #define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
194a9643ea8Slogwang /* 7th word of flex payload */
195a9643ea8Slogwang #define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
196a9643ea8Slogwang /* 8th word of flex payload */
197a9643ea8Slogwang #define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
198a9643ea8Slogwang /* all 8 words flex payload */
199a9643ea8Slogwang #define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
200a9643ea8Slogwang #define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL
201a9643ea8Slogwang 
202a9643ea8Slogwang #define I40E_TRANSLATE_INSET 0
203a9643ea8Slogwang #define I40E_TRANSLATE_REG   1
204a9643ea8Slogwang 
205a9643ea8Slogwang #define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
206a9643ea8Slogwang #define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
207a9643ea8Slogwang #define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
208a9643ea8Slogwang #define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
209a9643ea8Slogwang #define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
210a9643ea8Slogwang #define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL
211a9643ea8Slogwang 
212a9643ea8Slogwang /* PCI offset for querying capability */
213a9643ea8Slogwang #define PCI_DEV_CAP_REG            0xA4
214a9643ea8Slogwang /* PCI offset for enabling/disabling Extended Tag */
215a9643ea8Slogwang #define PCI_DEV_CTRL_REG           0xA8
216a9643ea8Slogwang /* Bit mask of Extended Tag capability */
217a9643ea8Slogwang #define PCI_DEV_CAP_EXT_TAG_MASK   0x20
218a9643ea8Slogwang /* Bit shift of Extended Tag enable/disable */
219a9643ea8Slogwang #define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
220a9643ea8Slogwang /* Bit mask of Extended Tag enable/disable */
221a9643ea8Slogwang #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
222a9643ea8Slogwang 
223d30ea906Sjfb8856606 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
224a9643ea8Slogwang static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
225a9643ea8Slogwang static int i40e_dev_configure(struct rte_eth_dev *dev);
226a9643ea8Slogwang static int i40e_dev_start(struct rte_eth_dev *dev);
227*2d9fd380Sjfb8856606 static int i40e_dev_stop(struct rte_eth_dev *dev);
228*2d9fd380Sjfb8856606 static int i40e_dev_close(struct rte_eth_dev *dev);
2292bfe3f2eSlogwang static int  i40e_dev_reset(struct rte_eth_dev *dev);
2304418919fSjohnjiang static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
2314418919fSjohnjiang static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
2324418919fSjohnjiang static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
2334418919fSjohnjiang static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
234a9643ea8Slogwang static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
235a9643ea8Slogwang static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
2362bfe3f2eSlogwang static int i40e_dev_stats_get(struct rte_eth_dev *dev,
237a9643ea8Slogwang 			       struct rte_eth_stats *stats);
238a9643ea8Slogwang static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
239a9643ea8Slogwang 			       struct rte_eth_xstat *xstats, unsigned n);
240a9643ea8Slogwang static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
241a9643ea8Slogwang 				     struct rte_eth_xstat_name *xstats_names,
242a9643ea8Slogwang 				     unsigned limit);
2434418919fSjohnjiang static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
2442bfe3f2eSlogwang static int i40e_fw_version_get(struct rte_eth_dev *dev,
2452bfe3f2eSlogwang 				char *fw_version, size_t fw_size);
2464418919fSjohnjiang static int i40e_dev_info_get(struct rte_eth_dev *dev,
247a9643ea8Slogwang 			     struct rte_eth_dev_info *dev_info);
248a9643ea8Slogwang static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
249a9643ea8Slogwang 				uint16_t vlan_id,
250a9643ea8Slogwang 				int on);
251a9643ea8Slogwang static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
252a9643ea8Slogwang 			      enum rte_vlan_type vlan_type,
253a9643ea8Slogwang 			      uint16_t tpid);
2542bfe3f2eSlogwang static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
255a9643ea8Slogwang static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
256a9643ea8Slogwang 				      uint16_t queue,
257a9643ea8Slogwang 				      int on);
258a9643ea8Slogwang static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
259a9643ea8Slogwang static int i40e_dev_led_on(struct rte_eth_dev *dev);
260a9643ea8Slogwang static int i40e_dev_led_off(struct rte_eth_dev *dev);
261a9643ea8Slogwang static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
262a9643ea8Slogwang 			      struct rte_eth_fc_conf *fc_conf);
263a9643ea8Slogwang static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
264a9643ea8Slogwang 			      struct rte_eth_fc_conf *fc_conf);
265a9643ea8Slogwang static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
266a9643ea8Slogwang 				       struct rte_eth_pfc_conf *pfc_conf);
2672bfe3f2eSlogwang static int i40e_macaddr_add(struct rte_eth_dev *dev,
2684418919fSjohnjiang 			    struct rte_ether_addr *mac_addr,
269a9643ea8Slogwang 			    uint32_t index,
270a9643ea8Slogwang 			    uint32_t pool);
271a9643ea8Slogwang static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
272a9643ea8Slogwang static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
273a9643ea8Slogwang 				    struct rte_eth_rss_reta_entry64 *reta_conf,
274a9643ea8Slogwang 				    uint16_t reta_size);
275a9643ea8Slogwang static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
276a9643ea8Slogwang 				   struct rte_eth_rss_reta_entry64 *reta_conf,
277a9643ea8Slogwang 				   uint16_t reta_size);
278a9643ea8Slogwang 
279a9643ea8Slogwang static int i40e_get_cap(struct i40e_hw *hw);
280a9643ea8Slogwang static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
281a9643ea8Slogwang static int i40e_pf_setup(struct i40e_pf *pf);
282a9643ea8Slogwang static int i40e_dev_rxtx_init(struct i40e_pf *pf);
283a9643ea8Slogwang static int i40e_vmdq_setup(struct rte_eth_dev *dev);
284a9643ea8Slogwang static int i40e_dcb_setup(struct rte_eth_dev *dev);
285a9643ea8Slogwang static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
286a9643ea8Slogwang 		bool offset_loaded, uint64_t *offset, uint64_t *stat);
287a9643ea8Slogwang static void i40e_stat_update_48(struct i40e_hw *hw,
288a9643ea8Slogwang 			       uint32_t hireg,
289a9643ea8Slogwang 			       uint32_t loreg,
290a9643ea8Slogwang 			       bool offset_loaded,
291a9643ea8Slogwang 			       uint64_t *offset,
292a9643ea8Slogwang 			       uint64_t *stat);
293a9643ea8Slogwang static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
2942bfe3f2eSlogwang static void i40e_dev_interrupt_handler(void *param);
295d30ea906Sjfb8856606 static void i40e_dev_alarm_handler(void *param);
296a9643ea8Slogwang static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
297a9643ea8Slogwang 				uint32_t base, uint32_t num);
298a9643ea8Slogwang static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
299a9643ea8Slogwang static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
300a9643ea8Slogwang 			uint32_t base);
301a9643ea8Slogwang static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
302a9643ea8Slogwang 			uint16_t num);
303a9643ea8Slogwang static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
304a9643ea8Slogwang static int i40e_veb_release(struct i40e_veb *veb);
305a9643ea8Slogwang static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
306a9643ea8Slogwang 						struct i40e_vsi *vsi);
307a9643ea8Slogwang static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
308a9643ea8Slogwang static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
309a9643ea8Slogwang 					     struct i40e_macvlan_filter *mv_f,
310a9643ea8Slogwang 					     int num,
311a9643ea8Slogwang 					     uint16_t vlan);
312a9643ea8Slogwang static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
313a9643ea8Slogwang static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
314a9643ea8Slogwang 				    struct rte_eth_rss_conf *rss_conf);
315a9643ea8Slogwang static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
316a9643ea8Slogwang 				      struct rte_eth_rss_conf *rss_conf);
317a9643ea8Slogwang static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
318a9643ea8Slogwang 					struct rte_eth_udp_tunnel *udp_tunnel);
319a9643ea8Slogwang static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
320a9643ea8Slogwang 					struct rte_eth_udp_tunnel *udp_tunnel);
321a9643ea8Slogwang static void i40e_filter_input_set_init(struct i40e_pf *pf);
322a9643ea8Slogwang static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
323a9643ea8Slogwang 				enum rte_filter_type filter_type,
324a9643ea8Slogwang 				enum rte_filter_op filter_op,
325a9643ea8Slogwang 				void *arg);
326a9643ea8Slogwang static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
327a9643ea8Slogwang 				  struct rte_eth_dcb_info *dcb_info);
3282bfe3f2eSlogwang static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
329a9643ea8Slogwang static void i40e_configure_registers(struct i40e_hw *hw);
330a9643ea8Slogwang static void i40e_hw_init(struct rte_eth_dev *dev);
331a9643ea8Slogwang static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
3322bfe3f2eSlogwang static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
3332bfe3f2eSlogwang 						     uint16_t seid,
3342bfe3f2eSlogwang 						     uint16_t rule_type,
3352bfe3f2eSlogwang 						     uint16_t *entries,
3362bfe3f2eSlogwang 						     uint16_t count,
3372bfe3f2eSlogwang 						     uint16_t rule_id);
338a9643ea8Slogwang static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
339a9643ea8Slogwang 			struct rte_eth_mirror_conf *mirror_conf,
340a9643ea8Slogwang 			uint8_t sw_id, uint8_t on);
341a9643ea8Slogwang static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
342a9643ea8Slogwang 
343a9643ea8Slogwang static int i40e_timesync_enable(struct rte_eth_dev *dev);
344a9643ea8Slogwang static int i40e_timesync_disable(struct rte_eth_dev *dev);
345a9643ea8Slogwang static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
346a9643ea8Slogwang 					   struct timespec *timestamp,
347a9643ea8Slogwang 					   uint32_t flags);
348a9643ea8Slogwang static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
349a9643ea8Slogwang 					   struct timespec *timestamp);
350a9643ea8Slogwang static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
351a9643ea8Slogwang 
352a9643ea8Slogwang static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
353a9643ea8Slogwang 
354a9643ea8Slogwang static int i40e_timesync_read_time(struct rte_eth_dev *dev,
355a9643ea8Slogwang 				   struct timespec *timestamp);
356a9643ea8Slogwang static int i40e_timesync_write_time(struct rte_eth_dev *dev,
357a9643ea8Slogwang 				    const struct timespec *timestamp);
358a9643ea8Slogwang 
359a9643ea8Slogwang static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
360a9643ea8Slogwang 					 uint16_t queue_id);
361a9643ea8Slogwang static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
362a9643ea8Slogwang 					  uint16_t queue_id);
363a9643ea8Slogwang 
364a9643ea8Slogwang static int i40e_get_regs(struct rte_eth_dev *dev,
365a9643ea8Slogwang 			 struct rte_dev_reg_info *regs);
366a9643ea8Slogwang 
367a9643ea8Slogwang static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
368a9643ea8Slogwang 
369a9643ea8Slogwang static int i40e_get_eeprom(struct rte_eth_dev *dev,
370a9643ea8Slogwang 			   struct rte_dev_eeprom_info *eeprom);
371a9643ea8Slogwang 
372d30ea906Sjfb8856606 static int i40e_get_module_info(struct rte_eth_dev *dev,
373d30ea906Sjfb8856606 				struct rte_eth_dev_module_info *modinfo);
374d30ea906Sjfb8856606 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
375d30ea906Sjfb8856606 				  struct rte_dev_eeprom_info *info);
376d30ea906Sjfb8856606 
377d30ea906Sjfb8856606 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
3784418919fSjohnjiang 				      struct rte_ether_addr *mac_addr);
379a9643ea8Slogwang 
380a9643ea8Slogwang static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
381a9643ea8Slogwang 
3822bfe3f2eSlogwang static int i40e_ethertype_filter_convert(
3832bfe3f2eSlogwang 	const struct rte_eth_ethertype_filter *input,
3842bfe3f2eSlogwang 	struct i40e_ethertype_filter *filter);
3852bfe3f2eSlogwang static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
3862bfe3f2eSlogwang 				   struct i40e_ethertype_filter *filter);
3872bfe3f2eSlogwang 
3882bfe3f2eSlogwang static int i40e_tunnel_filter_convert(
389d30ea906Sjfb8856606 	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
3902bfe3f2eSlogwang 	struct i40e_tunnel_filter *tunnel_filter);
3912bfe3f2eSlogwang static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
3922bfe3f2eSlogwang 				struct i40e_tunnel_filter *tunnel_filter);
3932bfe3f2eSlogwang static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
3942bfe3f2eSlogwang 
3952bfe3f2eSlogwang static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
3962bfe3f2eSlogwang static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
3972bfe3f2eSlogwang static void i40e_filter_restore(struct i40e_pf *pf);
3982bfe3f2eSlogwang static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
399*2d9fd380Sjfb8856606 static int i40e_pf_config_rss(struct i40e_pf *pf);
4002bfe3f2eSlogwang 
401d30ea906Sjfb8856606 static const char *const valid_keys[] = {
402d30ea906Sjfb8856606 	ETH_I40E_FLOATING_VEB_ARG,
403d30ea906Sjfb8856606 	ETH_I40E_FLOATING_VEB_LIST_ARG,
404d30ea906Sjfb8856606 	ETH_I40E_SUPPORT_MULTI_DRIVER,
405d30ea906Sjfb8856606 	ETH_I40E_QUEUE_NUM_PER_VF_ARG,
406d30ea906Sjfb8856606 	ETH_I40E_USE_LATEST_VEC,
4074418919fSjohnjiang 	ETH_I40E_VF_MSG_CFG,
408d30ea906Sjfb8856606 	NULL};
409d30ea906Sjfb8856606 
410a9643ea8Slogwang static const struct rte_pci_id pci_id_i40e_map[] = {
411a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
412a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
413a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
414a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
415a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
416a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
417a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
418a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
419a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
420a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
421a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
422a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
423a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
424a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
425a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
426a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
427a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
428a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
429a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
430a9643ea8Slogwang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
4314418919fSjohnjiang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
4324418919fSjohnjiang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
4334418919fSjohnjiang 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
434*2d9fd380Sjfb8856606 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
435*2d9fd380Sjfb8856606 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
436*2d9fd380Sjfb8856606 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
437a9643ea8Slogwang 	{ .vendor_id = 0, /* sentinel */ },
438a9643ea8Slogwang };
439a9643ea8Slogwang 
440a9643ea8Slogwang static const struct eth_dev_ops i40e_eth_dev_ops = {
441a9643ea8Slogwang 	.dev_configure                = i40e_dev_configure,
442a9643ea8Slogwang 	.dev_start                    = i40e_dev_start,
443a9643ea8Slogwang 	.dev_stop                     = i40e_dev_stop,
444a9643ea8Slogwang 	.dev_close                    = i40e_dev_close,
4452bfe3f2eSlogwang 	.dev_reset		      = i40e_dev_reset,
446a9643ea8Slogwang 	.promiscuous_enable           = i40e_dev_promiscuous_enable,
447a9643ea8Slogwang 	.promiscuous_disable          = i40e_dev_promiscuous_disable,
448a9643ea8Slogwang 	.allmulticast_enable          = i40e_dev_allmulticast_enable,
449a9643ea8Slogwang 	.allmulticast_disable         = i40e_dev_allmulticast_disable,
450a9643ea8Slogwang 	.dev_set_link_up              = i40e_dev_set_link_up,
451a9643ea8Slogwang 	.dev_set_link_down            = i40e_dev_set_link_down,
452a9643ea8Slogwang 	.link_update                  = i40e_dev_link_update,
453a9643ea8Slogwang 	.stats_get                    = i40e_dev_stats_get,
454a9643ea8Slogwang 	.xstats_get                   = i40e_dev_xstats_get,
455a9643ea8Slogwang 	.xstats_get_names             = i40e_dev_xstats_get_names,
456a9643ea8Slogwang 	.stats_reset                  = i40e_dev_stats_reset,
457a9643ea8Slogwang 	.xstats_reset                 = i40e_dev_stats_reset,
4582bfe3f2eSlogwang 	.fw_version_get               = i40e_fw_version_get,
459a9643ea8Slogwang 	.dev_infos_get                = i40e_dev_info_get,
460a9643ea8Slogwang 	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
461a9643ea8Slogwang 	.vlan_filter_set              = i40e_vlan_filter_set,
462a9643ea8Slogwang 	.vlan_tpid_set                = i40e_vlan_tpid_set,
463a9643ea8Slogwang 	.vlan_offload_set             = i40e_vlan_offload_set,
464a9643ea8Slogwang 	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
465a9643ea8Slogwang 	.vlan_pvid_set                = i40e_vlan_pvid_set,
466a9643ea8Slogwang 	.rx_queue_start               = i40e_dev_rx_queue_start,
467a9643ea8Slogwang 	.rx_queue_stop                = i40e_dev_rx_queue_stop,
468a9643ea8Slogwang 	.tx_queue_start               = i40e_dev_tx_queue_start,
469a9643ea8Slogwang 	.tx_queue_stop                = i40e_dev_tx_queue_stop,
470a9643ea8Slogwang 	.rx_queue_setup               = i40e_dev_rx_queue_setup,
471a9643ea8Slogwang 	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
472a9643ea8Slogwang 	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
473a9643ea8Slogwang 	.rx_queue_release             = i40e_dev_rx_queue_release,
474a9643ea8Slogwang 	.tx_queue_setup               = i40e_dev_tx_queue_setup,
475a9643ea8Slogwang 	.tx_queue_release             = i40e_dev_tx_queue_release,
476a9643ea8Slogwang 	.dev_led_on                   = i40e_dev_led_on,
477a9643ea8Slogwang 	.dev_led_off                  = i40e_dev_led_off,
478a9643ea8Slogwang 	.flow_ctrl_get                = i40e_flow_ctrl_get,
479a9643ea8Slogwang 	.flow_ctrl_set                = i40e_flow_ctrl_set,
480a9643ea8Slogwang 	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
481a9643ea8Slogwang 	.mac_addr_add                 = i40e_macaddr_add,
482a9643ea8Slogwang 	.mac_addr_remove              = i40e_macaddr_remove,
483a9643ea8Slogwang 	.reta_update                  = i40e_dev_rss_reta_update,
484a9643ea8Slogwang 	.reta_query                   = i40e_dev_rss_reta_query,
485a9643ea8Slogwang 	.rss_hash_update              = i40e_dev_rss_hash_update,
486a9643ea8Slogwang 	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
487a9643ea8Slogwang 	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
488a9643ea8Slogwang 	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
489a9643ea8Slogwang 	.filter_ctrl                  = i40e_dev_filter_ctrl,
490a9643ea8Slogwang 	.rxq_info_get                 = i40e_rxq_info_get,
491a9643ea8Slogwang 	.txq_info_get                 = i40e_txq_info_get,
4924418919fSjohnjiang 	.rx_burst_mode_get            = i40e_rx_burst_mode_get,
4934418919fSjohnjiang 	.tx_burst_mode_get            = i40e_tx_burst_mode_get,
494a9643ea8Slogwang 	.mirror_rule_set              = i40e_mirror_rule_set,
495a9643ea8Slogwang 	.mirror_rule_reset            = i40e_mirror_rule_reset,
496a9643ea8Slogwang 	.timesync_enable              = i40e_timesync_enable,
497a9643ea8Slogwang 	.timesync_disable             = i40e_timesync_disable,
498a9643ea8Slogwang 	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
499a9643ea8Slogwang 	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
500a9643ea8Slogwang 	.get_dcb_info                 = i40e_dev_get_dcb_info,
501a9643ea8Slogwang 	.timesync_adjust_time         = i40e_timesync_adjust_time,
502a9643ea8Slogwang 	.timesync_read_time           = i40e_timesync_read_time,
503a9643ea8Slogwang 	.timesync_write_time          = i40e_timesync_write_time,
504a9643ea8Slogwang 	.get_reg                      = i40e_get_regs,
505a9643ea8Slogwang 	.get_eeprom_length            = i40e_get_eeprom_length,
506a9643ea8Slogwang 	.get_eeprom                   = i40e_get_eeprom,
507d30ea906Sjfb8856606 	.get_module_info              = i40e_get_module_info,
508d30ea906Sjfb8856606 	.get_module_eeprom            = i40e_get_module_eeprom,
509a9643ea8Slogwang 	.mac_addr_set                 = i40e_set_default_mac_addr,
510a9643ea8Slogwang 	.mtu_set                      = i40e_dev_mtu_set,
5112bfe3f2eSlogwang 	.tm_ops_get                   = i40e_tm_ops_get,
512*2d9fd380Sjfb8856606 	.tx_done_cleanup              = i40e_tx_done_cleanup,
513a9643ea8Slogwang };
514a9643ea8Slogwang 
515a9643ea8Slogwang /* store statistics names and its offset in stats structure */
516a9643ea8Slogwang struct rte_i40e_xstats_name_off {
517a9643ea8Slogwang 	char name[RTE_ETH_XSTATS_NAME_SIZE];
518a9643ea8Slogwang 	unsigned offset;
519a9643ea8Slogwang };
520a9643ea8Slogwang 
521a9643ea8Slogwang static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
522a9643ea8Slogwang 	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
523a9643ea8Slogwang 	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
524a9643ea8Slogwang 	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
5254b05018fSfengbojiang 	{"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
526a9643ea8Slogwang 	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
527a9643ea8Slogwang 		rx_unknown_protocol)},
528a9643ea8Slogwang 	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
529a9643ea8Slogwang 	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
530a9643ea8Slogwang 	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
5314b05018fSfengbojiang 	{"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
532a9643ea8Slogwang };
533a9643ea8Slogwang 
534a9643ea8Slogwang #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
535a9643ea8Slogwang 		sizeof(rte_i40e_stats_strings[0]))
536a9643ea8Slogwang 
537a9643ea8Slogwang static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
538a9643ea8Slogwang 	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
539a9643ea8Slogwang 		tx_dropped_link_down)},
540a9643ea8Slogwang 	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
541a9643ea8Slogwang 	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
542a9643ea8Slogwang 		illegal_bytes)},
543a9643ea8Slogwang 	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
544a9643ea8Slogwang 	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
545a9643ea8Slogwang 		mac_local_faults)},
546a9643ea8Slogwang 	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
547a9643ea8Slogwang 		mac_remote_faults)},
548a9643ea8Slogwang 	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
549a9643ea8Slogwang 		rx_length_errors)},
550a9643ea8Slogwang 	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
551a9643ea8Slogwang 	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
552a9643ea8Slogwang 	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
553a9643ea8Slogwang 	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
554a9643ea8Slogwang 	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
555a9643ea8Slogwang 	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
556a9643ea8Slogwang 		rx_size_127)},
557a9643ea8Slogwang 	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
558a9643ea8Slogwang 		rx_size_255)},
559a9643ea8Slogwang 	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
560a9643ea8Slogwang 		rx_size_511)},
561a9643ea8Slogwang 	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
562a9643ea8Slogwang 		rx_size_1023)},
563a9643ea8Slogwang 	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
564a9643ea8Slogwang 		rx_size_1522)},
565a9643ea8Slogwang 	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
566a9643ea8Slogwang 		rx_size_big)},
567a9643ea8Slogwang 	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
568a9643ea8Slogwang 		rx_undersize)},
569a9643ea8Slogwang 	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
570a9643ea8Slogwang 		rx_oversize)},
571a9643ea8Slogwang 	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
572a9643ea8Slogwang 		mac_short_packet_dropped)},
573a9643ea8Slogwang 	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
574a9643ea8Slogwang 		rx_fragments)},
575a9643ea8Slogwang 	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
576a9643ea8Slogwang 	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
577a9643ea8Slogwang 	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
578a9643ea8Slogwang 		tx_size_127)},
579a9643ea8Slogwang 	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
580a9643ea8Slogwang 		tx_size_255)},
581a9643ea8Slogwang 	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
582a9643ea8Slogwang 		tx_size_511)},
583a9643ea8Slogwang 	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
584a9643ea8Slogwang 		tx_size_1023)},
585a9643ea8Slogwang 	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
586a9643ea8Slogwang 		tx_size_1522)},
587a9643ea8Slogwang 	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
588a9643ea8Slogwang 		tx_size_big)},
589a9643ea8Slogwang 	{"rx_flow_director_atr_match_packets",
590a9643ea8Slogwang 		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
591a9643ea8Slogwang 	{"rx_flow_director_sb_match_packets",
592a9643ea8Slogwang 		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
593a9643ea8Slogwang 	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
594a9643ea8Slogwang 		tx_lpi_status)},
595a9643ea8Slogwang 	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
596a9643ea8Slogwang 		rx_lpi_status)},
597a9643ea8Slogwang 	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
598a9643ea8Slogwang 		tx_lpi_count)},
599a9643ea8Slogwang 	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
600a9643ea8Slogwang 		rx_lpi_count)},
601a9643ea8Slogwang };
602a9643ea8Slogwang 
603a9643ea8Slogwang #define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
604a9643ea8Slogwang 		sizeof(rte_i40e_hw_port_strings[0]))
605a9643ea8Slogwang 
606a9643ea8Slogwang static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
607a9643ea8Slogwang 	{"xon_packets", offsetof(struct i40e_hw_port_stats,
608a9643ea8Slogwang 		priority_xon_rx)},
609a9643ea8Slogwang 	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
610a9643ea8Slogwang 		priority_xoff_rx)},
611a9643ea8Slogwang };
612a9643ea8Slogwang 
613a9643ea8Slogwang #define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
614a9643ea8Slogwang 		sizeof(rte_i40e_rxq_prio_strings[0]))
615a9643ea8Slogwang 
616a9643ea8Slogwang static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
617a9643ea8Slogwang 	{"xon_packets", offsetof(struct i40e_hw_port_stats,
618a9643ea8Slogwang 		priority_xon_tx)},
619a9643ea8Slogwang 	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
620a9643ea8Slogwang 		priority_xoff_tx)},
621a9643ea8Slogwang 	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
622a9643ea8Slogwang 		priority_xon_2_xoff)},
623a9643ea8Slogwang };
624a9643ea8Slogwang 
625a9643ea8Slogwang #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
626a9643ea8Slogwang 		sizeof(rte_i40e_txq_prio_strings[0]))
627a9643ea8Slogwang 
628d30ea906Sjfb8856606 static int
eth_i40e_pci_probe(struct rte_pci_driver * pci_drv __rte_unused,struct rte_pci_device * pci_dev)629d30ea906Sjfb8856606 eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
6302bfe3f2eSlogwang 	struct rte_pci_device *pci_dev)
6312bfe3f2eSlogwang {
632d30ea906Sjfb8856606 	char name[RTE_ETH_NAME_MAX_LEN];
633d30ea906Sjfb8856606 	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
634d30ea906Sjfb8856606 	int i, retval;
635d30ea906Sjfb8856606 
636d30ea906Sjfb8856606 	if (pci_dev->device.devargs) {
637d30ea906Sjfb8856606 		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
638d30ea906Sjfb8856606 				&eth_da);
639d30ea906Sjfb8856606 		if (retval)
640d30ea906Sjfb8856606 			return retval;
641d30ea906Sjfb8856606 	}
642d30ea906Sjfb8856606 
643d30ea906Sjfb8856606 	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
644d30ea906Sjfb8856606 		sizeof(struct i40e_adapter),
645d30ea906Sjfb8856606 		eth_dev_pci_specific_init, pci_dev,
646d30ea906Sjfb8856606 		eth_i40e_dev_init, NULL);
647d30ea906Sjfb8856606 
648d30ea906Sjfb8856606 	if (retval || eth_da.nb_representor_ports < 1)
649d30ea906Sjfb8856606 		return retval;
650d30ea906Sjfb8856606 
651d30ea906Sjfb8856606 	/* probe VF representor ports */
652d30ea906Sjfb8856606 	struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
653d30ea906Sjfb8856606 		pci_dev->device.name);
654d30ea906Sjfb8856606 
655d30ea906Sjfb8856606 	if (pf_ethdev == NULL)
656d30ea906Sjfb8856606 		return -ENODEV;
657d30ea906Sjfb8856606 
658d30ea906Sjfb8856606 	for (i = 0; i < eth_da.nb_representor_ports; i++) {
659d30ea906Sjfb8856606 		struct i40e_vf_representor representor = {
660d30ea906Sjfb8856606 			.vf_id = eth_da.representor_ports[i],
661d30ea906Sjfb8856606 			.switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
662d30ea906Sjfb8856606 				pf_ethdev->data->dev_private)->switch_domain_id,
663d30ea906Sjfb8856606 			.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
664d30ea906Sjfb8856606 				pf_ethdev->data->dev_private)
665d30ea906Sjfb8856606 		};
666d30ea906Sjfb8856606 
667d30ea906Sjfb8856606 		/* representor port net_bdf_port */
668d30ea906Sjfb8856606 		snprintf(name, sizeof(name), "net_%s_representor_%d",
669d30ea906Sjfb8856606 			pci_dev->device.name, eth_da.representor_ports[i]);
670d30ea906Sjfb8856606 
671d30ea906Sjfb8856606 		retval = rte_eth_dev_create(&pci_dev->device, name,
672d30ea906Sjfb8856606 			sizeof(struct i40e_vf_representor), NULL, NULL,
673d30ea906Sjfb8856606 			i40e_vf_representor_init, &representor);
674d30ea906Sjfb8856606 
675d30ea906Sjfb8856606 		if (retval)
676d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR, "failed to create i40e vf "
677d30ea906Sjfb8856606 				"representor %s.", name);
678d30ea906Sjfb8856606 	}
679d30ea906Sjfb8856606 
680d30ea906Sjfb8856606 	return 0;
6812bfe3f2eSlogwang }
6822bfe3f2eSlogwang 
eth_i40e_pci_remove(struct rte_pci_device * pci_dev)6832bfe3f2eSlogwang static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
6842bfe3f2eSlogwang {
685d30ea906Sjfb8856606 	struct rte_eth_dev *ethdev;
686d30ea906Sjfb8856606 
687d30ea906Sjfb8856606 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
688d30ea906Sjfb8856606 	if (!ethdev)
6894418919fSjohnjiang 		return 0;
690d30ea906Sjfb8856606 
691d30ea906Sjfb8856606 	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
6924418919fSjohnjiang 		return rte_eth_dev_pci_generic_remove(pci_dev,
6934418919fSjohnjiang 					i40e_vf_representor_uninit);
694d30ea906Sjfb8856606 	else
6954418919fSjohnjiang 		return rte_eth_dev_pci_generic_remove(pci_dev,
6964418919fSjohnjiang 						eth_i40e_dev_uninit);
6972bfe3f2eSlogwang }
6982bfe3f2eSlogwang 
6992bfe3f2eSlogwang static struct rte_pci_driver rte_i40e_pmd = {
700a9643ea8Slogwang 	.id_table = pci_id_i40e_map,
7014418919fSjohnjiang 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
7022bfe3f2eSlogwang 	.probe = eth_i40e_pci_probe,
7032bfe3f2eSlogwang 	.remove = eth_i40e_pci_remove,
704a9643ea8Slogwang };
705a9643ea8Slogwang 
7065af785ecSfengbojiang(姜凤波) static inline void
i40e_write_global_rx_ctl(struct i40e_hw * hw,uint32_t reg_addr,uint32_t reg_val)707d30ea906Sjfb8856606 i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
708d30ea906Sjfb8856606 			 uint32_t reg_val)
7095af785ecSfengbojiang(姜凤波) {
710d30ea906Sjfb8856606 	uint32_t ori_reg_val;
711d30ea906Sjfb8856606 	struct rte_eth_dev *dev;
712d30ea906Sjfb8856606 
713d30ea906Sjfb8856606 	ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
714d30ea906Sjfb8856606 	dev = ((struct i40e_adapter *)hw->back)->eth_dev;
7152bfe3f2eSlogwang 	i40e_write_rx_ctl(hw, reg_addr, reg_val);
716d30ea906Sjfb8856606 	if (ori_reg_val != reg_val)
717d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING,
718d30ea906Sjfb8856606 			    "i40e device %s changed global register [0x%08x]."
719d30ea906Sjfb8856606 			    " original: 0x%08x, new: 0x%08x",
720d30ea906Sjfb8856606 			    dev->device->name, reg_addr, ori_reg_val, reg_val);
721a9643ea8Slogwang }
722a9643ea8Slogwang 
7232bfe3f2eSlogwang RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
7242bfe3f2eSlogwang RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
7252bfe3f2eSlogwang RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
726a9643ea8Slogwang 
727a9643ea8Slogwang #ifndef I40E_GLQF_ORT
728a9643ea8Slogwang #define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
729a9643ea8Slogwang #endif
730a9643ea8Slogwang #ifndef I40E_GLQF_PIT
731a9643ea8Slogwang #define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
732a9643ea8Slogwang #endif
7332bfe3f2eSlogwang #ifndef I40E_GLQF_L3_MAP
7342bfe3f2eSlogwang #define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
7352bfe3f2eSlogwang #endif
736a9643ea8Slogwang 
i40e_GLQF_reg_init(struct i40e_hw * hw)737a9643ea8Slogwang static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
738a9643ea8Slogwang {
739a9643ea8Slogwang 	/*
7402bfe3f2eSlogwang 	 * Initialize registers for parsing packet type of QinQ
7412bfe3f2eSlogwang 	 * This should be removed from code once proper
7422bfe3f2eSlogwang 	 * configuration API is added to avoid configuration conflicts
7432bfe3f2eSlogwang 	 * between ports of the same device.
7442bfe3f2eSlogwang 	 */
7452bfe3f2eSlogwang 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
7462bfe3f2eSlogwang 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
7472bfe3f2eSlogwang }
7482bfe3f2eSlogwang 
i40e_config_automask(struct i40e_pf * pf)7492bfe3f2eSlogwang static inline void i40e_config_automask(struct i40e_pf *pf)
7502bfe3f2eSlogwang {
7512bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7522bfe3f2eSlogwang 	uint32_t val;
7532bfe3f2eSlogwang 
7542bfe3f2eSlogwang 	/* INTENA flag is not auto-cleared for interrupt */
7552bfe3f2eSlogwang 	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
7562bfe3f2eSlogwang 	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
7572bfe3f2eSlogwang 		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
7582bfe3f2eSlogwang 
7592bfe3f2eSlogwang 	/* If support multi-driver, PF will use INT0. */
7602bfe3f2eSlogwang 	if (!pf->support_multi_driver)
7612bfe3f2eSlogwang 		val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
7622bfe3f2eSlogwang 
7632bfe3f2eSlogwang 	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
764a9643ea8Slogwang }
765a9643ea8Slogwang 
766a9643ea8Slogwang #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
767a9643ea8Slogwang 
768a9643ea8Slogwang /*
769a9643ea8Slogwang  * Add a ethertype filter to drop all flow control frames transmitted
770a9643ea8Slogwang  * from VSIs.
771a9643ea8Slogwang */
772a9643ea8Slogwang static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf * pf)773a9643ea8Slogwang i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
774a9643ea8Slogwang {
775a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
776a9643ea8Slogwang 	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
777a9643ea8Slogwang 			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
778a9643ea8Slogwang 			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
779a9643ea8Slogwang 	int ret;
780a9643ea8Slogwang 
781a9643ea8Slogwang 	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
782a9643ea8Slogwang 				I40E_FLOW_CONTROL_ETHERTYPE, flags,
783a9643ea8Slogwang 				pf->main_vsi_seid, 0,
784a9643ea8Slogwang 				TRUE, NULL, NULL);
785a9643ea8Slogwang 	if (ret)
7862bfe3f2eSlogwang 		PMD_INIT_LOG(ERR,
7872bfe3f2eSlogwang 			"Failed to add filter to drop flow control frames from VSIs.");
788a9643ea8Slogwang }
789a9643ea8Slogwang 
790a9643ea8Slogwang static int
floating_veb_list_handler(__rte_unused const char * key,const char * floating_veb_value,void * opaque)791a9643ea8Slogwang floating_veb_list_handler(__rte_unused const char *key,
792a9643ea8Slogwang 			  const char *floating_veb_value,
793a9643ea8Slogwang 			  void *opaque)
794a9643ea8Slogwang {
795a9643ea8Slogwang 	int idx = 0;
796a9643ea8Slogwang 	unsigned int count = 0;
797a9643ea8Slogwang 	char *end = NULL;
798a9643ea8Slogwang 	int min, max;
799a9643ea8Slogwang 	bool *vf_floating_veb = opaque;
800a9643ea8Slogwang 
801a9643ea8Slogwang 	while (isblank(*floating_veb_value))
802a9643ea8Slogwang 		floating_veb_value++;
803a9643ea8Slogwang 
804a9643ea8Slogwang 	/* Reset floating VEB configuration for VFs */
805a9643ea8Slogwang 	for (idx = 0; idx < I40E_MAX_VF; idx++)
806a9643ea8Slogwang 		vf_floating_veb[idx] = false;
807a9643ea8Slogwang 
808a9643ea8Slogwang 	min = I40E_MAX_VF;
809a9643ea8Slogwang 	do {
810a9643ea8Slogwang 		while (isblank(*floating_veb_value))
811a9643ea8Slogwang 			floating_veb_value++;
812a9643ea8Slogwang 		if (*floating_veb_value == '\0')
813a9643ea8Slogwang 			return -1;
814a9643ea8Slogwang 		errno = 0;
815a9643ea8Slogwang 		idx = strtoul(floating_veb_value, &end, 10);
816a9643ea8Slogwang 		if (errno || end == NULL)
817a9643ea8Slogwang 			return -1;
818a9643ea8Slogwang 		while (isblank(*end))
819a9643ea8Slogwang 			end++;
820a9643ea8Slogwang 		if (*end == '-') {
821a9643ea8Slogwang 			min = idx;
822a9643ea8Slogwang 		} else if ((*end == ';') || (*end == '\0')) {
823a9643ea8Slogwang 			max = idx;
824a9643ea8Slogwang 			if (min == I40E_MAX_VF)
825a9643ea8Slogwang 				min = idx;
826a9643ea8Slogwang 			if (max >= I40E_MAX_VF)
827a9643ea8Slogwang 				max = I40E_MAX_VF - 1;
828a9643ea8Slogwang 			for (idx = min; idx <= max; idx++) {
829a9643ea8Slogwang 				vf_floating_veb[idx] = true;
830a9643ea8Slogwang 				count++;
831a9643ea8Slogwang 			}
832a9643ea8Slogwang 			min = I40E_MAX_VF;
833a9643ea8Slogwang 		} else {
834a9643ea8Slogwang 			return -1;
835a9643ea8Slogwang 		}
836a9643ea8Slogwang 		floating_veb_value = end + 1;
837a9643ea8Slogwang 	} while (*end != '\0');
838a9643ea8Slogwang 
839a9643ea8Slogwang 	if (count == 0)
840a9643ea8Slogwang 		return -1;
841a9643ea8Slogwang 
842a9643ea8Slogwang 	return 0;
843a9643ea8Slogwang }
844a9643ea8Slogwang 
845a9643ea8Slogwang static void
config_vf_floating_veb(struct rte_devargs * devargs,uint16_t floating_veb,bool * vf_floating_veb)846a9643ea8Slogwang config_vf_floating_veb(struct rte_devargs *devargs,
847a9643ea8Slogwang 		       uint16_t floating_veb,
848a9643ea8Slogwang 		       bool *vf_floating_veb)
849a9643ea8Slogwang {
850a9643ea8Slogwang 	struct rte_kvargs *kvlist;
851a9643ea8Slogwang 	int i;
852a9643ea8Slogwang 	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
853a9643ea8Slogwang 
854a9643ea8Slogwang 	if (!floating_veb)
855a9643ea8Slogwang 		return;
856a9643ea8Slogwang 	/* All the VFs attach to the floating VEB by default
857a9643ea8Slogwang 	 * when the floating VEB is enabled.
858a9643ea8Slogwang 	 */
859a9643ea8Slogwang 	for (i = 0; i < I40E_MAX_VF; i++)
860a9643ea8Slogwang 		vf_floating_veb[i] = true;
861a9643ea8Slogwang 
862a9643ea8Slogwang 	if (devargs == NULL)
863a9643ea8Slogwang 		return;
864a9643ea8Slogwang 
865d30ea906Sjfb8856606 	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
866a9643ea8Slogwang 	if (kvlist == NULL)
867a9643ea8Slogwang 		return;
868a9643ea8Slogwang 
869a9643ea8Slogwang 	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
870a9643ea8Slogwang 		rte_kvargs_free(kvlist);
871a9643ea8Slogwang 		return;
872a9643ea8Slogwang 	}
873a9643ea8Slogwang 	/* When the floating_veb_list parameter exists, all the VFs
874a9643ea8Slogwang 	 * will attach to the legacy VEB firstly, then configure VFs
875a9643ea8Slogwang 	 * to the floating VEB according to the floating_veb_list.
876a9643ea8Slogwang 	 */
877a9643ea8Slogwang 	if (rte_kvargs_process(kvlist, floating_veb_list,
878a9643ea8Slogwang 			       floating_veb_list_handler,
879a9643ea8Slogwang 			       vf_floating_veb) < 0) {
880a9643ea8Slogwang 		rte_kvargs_free(kvlist);
881a9643ea8Slogwang 		return;
882a9643ea8Slogwang 	}
883a9643ea8Slogwang 	rte_kvargs_free(kvlist);
884a9643ea8Slogwang }
885a9643ea8Slogwang 
886a9643ea8Slogwang static int
i40e_check_floating_handler(__rte_unused const char * key,const char * value,__rte_unused void * opaque)887a9643ea8Slogwang i40e_check_floating_handler(__rte_unused const char *key,
888a9643ea8Slogwang 			    const char *value,
889a9643ea8Slogwang 			    __rte_unused void *opaque)
890a9643ea8Slogwang {
891a9643ea8Slogwang 	if (strcmp(value, "1"))
892a9643ea8Slogwang 		return -1;
893a9643ea8Slogwang 
894a9643ea8Slogwang 	return 0;
895a9643ea8Slogwang }
896a9643ea8Slogwang 
897a9643ea8Slogwang static int
is_floating_veb_supported(struct rte_devargs * devargs)898a9643ea8Slogwang is_floating_veb_supported(struct rte_devargs *devargs)
899a9643ea8Slogwang {
900a9643ea8Slogwang 	struct rte_kvargs *kvlist;
901a9643ea8Slogwang 	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
902a9643ea8Slogwang 
903a9643ea8Slogwang 	if (devargs == NULL)
904a9643ea8Slogwang 		return 0;
905a9643ea8Slogwang 
906d30ea906Sjfb8856606 	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
907a9643ea8Slogwang 	if (kvlist == NULL)
908a9643ea8Slogwang 		return 0;
909a9643ea8Slogwang 
910a9643ea8Slogwang 	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
911a9643ea8Slogwang 		rte_kvargs_free(kvlist);
912a9643ea8Slogwang 		return 0;
913a9643ea8Slogwang 	}
914a9643ea8Slogwang 	/* Floating VEB is enabled when there's key-value:
915a9643ea8Slogwang 	 * enable_floating_veb=1
916a9643ea8Slogwang 	 */
917a9643ea8Slogwang 	if (rte_kvargs_process(kvlist, floating_veb_key,
918a9643ea8Slogwang 			       i40e_check_floating_handler, NULL) < 0) {
919a9643ea8Slogwang 		rte_kvargs_free(kvlist);
920a9643ea8Slogwang 		return 0;
921a9643ea8Slogwang 	}
922a9643ea8Slogwang 	rte_kvargs_free(kvlist);
923a9643ea8Slogwang 
924a9643ea8Slogwang 	return 1;
925a9643ea8Slogwang }
926a9643ea8Slogwang 
927a9643ea8Slogwang static void
config_floating_veb(struct rte_eth_dev * dev)928a9643ea8Slogwang config_floating_veb(struct rte_eth_dev *dev)
929a9643ea8Slogwang {
9302bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
931a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
932a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
933a9643ea8Slogwang 
934a9643ea8Slogwang 	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
935a9643ea8Slogwang 
936a9643ea8Slogwang 	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
9372bfe3f2eSlogwang 		pf->floating_veb =
9382bfe3f2eSlogwang 			is_floating_veb_supported(pci_dev->device.devargs);
9392bfe3f2eSlogwang 		config_vf_floating_veb(pci_dev->device.devargs,
9402bfe3f2eSlogwang 				       pf->floating_veb,
941a9643ea8Slogwang 				       pf->floating_veb_list);
942a9643ea8Slogwang 	} else {
943a9643ea8Slogwang 		pf->floating_veb = false;
944a9643ea8Slogwang 	}
945a9643ea8Slogwang }
946a9643ea8Slogwang 
947a9643ea8Slogwang #define I40E_L2_TAGS_S_TAG_SHIFT 1
948a9643ea8Slogwang #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
949a9643ea8Slogwang 
950a9643ea8Slogwang static int
i40e_init_ethtype_filter_list(struct rte_eth_dev * dev)9512bfe3f2eSlogwang i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
9522bfe3f2eSlogwang {
9532bfe3f2eSlogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9542bfe3f2eSlogwang 	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9552bfe3f2eSlogwang 	char ethertype_hash_name[RTE_HASH_NAMESIZE];
9562bfe3f2eSlogwang 	int ret;
9572bfe3f2eSlogwang 
9582bfe3f2eSlogwang 	struct rte_hash_parameters ethertype_hash_params = {
9592bfe3f2eSlogwang 		.name = ethertype_hash_name,
9602bfe3f2eSlogwang 		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
9612bfe3f2eSlogwang 		.key_len = sizeof(struct i40e_ethertype_filter_input),
9622bfe3f2eSlogwang 		.hash_func = rte_hash_crc,
9632bfe3f2eSlogwang 		.hash_func_init_val = 0,
9642bfe3f2eSlogwang 		.socket_id = rte_socket_id(),
9652bfe3f2eSlogwang 	};
9662bfe3f2eSlogwang 
9672bfe3f2eSlogwang 	/* Initialize ethertype filter rule list and hash */
9682bfe3f2eSlogwang 	TAILQ_INIT(&ethertype_rule->ethertype_list);
9692bfe3f2eSlogwang 	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
9702bfe3f2eSlogwang 		 "ethertype_%s", dev->device->name);
9712bfe3f2eSlogwang 	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
9722bfe3f2eSlogwang 	if (!ethertype_rule->hash_table) {
9732bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
9742bfe3f2eSlogwang 		return -EINVAL;
9752bfe3f2eSlogwang 	}
9762bfe3f2eSlogwang 	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
9772bfe3f2eSlogwang 				       sizeof(struct i40e_ethertype_filter *) *
9782bfe3f2eSlogwang 				       I40E_MAX_ETHERTYPE_FILTER_NUM,
9792bfe3f2eSlogwang 				       0);
9802bfe3f2eSlogwang 	if (!ethertype_rule->hash_map) {
9812bfe3f2eSlogwang 		PMD_INIT_LOG(ERR,
9822bfe3f2eSlogwang 			     "Failed to allocate memory for ethertype hash map!");
9832bfe3f2eSlogwang 		ret = -ENOMEM;
9842bfe3f2eSlogwang 		goto err_ethertype_hash_map_alloc;
9852bfe3f2eSlogwang 	}
9862bfe3f2eSlogwang 
9872bfe3f2eSlogwang 	return 0;
9882bfe3f2eSlogwang 
9892bfe3f2eSlogwang err_ethertype_hash_map_alloc:
9902bfe3f2eSlogwang 	rte_hash_free(ethertype_rule->hash_table);
9912bfe3f2eSlogwang 
9922bfe3f2eSlogwang 	return ret;
9932bfe3f2eSlogwang }
9942bfe3f2eSlogwang 
9952bfe3f2eSlogwang static int
i40e_init_tunnel_filter_list(struct rte_eth_dev * dev)9962bfe3f2eSlogwang i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
9972bfe3f2eSlogwang {
9982bfe3f2eSlogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9992bfe3f2eSlogwang 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
10002bfe3f2eSlogwang 	char tunnel_hash_name[RTE_HASH_NAMESIZE];
10012bfe3f2eSlogwang 	int ret;
10022bfe3f2eSlogwang 
10032bfe3f2eSlogwang 	struct rte_hash_parameters tunnel_hash_params = {
10042bfe3f2eSlogwang 		.name = tunnel_hash_name,
10052bfe3f2eSlogwang 		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
10062bfe3f2eSlogwang 		.key_len = sizeof(struct i40e_tunnel_filter_input),
10072bfe3f2eSlogwang 		.hash_func = rte_hash_crc,
10082bfe3f2eSlogwang 		.hash_func_init_val = 0,
10092bfe3f2eSlogwang 		.socket_id = rte_socket_id(),
10102bfe3f2eSlogwang 	};
10112bfe3f2eSlogwang 
10122bfe3f2eSlogwang 	/* Initialize tunnel filter rule list and hash */
10132bfe3f2eSlogwang 	TAILQ_INIT(&tunnel_rule->tunnel_list);
10142bfe3f2eSlogwang 	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
10152bfe3f2eSlogwang 		 "tunnel_%s", dev->device->name);
10162bfe3f2eSlogwang 	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
10172bfe3f2eSlogwang 	if (!tunnel_rule->hash_table) {
10182bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
10192bfe3f2eSlogwang 		return -EINVAL;
10202bfe3f2eSlogwang 	}
10212bfe3f2eSlogwang 	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
10222bfe3f2eSlogwang 				    sizeof(struct i40e_tunnel_filter *) *
10232bfe3f2eSlogwang 				    I40E_MAX_TUNNEL_FILTER_NUM,
10242bfe3f2eSlogwang 				    0);
10252bfe3f2eSlogwang 	if (!tunnel_rule->hash_map) {
10262bfe3f2eSlogwang 		PMD_INIT_LOG(ERR,
10272bfe3f2eSlogwang 			     "Failed to allocate memory for tunnel hash map!");
10282bfe3f2eSlogwang 		ret = -ENOMEM;
10292bfe3f2eSlogwang 		goto err_tunnel_hash_map_alloc;
10302bfe3f2eSlogwang 	}
10312bfe3f2eSlogwang 
10322bfe3f2eSlogwang 	return 0;
10332bfe3f2eSlogwang 
10342bfe3f2eSlogwang err_tunnel_hash_map_alloc:
10352bfe3f2eSlogwang 	rte_hash_free(tunnel_rule->hash_table);
10362bfe3f2eSlogwang 
10372bfe3f2eSlogwang 	return ret;
10382bfe3f2eSlogwang }
10392bfe3f2eSlogwang 
10402bfe3f2eSlogwang static int
i40e_init_fdir_filter_list(struct rte_eth_dev * dev)10412bfe3f2eSlogwang i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
10422bfe3f2eSlogwang {
10432bfe3f2eSlogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1044*2d9fd380Sjfb8856606 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10452bfe3f2eSlogwang 	struct i40e_fdir_info *fdir_info = &pf->fdir;
10462bfe3f2eSlogwang 	char fdir_hash_name[RTE_HASH_NAMESIZE];
1047*2d9fd380Sjfb8856606 	uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
1048*2d9fd380Sjfb8856606 	uint32_t best = hw->func_caps.fd_filters_best_effort;
1049*2d9fd380Sjfb8856606 	struct rte_bitmap *bmp = NULL;
1050*2d9fd380Sjfb8856606 	uint32_t bmp_size;
1051*2d9fd380Sjfb8856606 	void *mem = NULL;
1052*2d9fd380Sjfb8856606 	uint32_t i = 0;
10532bfe3f2eSlogwang 	int ret;
10542bfe3f2eSlogwang 
10552bfe3f2eSlogwang 	struct rte_hash_parameters fdir_hash_params = {
10562bfe3f2eSlogwang 		.name = fdir_hash_name,
10572bfe3f2eSlogwang 		.entries = I40E_MAX_FDIR_FILTER_NUM,
10582bfe3f2eSlogwang 		.key_len = sizeof(struct i40e_fdir_input),
10592bfe3f2eSlogwang 		.hash_func = rte_hash_crc,
10602bfe3f2eSlogwang 		.hash_func_init_val = 0,
10612bfe3f2eSlogwang 		.socket_id = rte_socket_id(),
10622bfe3f2eSlogwang 	};
10632bfe3f2eSlogwang 
10642bfe3f2eSlogwang 	/* Initialize flow director filter rule list and hash */
10652bfe3f2eSlogwang 	TAILQ_INIT(&fdir_info->fdir_list);
10662bfe3f2eSlogwang 	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
10672bfe3f2eSlogwang 		 "fdir_%s", dev->device->name);
10682bfe3f2eSlogwang 	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
10692bfe3f2eSlogwang 	if (!fdir_info->hash_table) {
10702bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
10712bfe3f2eSlogwang 		return -EINVAL;
10722bfe3f2eSlogwang 	}
1073*2d9fd380Sjfb8856606 
10742bfe3f2eSlogwang 	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
10752bfe3f2eSlogwang 					  sizeof(struct i40e_fdir_filter *) *
10762bfe3f2eSlogwang 					  I40E_MAX_FDIR_FILTER_NUM,
10772bfe3f2eSlogwang 					  0);
10782bfe3f2eSlogwang 	if (!fdir_info->hash_map) {
10792bfe3f2eSlogwang 		PMD_INIT_LOG(ERR,
10802bfe3f2eSlogwang 			     "Failed to allocate memory for fdir hash map!");
10812bfe3f2eSlogwang 		ret = -ENOMEM;
10822bfe3f2eSlogwang 		goto err_fdir_hash_map_alloc;
10832bfe3f2eSlogwang 	}
1084*2d9fd380Sjfb8856606 
1085*2d9fd380Sjfb8856606 	fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
1086*2d9fd380Sjfb8856606 			sizeof(struct i40e_fdir_filter) *
1087*2d9fd380Sjfb8856606 			I40E_MAX_FDIR_FILTER_NUM,
1088*2d9fd380Sjfb8856606 			0);
1089*2d9fd380Sjfb8856606 
1090*2d9fd380Sjfb8856606 	if (!fdir_info->fdir_filter_array) {
1091*2d9fd380Sjfb8856606 		PMD_INIT_LOG(ERR,
1092*2d9fd380Sjfb8856606 			     "Failed to allocate memory for fdir filter array!");
1093*2d9fd380Sjfb8856606 		ret = -ENOMEM;
1094*2d9fd380Sjfb8856606 		goto err_fdir_filter_array_alloc;
1095*2d9fd380Sjfb8856606 	}
1096*2d9fd380Sjfb8856606 
1097*2d9fd380Sjfb8856606 	fdir_info->fdir_space_size = alloc + best;
1098*2d9fd380Sjfb8856606 	fdir_info->fdir_actual_cnt = 0;
1099*2d9fd380Sjfb8856606 	fdir_info->fdir_guarantee_total_space = alloc;
1100*2d9fd380Sjfb8856606 	fdir_info->fdir_guarantee_free_space =
1101*2d9fd380Sjfb8856606 		fdir_info->fdir_guarantee_total_space;
1102*2d9fd380Sjfb8856606 
1103*2d9fd380Sjfb8856606 	PMD_DRV_LOG(INFO, "FDIR guarantee space: %u, best_effort space %u.", alloc, best);
1104*2d9fd380Sjfb8856606 
1105*2d9fd380Sjfb8856606 	fdir_info->fdir_flow_pool.pool =
1106*2d9fd380Sjfb8856606 			rte_zmalloc("i40e_fdir_entry",
1107*2d9fd380Sjfb8856606 				sizeof(struct i40e_fdir_entry) *
1108*2d9fd380Sjfb8856606 				fdir_info->fdir_space_size,
1109*2d9fd380Sjfb8856606 				0);
1110*2d9fd380Sjfb8856606 
1111*2d9fd380Sjfb8856606 	if (!fdir_info->fdir_flow_pool.pool) {
1112*2d9fd380Sjfb8856606 		PMD_INIT_LOG(ERR,
1113*2d9fd380Sjfb8856606 			     "Failed to allocate memory for bitmap flow!");
1114*2d9fd380Sjfb8856606 		ret = -ENOMEM;
1115*2d9fd380Sjfb8856606 		goto err_fdir_bitmap_flow_alloc;
1116*2d9fd380Sjfb8856606 	}
1117*2d9fd380Sjfb8856606 
1118*2d9fd380Sjfb8856606 	for (i = 0; i < fdir_info->fdir_space_size; i++)
1119*2d9fd380Sjfb8856606 		fdir_info->fdir_flow_pool.pool[i].idx = i;
1120*2d9fd380Sjfb8856606 
1121*2d9fd380Sjfb8856606 	bmp_size =
1122*2d9fd380Sjfb8856606 		rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
1123*2d9fd380Sjfb8856606 	mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
1124*2d9fd380Sjfb8856606 	if (mem == NULL) {
1125*2d9fd380Sjfb8856606 		PMD_INIT_LOG(ERR,
1126*2d9fd380Sjfb8856606 			     "Failed to allocate memory for fdir bitmap!");
1127*2d9fd380Sjfb8856606 		ret = -ENOMEM;
1128*2d9fd380Sjfb8856606 		goto err_fdir_mem_alloc;
1129*2d9fd380Sjfb8856606 	}
1130*2d9fd380Sjfb8856606 	bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
1131*2d9fd380Sjfb8856606 	if (bmp == NULL) {
1132*2d9fd380Sjfb8856606 		PMD_INIT_LOG(ERR,
1133*2d9fd380Sjfb8856606 			     "Failed to initialization fdir bitmap!");
1134*2d9fd380Sjfb8856606 		ret = -ENOMEM;
1135*2d9fd380Sjfb8856606 		goto err_fdir_bmp_alloc;
1136*2d9fd380Sjfb8856606 	}
1137*2d9fd380Sjfb8856606 	for (i = 0; i < fdir_info->fdir_space_size; i++)
1138*2d9fd380Sjfb8856606 		rte_bitmap_set(bmp, i);
1139*2d9fd380Sjfb8856606 
1140*2d9fd380Sjfb8856606 	fdir_info->fdir_flow_pool.bitmap = bmp;
1141*2d9fd380Sjfb8856606 
11422bfe3f2eSlogwang 	return 0;
11432bfe3f2eSlogwang 
1144*2d9fd380Sjfb8856606 err_fdir_bmp_alloc:
1145*2d9fd380Sjfb8856606 	rte_free(mem);
1146*2d9fd380Sjfb8856606 err_fdir_mem_alloc:
1147*2d9fd380Sjfb8856606 	rte_free(fdir_info->fdir_flow_pool.pool);
1148*2d9fd380Sjfb8856606 err_fdir_bitmap_flow_alloc:
1149*2d9fd380Sjfb8856606 	rte_free(fdir_info->fdir_filter_array);
1150*2d9fd380Sjfb8856606 err_fdir_filter_array_alloc:
1151*2d9fd380Sjfb8856606 	rte_free(fdir_info->hash_map);
11522bfe3f2eSlogwang err_fdir_hash_map_alloc:
11532bfe3f2eSlogwang 	rte_hash_free(fdir_info->hash_table);
11542bfe3f2eSlogwang 
11552bfe3f2eSlogwang 	return ret;
11562bfe3f2eSlogwang }
11572bfe3f2eSlogwang 
11582bfe3f2eSlogwang static void
i40e_init_customized_info(struct i40e_pf * pf)11592bfe3f2eSlogwang i40e_init_customized_info(struct i40e_pf *pf)
11602bfe3f2eSlogwang {
11612bfe3f2eSlogwang 	int i;
11622bfe3f2eSlogwang 
11632bfe3f2eSlogwang 	/* Initialize customized pctype */
11642bfe3f2eSlogwang 	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
11652bfe3f2eSlogwang 		pf->customized_pctype[i].index = i;
11662bfe3f2eSlogwang 		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
11672bfe3f2eSlogwang 		pf->customized_pctype[i].valid = false;
11682bfe3f2eSlogwang 	}
11692bfe3f2eSlogwang 
11702bfe3f2eSlogwang 	pf->gtp_support = false;
1171*2d9fd380Sjfb8856606 	pf->esp_support = false;
1172*2d9fd380Sjfb8856606 }
1173*2d9fd380Sjfb8856606 
1174*2d9fd380Sjfb8856606 static void
i40e_init_filter_invalidation(struct i40e_pf * pf)1175*2d9fd380Sjfb8856606 i40e_init_filter_invalidation(struct i40e_pf *pf)
1176*2d9fd380Sjfb8856606 {
1177*2d9fd380Sjfb8856606 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1178*2d9fd380Sjfb8856606 	struct i40e_fdir_info *fdir_info = &pf->fdir;
1179*2d9fd380Sjfb8856606 	uint32_t glqf_ctl_reg = 0;
1180*2d9fd380Sjfb8856606 
1181*2d9fd380Sjfb8856606 	glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
1182*2d9fd380Sjfb8856606 	if (!pf->support_multi_driver) {
1183*2d9fd380Sjfb8856606 		fdir_info->fdir_invalprio = 1;
1184*2d9fd380Sjfb8856606 		glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
1185*2d9fd380Sjfb8856606 		PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
1186*2d9fd380Sjfb8856606 		i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
1187*2d9fd380Sjfb8856606 	} else {
1188*2d9fd380Sjfb8856606 		if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
1189*2d9fd380Sjfb8856606 			fdir_info->fdir_invalprio = 1;
1190*2d9fd380Sjfb8856606 			PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
1191*2d9fd380Sjfb8856606 		} else {
1192*2d9fd380Sjfb8856606 			fdir_info->fdir_invalprio = 0;
1193*2d9fd380Sjfb8856606 			PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
1194*2d9fd380Sjfb8856606 		}
1195*2d9fd380Sjfb8856606 	}
11962bfe3f2eSlogwang }
11972bfe3f2eSlogwang 
11982bfe3f2eSlogwang void
i40e_init_queue_region_conf(struct rte_eth_dev * dev)11992bfe3f2eSlogwang i40e_init_queue_region_conf(struct rte_eth_dev *dev)
12002bfe3f2eSlogwang {
12012bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12022bfe3f2eSlogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12032bfe3f2eSlogwang 	struct i40e_queue_regions *info = &pf->queue_region;
12042bfe3f2eSlogwang 	uint16_t i;
12052bfe3f2eSlogwang 
12062bfe3f2eSlogwang 	for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
12072bfe3f2eSlogwang 		i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
12082bfe3f2eSlogwang 
12092bfe3f2eSlogwang 	memset(info, 0, sizeof(struct i40e_queue_regions));
12102bfe3f2eSlogwang }
12112bfe3f2eSlogwang 
12122bfe3f2eSlogwang static int
i40e_parse_multi_drv_handler(__rte_unused const char * key,const char * value,void * opaque)12132bfe3f2eSlogwang i40e_parse_multi_drv_handler(__rte_unused const char *key,
12142bfe3f2eSlogwang 			       const char *value,
12152bfe3f2eSlogwang 			       void *opaque)
12162bfe3f2eSlogwang {
12172bfe3f2eSlogwang 	struct i40e_pf *pf;
12182bfe3f2eSlogwang 	unsigned long support_multi_driver;
12192bfe3f2eSlogwang 	char *end;
12202bfe3f2eSlogwang 
12212bfe3f2eSlogwang 	pf = (struct i40e_pf *)opaque;
12222bfe3f2eSlogwang 
12232bfe3f2eSlogwang 	errno = 0;
12242bfe3f2eSlogwang 	support_multi_driver = strtoul(value, &end, 10);
12252bfe3f2eSlogwang 	if (errno != 0 || end == value || *end != 0) {
12262bfe3f2eSlogwang 		PMD_DRV_LOG(WARNING, "Wrong global configuration");
12272bfe3f2eSlogwang 		return -(EINVAL);
12282bfe3f2eSlogwang 	}
12292bfe3f2eSlogwang 
12302bfe3f2eSlogwang 	if (support_multi_driver == 1 || support_multi_driver == 0)
12312bfe3f2eSlogwang 		pf->support_multi_driver = (bool)support_multi_driver;
12322bfe3f2eSlogwang 	else
12332bfe3f2eSlogwang 		PMD_DRV_LOG(WARNING, "%s must be 1 or 0,",
12342bfe3f2eSlogwang 			    "enable global configuration by default."
12352bfe3f2eSlogwang 			    ETH_I40E_SUPPORT_MULTI_DRIVER);
12362bfe3f2eSlogwang 	return 0;
12372bfe3f2eSlogwang }
12382bfe3f2eSlogwang 
12392bfe3f2eSlogwang static int
i40e_support_multi_driver(struct rte_eth_dev * dev)12402bfe3f2eSlogwang i40e_support_multi_driver(struct rte_eth_dev *dev)
12412bfe3f2eSlogwang {
12422bfe3f2eSlogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12432bfe3f2eSlogwang 	struct rte_kvargs *kvlist;
1244d30ea906Sjfb8856606 	int kvargs_count;
12452bfe3f2eSlogwang 
12462bfe3f2eSlogwang 	/* Enable global configuration by default */
12472bfe3f2eSlogwang 	pf->support_multi_driver = false;
12482bfe3f2eSlogwang 
12492bfe3f2eSlogwang 	if (!dev->device->devargs)
12502bfe3f2eSlogwang 		return 0;
12512bfe3f2eSlogwang 
12522bfe3f2eSlogwang 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
12532bfe3f2eSlogwang 	if (!kvlist)
12542bfe3f2eSlogwang 		return -EINVAL;
12552bfe3f2eSlogwang 
1256d30ea906Sjfb8856606 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
1257d30ea906Sjfb8856606 	if (!kvargs_count) {
1258d30ea906Sjfb8856606 		rte_kvargs_free(kvlist);
1259d30ea906Sjfb8856606 		return 0;
1260d30ea906Sjfb8856606 	}
1261d30ea906Sjfb8856606 
1262d30ea906Sjfb8856606 	if (kvargs_count > 1)
12632bfe3f2eSlogwang 		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
12642bfe3f2eSlogwang 			    "the first invalid or last valid one is used !",
12652bfe3f2eSlogwang 			    ETH_I40E_SUPPORT_MULTI_DRIVER);
12662bfe3f2eSlogwang 
12672bfe3f2eSlogwang 	if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
12682bfe3f2eSlogwang 			       i40e_parse_multi_drv_handler, pf) < 0) {
12692bfe3f2eSlogwang 		rte_kvargs_free(kvlist);
12702bfe3f2eSlogwang 		return -EINVAL;
12712bfe3f2eSlogwang 	}
12722bfe3f2eSlogwang 
12732bfe3f2eSlogwang 	rte_kvargs_free(kvlist);
12742bfe3f2eSlogwang 	return 0;
12752bfe3f2eSlogwang }
12762bfe3f2eSlogwang 
12772bfe3f2eSlogwang static int
i40e_aq_debug_write_global_register(struct i40e_hw * hw,uint32_t reg_addr,uint64_t reg_val,struct i40e_asq_cmd_details * cmd_details)1278d30ea906Sjfb8856606 i40e_aq_debug_write_global_register(struct i40e_hw *hw,
1279d30ea906Sjfb8856606 				    uint32_t reg_addr, uint64_t reg_val,
1280d30ea906Sjfb8856606 				    struct i40e_asq_cmd_details *cmd_details)
1281d30ea906Sjfb8856606 {
1282d30ea906Sjfb8856606 	uint64_t ori_reg_val;
1283d30ea906Sjfb8856606 	struct rte_eth_dev *dev;
1284d30ea906Sjfb8856606 	int ret;
1285d30ea906Sjfb8856606 
1286d30ea906Sjfb8856606 	ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
1287d30ea906Sjfb8856606 	if (ret != I40E_SUCCESS) {
1288d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR,
1289d30ea906Sjfb8856606 			    "Fail to debug read from 0x%08x",
1290d30ea906Sjfb8856606 			    reg_addr);
1291d30ea906Sjfb8856606 		return -EIO;
1292d30ea906Sjfb8856606 	}
1293d30ea906Sjfb8856606 	dev = ((struct i40e_adapter *)hw->back)->eth_dev;
1294d30ea906Sjfb8856606 
1295d30ea906Sjfb8856606 	if (ori_reg_val != reg_val)
1296d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING,
1297d30ea906Sjfb8856606 			    "i40e device %s changed global register [0x%08x]."
1298d30ea906Sjfb8856606 			    " original: 0x%"PRIx64", after: 0x%"PRIx64,
1299d30ea906Sjfb8856606 			    dev->device->name, reg_addr, ori_reg_val, reg_val);
1300d30ea906Sjfb8856606 
1301d30ea906Sjfb8856606 	return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
1302d30ea906Sjfb8856606 }
1303d30ea906Sjfb8856606 
1304d30ea906Sjfb8856606 static int
i40e_parse_latest_vec_handler(__rte_unused const char * key,const char * value,void * opaque)1305d30ea906Sjfb8856606 i40e_parse_latest_vec_handler(__rte_unused const char *key,
1306d30ea906Sjfb8856606 				const char *value,
1307d30ea906Sjfb8856606 				void *opaque)
1308d30ea906Sjfb8856606 {
13094b05018fSfengbojiang 	struct i40e_adapter *ad = opaque;
1310d30ea906Sjfb8856606 	int use_latest_vec;
1311d30ea906Sjfb8856606 
1312d30ea906Sjfb8856606 	use_latest_vec = atoi(value);
1313d30ea906Sjfb8856606 
1314d30ea906Sjfb8856606 	if (use_latest_vec != 0 && use_latest_vec != 1)
1315d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!");
1316d30ea906Sjfb8856606 
1317d30ea906Sjfb8856606 	ad->use_latest_vec = (uint8_t)use_latest_vec;
1318d30ea906Sjfb8856606 
1319d30ea906Sjfb8856606 	return 0;
1320d30ea906Sjfb8856606 }
1321d30ea906Sjfb8856606 
1322d30ea906Sjfb8856606 static int
i40e_use_latest_vec(struct rte_eth_dev * dev)1323d30ea906Sjfb8856606 i40e_use_latest_vec(struct rte_eth_dev *dev)
1324d30ea906Sjfb8856606 {
1325d30ea906Sjfb8856606 	struct i40e_adapter *ad =
1326d30ea906Sjfb8856606 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1327d30ea906Sjfb8856606 	struct rte_kvargs *kvlist;
1328d30ea906Sjfb8856606 	int kvargs_count;
1329d30ea906Sjfb8856606 
1330d30ea906Sjfb8856606 	ad->use_latest_vec = false;
1331d30ea906Sjfb8856606 
1332d30ea906Sjfb8856606 	if (!dev->device->devargs)
1333d30ea906Sjfb8856606 		return 0;
1334d30ea906Sjfb8856606 
1335d30ea906Sjfb8856606 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1336d30ea906Sjfb8856606 	if (!kvlist)
1337d30ea906Sjfb8856606 		return -EINVAL;
1338d30ea906Sjfb8856606 
1339d30ea906Sjfb8856606 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC);
1340d30ea906Sjfb8856606 	if (!kvargs_count) {
1341d30ea906Sjfb8856606 		rte_kvargs_free(kvlist);
1342d30ea906Sjfb8856606 		return 0;
1343d30ea906Sjfb8856606 	}
1344d30ea906Sjfb8856606 
1345d30ea906Sjfb8856606 	if (kvargs_count > 1)
1346d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
1347d30ea906Sjfb8856606 			    "the first invalid or last valid one is used !",
1348d30ea906Sjfb8856606 			    ETH_I40E_USE_LATEST_VEC);
1349d30ea906Sjfb8856606 
1350d30ea906Sjfb8856606 	if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC,
1351d30ea906Sjfb8856606 				i40e_parse_latest_vec_handler, ad) < 0) {
1352d30ea906Sjfb8856606 		rte_kvargs_free(kvlist);
1353d30ea906Sjfb8856606 		return -EINVAL;
1354d30ea906Sjfb8856606 	}
1355d30ea906Sjfb8856606 
1356d30ea906Sjfb8856606 	rte_kvargs_free(kvlist);
1357d30ea906Sjfb8856606 	return 0;
1358d30ea906Sjfb8856606 }
1359d30ea906Sjfb8856606 
13604418919fSjohnjiang static int
read_vf_msg_config(__rte_unused const char * key,const char * value,void * opaque)13614418919fSjohnjiang read_vf_msg_config(__rte_unused const char *key,
13624418919fSjohnjiang 			       const char *value,
13634418919fSjohnjiang 			       void *opaque)
13644418919fSjohnjiang {
13654418919fSjohnjiang 	struct i40e_vf_msg_cfg *cfg = opaque;
13664418919fSjohnjiang 
13674418919fSjohnjiang 	if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
13684418919fSjohnjiang 			&cfg->ignore_second) != 3) {
13694418919fSjohnjiang 		memset(cfg, 0, sizeof(*cfg));
13704418919fSjohnjiang 		PMD_DRV_LOG(ERR, "format error! example: "
13714418919fSjohnjiang 				"%s=60@120:180", ETH_I40E_VF_MSG_CFG);
13724418919fSjohnjiang 		return -EINVAL;
13734418919fSjohnjiang 	}
13744418919fSjohnjiang 
13754418919fSjohnjiang 	/*
13764418919fSjohnjiang 	 * If the message validation function been enabled, the 'period'
13774418919fSjohnjiang 	 * and 'ignore_second' must greater than 0.
13784418919fSjohnjiang 	 */
13794418919fSjohnjiang 	if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
13804418919fSjohnjiang 		memset(cfg, 0, sizeof(*cfg));
13814418919fSjohnjiang 		PMD_DRV_LOG(ERR, "%s error! the second and third"
13824418919fSjohnjiang 				" number must be greater than 0!",
13834418919fSjohnjiang 				ETH_I40E_VF_MSG_CFG);
13844418919fSjohnjiang 		return -EINVAL;
13854418919fSjohnjiang 	}
13864418919fSjohnjiang 
13874418919fSjohnjiang 	return 0;
13884418919fSjohnjiang }
13894418919fSjohnjiang 
13904418919fSjohnjiang static int
i40e_parse_vf_msg_config(struct rte_eth_dev * dev,struct i40e_vf_msg_cfg * msg_cfg)13914418919fSjohnjiang i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
13924418919fSjohnjiang 		struct i40e_vf_msg_cfg *msg_cfg)
13934418919fSjohnjiang {
13944418919fSjohnjiang 	struct rte_kvargs *kvlist;
13954418919fSjohnjiang 	int kvargs_count;
13964418919fSjohnjiang 	int ret = 0;
13974418919fSjohnjiang 
13984418919fSjohnjiang 	memset(msg_cfg, 0, sizeof(*msg_cfg));
13994418919fSjohnjiang 
14004418919fSjohnjiang 	if (!dev->device->devargs)
14014418919fSjohnjiang 		return ret;
14024418919fSjohnjiang 
14034418919fSjohnjiang 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
14044418919fSjohnjiang 	if (!kvlist)
14054418919fSjohnjiang 		return -EINVAL;
14064418919fSjohnjiang 
14074418919fSjohnjiang 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
14084418919fSjohnjiang 	if (!kvargs_count)
14094418919fSjohnjiang 		goto free_end;
14104418919fSjohnjiang 
14114418919fSjohnjiang 	if (kvargs_count > 1) {
14124418919fSjohnjiang 		PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
14134418919fSjohnjiang 				ETH_I40E_VF_MSG_CFG);
14144418919fSjohnjiang 		ret = -EINVAL;
14154418919fSjohnjiang 		goto free_end;
14164418919fSjohnjiang 	}
14174418919fSjohnjiang 
14184418919fSjohnjiang 	if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
14194418919fSjohnjiang 			read_vf_msg_config, msg_cfg) < 0)
14204418919fSjohnjiang 		ret = -EINVAL;
14214418919fSjohnjiang 
14224418919fSjohnjiang free_end:
14234418919fSjohnjiang 	rte_kvargs_free(kvlist);
14244418919fSjohnjiang 	return ret;
14254418919fSjohnjiang }
14264418919fSjohnjiang 
1427d30ea906Sjfb8856606 #define I40E_ALARM_INTERVAL 50000 /* us */
1428d30ea906Sjfb8856606 
1429d30ea906Sjfb8856606 static int
eth_i40e_dev_init(struct rte_eth_dev * dev,void * init_params __rte_unused)1430d30ea906Sjfb8856606 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1431a9643ea8Slogwang {
1432a9643ea8Slogwang 	struct rte_pci_device *pci_dev;
14332bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle;
1434a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1435a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1436a9643ea8Slogwang 	struct i40e_vsi *vsi;
1437a9643ea8Slogwang 	int ret;
14381646932aSjfb8856606 	uint32_t len, val;
1439a9643ea8Slogwang 	uint8_t aq_fail = 0;
1440a9643ea8Slogwang 
1441a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1442a9643ea8Slogwang 
1443a9643ea8Slogwang 	dev->dev_ops = &i40e_eth_dev_ops;
1444*2d9fd380Sjfb8856606 	dev->rx_queue_count = i40e_dev_rx_queue_count;
1445*2d9fd380Sjfb8856606 	dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
1446*2d9fd380Sjfb8856606 	dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
1447*2d9fd380Sjfb8856606 	dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
1448a9643ea8Slogwang 	dev->rx_pkt_burst = i40e_recv_pkts;
1449a9643ea8Slogwang 	dev->tx_pkt_burst = i40e_xmit_pkts;
14502bfe3f2eSlogwang 	dev->tx_pkt_prepare = i40e_prep_pkts;
1451a9643ea8Slogwang 
1452a9643ea8Slogwang 	/* for secondary processes, we don't initialise any further as primary
1453a9643ea8Slogwang 	 * has already done this work. Only check we don't need a different
1454a9643ea8Slogwang 	 * RX function */
1455a9643ea8Slogwang 	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1456a9643ea8Slogwang 		i40e_set_rx_function(dev);
1457a9643ea8Slogwang 		i40e_set_tx_function(dev);
1458a9643ea8Slogwang 		return 0;
1459a9643ea8Slogwang 	}
14602bfe3f2eSlogwang 	i40e_set_default_ptype_table(dev);
14612bfe3f2eSlogwang 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
14622bfe3f2eSlogwang 	intr_handle = &pci_dev->intr_handle;
1463a9643ea8Slogwang 
1464a9643ea8Slogwang 	rte_eth_copy_pci_info(dev, pci_dev);
1465*2d9fd380Sjfb8856606 	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1466a9643ea8Slogwang 
1467a9643ea8Slogwang 	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1468a9643ea8Slogwang 	pf->adapter->eth_dev = dev;
1469a9643ea8Slogwang 	pf->dev_data = dev->data;
1470a9643ea8Slogwang 
1471a9643ea8Slogwang 	hw->back = I40E_PF_TO_ADAPTER(pf);
1472a9643ea8Slogwang 	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1473a9643ea8Slogwang 	if (!hw->hw_addr) {
14742bfe3f2eSlogwang 		PMD_INIT_LOG(ERR,
14752bfe3f2eSlogwang 			"Hardware is not available, as address is NULL");
1476a9643ea8Slogwang 		return -ENODEV;
1477a9643ea8Slogwang 	}
1478a9643ea8Slogwang 
1479a9643ea8Slogwang 	hw->vendor_id = pci_dev->id.vendor_id;
1480a9643ea8Slogwang 	hw->device_id = pci_dev->id.device_id;
1481a9643ea8Slogwang 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1482a9643ea8Slogwang 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1483a9643ea8Slogwang 	hw->bus.device = pci_dev->addr.devid;
1484a9643ea8Slogwang 	hw->bus.func = pci_dev->addr.function;
1485a9643ea8Slogwang 	hw->adapter_stopped = 0;
14861646932aSjfb8856606 	hw->adapter_closed = 0;
1487a9643ea8Slogwang 
14884418919fSjohnjiang 	/* Init switch device pointer */
14894418919fSjohnjiang 	hw->switch_dev = NULL;
14904418919fSjohnjiang 
1491579bf1e2Sjfb8856606 	/*
1492579bf1e2Sjfb8856606 	 * Switch Tag value should not be identical to either the First Tag
1493579bf1e2Sjfb8856606 	 * or Second Tag values. So set something other than common Ethertype
1494579bf1e2Sjfb8856606 	 * for internal switching.
1495579bf1e2Sjfb8856606 	 */
1496579bf1e2Sjfb8856606 	hw->switch_tag = 0xffff;
1497579bf1e2Sjfb8856606 
14981646932aSjfb8856606 	val = I40E_READ_REG(hw, I40E_GL_FWSTS);
14991646932aSjfb8856606 	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
15001646932aSjfb8856606 		PMD_INIT_LOG(ERR, "\nERROR: "
15011646932aSjfb8856606 			"Firmware recovery mode detected. Limiting functionality.\n"
15021646932aSjfb8856606 			"Refer to the Intel(R) Ethernet Adapters and Devices "
15031646932aSjfb8856606 			"User Guide for details on firmware recovery mode.");
15041646932aSjfb8856606 		return -EIO;
15051646932aSjfb8856606 	}
15061646932aSjfb8856606 
15074418919fSjohnjiang 	i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
15082bfe3f2eSlogwang 	/* Check if need to support multi-driver */
15092bfe3f2eSlogwang 	i40e_support_multi_driver(dev);
1510d30ea906Sjfb8856606 	/* Check if users want the latest supported vec path */
1511d30ea906Sjfb8856606 	i40e_use_latest_vec(dev);
15122bfe3f2eSlogwang 
1513a9643ea8Slogwang 	/* Make sure all is clean before doing PF reset */
1514a9643ea8Slogwang 	i40e_clear_hw(hw);
1515a9643ea8Slogwang 
1516a9643ea8Slogwang 	/* Reset here to make sure all is clean for each PF */
1517a9643ea8Slogwang 	ret = i40e_pf_reset(hw);
1518a9643ea8Slogwang 	if (ret) {
1519a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1520a9643ea8Slogwang 		return ret;
1521a9643ea8Slogwang 	}
1522a9643ea8Slogwang 
1523a9643ea8Slogwang 	/* Initialize the shared code (base driver) */
1524a9643ea8Slogwang 	ret = i40e_init_shared_code(hw);
1525a9643ea8Slogwang 	if (ret) {
1526a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1527a9643ea8Slogwang 		return ret;
1528a9643ea8Slogwang 	}
1529a9643ea8Slogwang 
1530a9643ea8Slogwang 	/* Initialize the parameters for adminq */
1531a9643ea8Slogwang 	i40e_init_adminq_parameter(hw);
1532a9643ea8Slogwang 	ret = i40e_init_adminq(hw);
1533a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
1534a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1535a9643ea8Slogwang 		return -EIO;
1536a9643ea8Slogwang 	}
15374b05018fSfengbojiang 	/* Firmware of SFP x722 does not support adminq option */
15384b05018fSfengbojiang 	if (hw->device_id == I40E_DEV_ID_SFP_X722)
15394b05018fSfengbojiang 		hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
15404b05018fSfengbojiang 
1541a9643ea8Slogwang 	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1542a9643ea8Slogwang 		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1543a9643ea8Slogwang 		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
1544a9643ea8Slogwang 		     ((hw->nvm.version >> 12) & 0xf),
1545a9643ea8Slogwang 		     ((hw->nvm.version >> 4) & 0xff),
1546a9643ea8Slogwang 		     (hw->nvm.version & 0xf), hw->nvm.eetrack);
1547a9643ea8Slogwang 
1548d30ea906Sjfb8856606 	/* Initialize the hardware */
1549d30ea906Sjfb8856606 	i40e_hw_init(dev);
1550d30ea906Sjfb8856606 
1551d30ea906Sjfb8856606 	i40e_config_automask(pf);
1552d30ea906Sjfb8856606 
1553d30ea906Sjfb8856606 	i40e_set_default_pctype_table(dev);
1554d30ea906Sjfb8856606 
1555d30ea906Sjfb8856606 	/*
1556d30ea906Sjfb8856606 	 * To work around the NVM issue, initialize registers
1557d30ea906Sjfb8856606 	 * for packet type of QinQ by software.
1558d30ea906Sjfb8856606 	 * It should be removed once issues are fixed in NVM.
1559d30ea906Sjfb8856606 	 */
1560d30ea906Sjfb8856606 	if (!pf->support_multi_driver)
1561d30ea906Sjfb8856606 		i40e_GLQF_reg_init(hw);
1562d30ea906Sjfb8856606 
1563d30ea906Sjfb8856606 	/* Initialize the input set for filters (hash and fd) to default value */
1564d30ea906Sjfb8856606 	i40e_filter_input_set_init(pf);
1565d30ea906Sjfb8856606 
15662bfe3f2eSlogwang 	/* initialise the L3_MAP register */
15672bfe3f2eSlogwang 	if (!pf->support_multi_driver) {
1568d30ea906Sjfb8856606 		ret = i40e_aq_debug_write_global_register(hw,
1569d30ea906Sjfb8856606 						   I40E_GLQF_L3_MAP(40),
15702bfe3f2eSlogwang 						   0x00000028,	NULL);
15712bfe3f2eSlogwang 		if (ret)
15722bfe3f2eSlogwang 			PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
15732bfe3f2eSlogwang 				     ret);
15742bfe3f2eSlogwang 		PMD_INIT_LOG(DEBUG,
15752bfe3f2eSlogwang 			     "Global register 0x%08x is changed with 0x28",
15762bfe3f2eSlogwang 			     I40E_GLQF_L3_MAP(40));
15772bfe3f2eSlogwang 	}
15782bfe3f2eSlogwang 
1579a9643ea8Slogwang 	/* Need the special FW version to support floating VEB */
1580a9643ea8Slogwang 	config_floating_veb(dev);
1581a9643ea8Slogwang 	/* Clear PXE mode */
1582a9643ea8Slogwang 	i40e_clear_pxe_mode(hw);
15832bfe3f2eSlogwang 	i40e_dev_sync_phy_type(hw);
1584a9643ea8Slogwang 
1585a9643ea8Slogwang 	/*
1586a9643ea8Slogwang 	 * On X710, performance number is far from the expectation on recent
1587a9643ea8Slogwang 	 * firmware versions. The fix for this issue may not be integrated in
1588a9643ea8Slogwang 	 * the following firmware version. So the workaround in software driver
1589a9643ea8Slogwang 	 * is needed. It needs to modify the initial values of 3 internal only
1590a9643ea8Slogwang 	 * registers. Note that the workaround can be removed when it is fixed
1591a9643ea8Slogwang 	 * in firmware in the future.
1592a9643ea8Slogwang 	 */
1593a9643ea8Slogwang 	i40e_configure_registers(hw);
1594a9643ea8Slogwang 
1595a9643ea8Slogwang 	/* Get hw capabilities */
1596a9643ea8Slogwang 	ret = i40e_get_cap(hw);
1597a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
1598a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1599a9643ea8Slogwang 		goto err_get_capabilities;
1600a9643ea8Slogwang 	}
1601a9643ea8Slogwang 
1602a9643ea8Slogwang 	/* Initialize parameters for PF */
1603a9643ea8Slogwang 	ret = i40e_pf_parameter_init(dev);
1604a9643ea8Slogwang 	if (ret != 0) {
1605a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1606a9643ea8Slogwang 		goto err_parameter_init;
1607a9643ea8Slogwang 	}
1608a9643ea8Slogwang 
1609a9643ea8Slogwang 	/* Initialize the queue management */
1610a9643ea8Slogwang 	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1611a9643ea8Slogwang 	if (ret < 0) {
1612a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to init queue pool");
1613a9643ea8Slogwang 		goto err_qp_pool_init;
1614a9643ea8Slogwang 	}
1615a9643ea8Slogwang 	ret = i40e_res_pool_init(&pf->msix_pool, 1,
1616a9643ea8Slogwang 				hw->func_caps.num_msix_vectors - 1);
1617a9643ea8Slogwang 	if (ret < 0) {
1618a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1619a9643ea8Slogwang 		goto err_msix_pool_init;
1620a9643ea8Slogwang 	}
1621a9643ea8Slogwang 
1622a9643ea8Slogwang 	/* Initialize lan hmc */
1623a9643ea8Slogwang 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1624a9643ea8Slogwang 				hw->func_caps.num_rx_qp, 0, 0);
1625a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
1626a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1627a9643ea8Slogwang 		goto err_init_lan_hmc;
1628a9643ea8Slogwang 	}
1629a9643ea8Slogwang 
1630a9643ea8Slogwang 	/* Configure lan hmc */
1631a9643ea8Slogwang 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1632a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
1633a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1634a9643ea8Slogwang 		goto err_configure_lan_hmc;
1635a9643ea8Slogwang 	}
1636a9643ea8Slogwang 
1637a9643ea8Slogwang 	/* Get and check the mac address */
1638a9643ea8Slogwang 	i40e_get_mac_addr(hw, hw->mac.addr);
1639a9643ea8Slogwang 	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1640a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "mac address is not valid");
1641a9643ea8Slogwang 		ret = -EIO;
1642a9643ea8Slogwang 		goto err_get_mac_addr;
1643a9643ea8Slogwang 	}
1644a9643ea8Slogwang 	/* Copy the permanent MAC address */
16454418919fSjohnjiang 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
16464418919fSjohnjiang 			(struct rte_ether_addr *)hw->mac.perm_addr);
1647a9643ea8Slogwang 
1648a9643ea8Slogwang 	/* Disable flow control */
1649a9643ea8Slogwang 	hw->fc.requested_mode = I40E_FC_NONE;
1650a9643ea8Slogwang 	i40e_set_fc(hw, &aq_fail, TRUE);
1651a9643ea8Slogwang 
1652a9643ea8Slogwang 	/* Set the global registers with default ether type value */
16532bfe3f2eSlogwang 	if (!pf->support_multi_driver) {
16542bfe3f2eSlogwang 		ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
16554418919fSjohnjiang 					 RTE_ETHER_TYPE_VLAN);
1656a9643ea8Slogwang 		if (ret != I40E_SUCCESS) {
16572bfe3f2eSlogwang 			PMD_INIT_LOG(ERR,
16582bfe3f2eSlogwang 				     "Failed to set the default outer "
1659a9643ea8Slogwang 				     "VLAN ether type");
1660a9643ea8Slogwang 			goto err_setup_pf_switch;
1661a9643ea8Slogwang 		}
16622bfe3f2eSlogwang 	}
1663a9643ea8Slogwang 
1664a9643ea8Slogwang 	/* PF setup, which includes VSI setup */
1665a9643ea8Slogwang 	ret = i40e_pf_setup(pf);
1666a9643ea8Slogwang 	if (ret) {
1667a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1668a9643ea8Slogwang 		goto err_setup_pf_switch;
1669a9643ea8Slogwang 	}
1670a9643ea8Slogwang 
1671a9643ea8Slogwang 	vsi = pf->main_vsi;
1672a9643ea8Slogwang 
1673a9643ea8Slogwang 	/* Disable double vlan by default */
1674a9643ea8Slogwang 	i40e_vsi_config_double_vlan(vsi, FALSE);
1675a9643ea8Slogwang 
1676a9643ea8Slogwang 	/* Disable S-TAG identification when floating_veb is disabled */
1677a9643ea8Slogwang 	if (!pf->floating_veb) {
1678a9643ea8Slogwang 		ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1679a9643ea8Slogwang 		if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1680a9643ea8Slogwang 			ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1681a9643ea8Slogwang 			I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1682a9643ea8Slogwang 		}
1683a9643ea8Slogwang 	}
1684a9643ea8Slogwang 
1685a9643ea8Slogwang 	if (!vsi->max_macaddrs)
16864418919fSjohnjiang 		len = RTE_ETHER_ADDR_LEN;
1687a9643ea8Slogwang 	else
16884418919fSjohnjiang 		len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
1689a9643ea8Slogwang 
1690a9643ea8Slogwang 	/* Should be after VSI initialized */
1691a9643ea8Slogwang 	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1692a9643ea8Slogwang 	if (!dev->data->mac_addrs) {
16932bfe3f2eSlogwang 		PMD_INIT_LOG(ERR,
16942bfe3f2eSlogwang 			"Failed to allocated memory for storing mac address");
1695a9643ea8Slogwang 		goto err_mac_alloc;
1696a9643ea8Slogwang 	}
16974418919fSjohnjiang 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1698a9643ea8Slogwang 					&dev->data->mac_addrs[0]);
1699a9643ea8Slogwang 
17002bfe3f2eSlogwang 	/* Init dcb to sw mode by default */
17012bfe3f2eSlogwang 	ret = i40e_dcb_init_configure(dev, TRUE);
17022bfe3f2eSlogwang 	if (ret != I40E_SUCCESS) {
17032bfe3f2eSlogwang 		PMD_INIT_LOG(INFO, "Failed to init dcb.");
17042bfe3f2eSlogwang 		pf->flags &= ~I40E_FLAG_DCB;
17052bfe3f2eSlogwang 	}
17062bfe3f2eSlogwang 	/* Update HW struct after DCB configuration */
17072bfe3f2eSlogwang 	i40e_get_cap(hw);
17082bfe3f2eSlogwang 
1709a9643ea8Slogwang 	/* initialize pf host driver to setup SRIOV resource if applicable */
1710a9643ea8Slogwang 	i40e_pf_host_init(dev);
1711a9643ea8Slogwang 
1712a9643ea8Slogwang 	/* register callback func to eal lib */
17132bfe3f2eSlogwang 	rte_intr_callback_register(intr_handle,
17142bfe3f2eSlogwang 				   i40e_dev_interrupt_handler, dev);
1715a9643ea8Slogwang 
1716a9643ea8Slogwang 	/* configure and enable device interrupt */
1717a9643ea8Slogwang 	i40e_pf_config_irq0(hw, TRUE);
1718a9643ea8Slogwang 	i40e_pf_enable_irq0(hw);
1719a9643ea8Slogwang 
1720a9643ea8Slogwang 	/* enable uio intr after callback register */
17212bfe3f2eSlogwang 	rte_intr_enable(intr_handle);
1722d30ea906Sjfb8856606 
1723d30ea906Sjfb8856606 	/* By default disable flexible payload in global configuration */
1724d30ea906Sjfb8856606 	if (!pf->support_multi_driver)
1725d30ea906Sjfb8856606 		i40e_flex_payload_reg_set_default(hw);
1726d30ea906Sjfb8856606 
1727a9643ea8Slogwang 	/*
1728a9643ea8Slogwang 	 * Add an ethertype filter to drop all flow control frames transmitted
1729a9643ea8Slogwang 	 * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1730a9643ea8Slogwang 	 * frames to wire.
1731a9643ea8Slogwang 	 */
1732a9643ea8Slogwang 	i40e_add_tx_flow_control_drop_filter(pf);
1733a9643ea8Slogwang 
1734a9643ea8Slogwang 	/* Set the max frame size to 0x2600 by default,
1735a9643ea8Slogwang 	 * in case other drivers changed the default value.
1736a9643ea8Slogwang 	 */
1737*2d9fd380Sjfb8856606 	i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
1738a9643ea8Slogwang 
1739a9643ea8Slogwang 	/* initialize mirror rule list */
1740a9643ea8Slogwang 	TAILQ_INIT(&pf->mirror_list);
1741a9643ea8Slogwang 
1742*2d9fd380Sjfb8856606 	/* initialize RSS rule list */
1743*2d9fd380Sjfb8856606 	TAILQ_INIT(&pf->rss_config_list);
1744*2d9fd380Sjfb8856606 
17452bfe3f2eSlogwang 	/* initialize Traffic Manager configuration */
17462bfe3f2eSlogwang 	i40e_tm_conf_init(dev);
17472bfe3f2eSlogwang 
17482bfe3f2eSlogwang 	/* Initialize customized information */
17492bfe3f2eSlogwang 	i40e_init_customized_info(pf);
17502bfe3f2eSlogwang 
1751*2d9fd380Sjfb8856606 	/* Initialize the filter invalidation configuration */
1752*2d9fd380Sjfb8856606 	i40e_init_filter_invalidation(pf);
1753*2d9fd380Sjfb8856606 
17542bfe3f2eSlogwang 	ret = i40e_init_ethtype_filter_list(dev);
17552bfe3f2eSlogwang 	if (ret < 0)
17562bfe3f2eSlogwang 		goto err_init_ethtype_filter_list;
17572bfe3f2eSlogwang 	ret = i40e_init_tunnel_filter_list(dev);
17582bfe3f2eSlogwang 	if (ret < 0)
17592bfe3f2eSlogwang 		goto err_init_tunnel_filter_list;
17602bfe3f2eSlogwang 	ret = i40e_init_fdir_filter_list(dev);
17612bfe3f2eSlogwang 	if (ret < 0)
17622bfe3f2eSlogwang 		goto err_init_fdir_filter_list;
17632bfe3f2eSlogwang 
17642bfe3f2eSlogwang 	/* initialize queue region configuration */
17652bfe3f2eSlogwang 	i40e_init_queue_region_conf(dev);
1766a9643ea8Slogwang 
1767*2d9fd380Sjfb8856606 	/* initialize RSS configuration from rte_flow */
1768d30ea906Sjfb8856606 	memset(&pf->rss_info, 0,
1769d30ea906Sjfb8856606 		sizeof(struct i40e_rte_flow_rss_conf));
1770d30ea906Sjfb8856606 
17711646932aSjfb8856606 	/* reset all stats of the device, including pf and main vsi */
17721646932aSjfb8856606 	i40e_dev_stats_reset(dev);
17731646932aSjfb8856606 
1774a9643ea8Slogwang 	return 0;
1775a9643ea8Slogwang 
17762bfe3f2eSlogwang err_init_fdir_filter_list:
17772bfe3f2eSlogwang 	rte_free(pf->tunnel.hash_table);
17782bfe3f2eSlogwang 	rte_free(pf->tunnel.hash_map);
17792bfe3f2eSlogwang err_init_tunnel_filter_list:
17802bfe3f2eSlogwang 	rte_free(pf->ethertype.hash_table);
17812bfe3f2eSlogwang 	rte_free(pf->ethertype.hash_map);
17822bfe3f2eSlogwang err_init_ethtype_filter_list:
17832bfe3f2eSlogwang 	rte_free(dev->data->mac_addrs);
17844b05018fSfengbojiang 	dev->data->mac_addrs = NULL;
1785a9643ea8Slogwang err_mac_alloc:
1786a9643ea8Slogwang 	i40e_vsi_release(pf->main_vsi);
1787a9643ea8Slogwang err_setup_pf_switch:
1788a9643ea8Slogwang err_get_mac_addr:
1789a9643ea8Slogwang err_configure_lan_hmc:
1790a9643ea8Slogwang 	(void)i40e_shutdown_lan_hmc(hw);
1791a9643ea8Slogwang err_init_lan_hmc:
1792a9643ea8Slogwang 	i40e_res_pool_destroy(&pf->msix_pool);
1793a9643ea8Slogwang err_msix_pool_init:
1794a9643ea8Slogwang 	i40e_res_pool_destroy(&pf->qp_pool);
1795a9643ea8Slogwang err_qp_pool_init:
1796a9643ea8Slogwang err_parameter_init:
1797a9643ea8Slogwang err_get_capabilities:
1798a9643ea8Slogwang 	(void)i40e_shutdown_adminq(hw);
1799a9643ea8Slogwang 
1800a9643ea8Slogwang 	return ret;
1801a9643ea8Slogwang }
1802a9643ea8Slogwang 
18032bfe3f2eSlogwang static void
i40e_rm_ethtype_filter_list(struct i40e_pf * pf)18042bfe3f2eSlogwang i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
18052bfe3f2eSlogwang {
18062bfe3f2eSlogwang 	struct i40e_ethertype_filter *p_ethertype;
18072bfe3f2eSlogwang 	struct i40e_ethertype_rule *ethertype_rule;
18082bfe3f2eSlogwang 
18092bfe3f2eSlogwang 	ethertype_rule = &pf->ethertype;
18102bfe3f2eSlogwang 	/* Remove all ethertype filter rules and hash */
18112bfe3f2eSlogwang 	if (ethertype_rule->hash_map)
18122bfe3f2eSlogwang 		rte_free(ethertype_rule->hash_map);
18132bfe3f2eSlogwang 	if (ethertype_rule->hash_table)
18142bfe3f2eSlogwang 		rte_hash_free(ethertype_rule->hash_table);
18152bfe3f2eSlogwang 
18162bfe3f2eSlogwang 	while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
18172bfe3f2eSlogwang 		TAILQ_REMOVE(&ethertype_rule->ethertype_list,
18182bfe3f2eSlogwang 			     p_ethertype, rules);
18192bfe3f2eSlogwang 		rte_free(p_ethertype);
18202bfe3f2eSlogwang 	}
18212bfe3f2eSlogwang }
18222bfe3f2eSlogwang 
18232bfe3f2eSlogwang static void
i40e_rm_tunnel_filter_list(struct i40e_pf * pf)18242bfe3f2eSlogwang i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
18252bfe3f2eSlogwang {
18262bfe3f2eSlogwang 	struct i40e_tunnel_filter *p_tunnel;
18272bfe3f2eSlogwang 	struct i40e_tunnel_rule *tunnel_rule;
18282bfe3f2eSlogwang 
18292bfe3f2eSlogwang 	tunnel_rule = &pf->tunnel;
18302bfe3f2eSlogwang 	/* Remove all tunnel director rules and hash */
18312bfe3f2eSlogwang 	if (tunnel_rule->hash_map)
18322bfe3f2eSlogwang 		rte_free(tunnel_rule->hash_map);
18332bfe3f2eSlogwang 	if (tunnel_rule->hash_table)
18342bfe3f2eSlogwang 		rte_hash_free(tunnel_rule->hash_table);
18352bfe3f2eSlogwang 
18362bfe3f2eSlogwang 	while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
18372bfe3f2eSlogwang 		TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
18382bfe3f2eSlogwang 		rte_free(p_tunnel);
18392bfe3f2eSlogwang 	}
18402bfe3f2eSlogwang }
18412bfe3f2eSlogwang 
18422bfe3f2eSlogwang static void
i40e_rm_fdir_filter_list(struct i40e_pf * pf)18432bfe3f2eSlogwang i40e_rm_fdir_filter_list(struct i40e_pf *pf)
18442bfe3f2eSlogwang {
18452bfe3f2eSlogwang 	struct i40e_fdir_filter *p_fdir;
18462bfe3f2eSlogwang 	struct i40e_fdir_info *fdir_info;
18472bfe3f2eSlogwang 
18482bfe3f2eSlogwang 	fdir_info = &pf->fdir;
1849*2d9fd380Sjfb8856606 
1850*2d9fd380Sjfb8856606 	/* Remove all flow director rules */
1851*2d9fd380Sjfb8856606 	while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
1852*2d9fd380Sjfb8856606 		TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1853*2d9fd380Sjfb8856606 }
1854*2d9fd380Sjfb8856606 
1855*2d9fd380Sjfb8856606 static void
i40e_fdir_memory_cleanup(struct i40e_pf * pf)1856*2d9fd380Sjfb8856606 i40e_fdir_memory_cleanup(struct i40e_pf *pf)
1857*2d9fd380Sjfb8856606 {
1858*2d9fd380Sjfb8856606 	struct i40e_fdir_info *fdir_info;
1859*2d9fd380Sjfb8856606 
1860*2d9fd380Sjfb8856606 	fdir_info = &pf->fdir;
1861*2d9fd380Sjfb8856606 
1862*2d9fd380Sjfb8856606 	/* flow director memory cleanup */
18632bfe3f2eSlogwang 	if (fdir_info->hash_map)
18642bfe3f2eSlogwang 		rte_free(fdir_info->hash_map);
18652bfe3f2eSlogwang 	if (fdir_info->hash_table)
18662bfe3f2eSlogwang 		rte_hash_free(fdir_info->hash_table);
1867*2d9fd380Sjfb8856606 	if (fdir_info->fdir_flow_pool.bitmap)
1868*2d9fd380Sjfb8856606 		rte_free(fdir_info->fdir_flow_pool.bitmap);
1869*2d9fd380Sjfb8856606 	if (fdir_info->fdir_flow_pool.pool)
1870*2d9fd380Sjfb8856606 		rte_free(fdir_info->fdir_flow_pool.pool);
1871*2d9fd380Sjfb8856606 	if (fdir_info->fdir_filter_array)
1872*2d9fd380Sjfb8856606 		rte_free(fdir_info->fdir_filter_array);
18732bfe3f2eSlogwang }
18742bfe3f2eSlogwang 
i40e_flex_payload_reg_set_default(struct i40e_hw * hw)1875d30ea906Sjfb8856606 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1876d30ea906Sjfb8856606 {
1877d30ea906Sjfb8856606 	/*
1878d30ea906Sjfb8856606 	 * Disable by default flexible payload
1879d30ea906Sjfb8856606 	 * for corresponding L2/L3/L4 layers.
1880d30ea906Sjfb8856606 	 */
1881d30ea906Sjfb8856606 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1882d30ea906Sjfb8856606 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1883d30ea906Sjfb8856606 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1884d30ea906Sjfb8856606 }
1885d30ea906Sjfb8856606 
1886a9643ea8Slogwang static int
eth_i40e_dev_uninit(struct rte_eth_dev * dev)1887a9643ea8Slogwang eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1888a9643ea8Slogwang {
1889a9643ea8Slogwang 	struct i40e_hw *hw;
1890a9643ea8Slogwang 
1891a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1892a9643ea8Slogwang 
1893a9643ea8Slogwang 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1894a9643ea8Slogwang 		return 0;
1895a9643ea8Slogwang 
1896a9643ea8Slogwang 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1897d30ea906Sjfb8856606 
18981646932aSjfb8856606 	if (hw->adapter_closed == 0)
1899a9643ea8Slogwang 		i40e_dev_close(dev);
1900a9643ea8Slogwang 
1901a9643ea8Slogwang 	return 0;
1902a9643ea8Slogwang }
1903a9643ea8Slogwang 
1904a9643ea8Slogwang static int
i40e_dev_configure(struct rte_eth_dev * dev)1905a9643ea8Slogwang i40e_dev_configure(struct rte_eth_dev *dev)
1906a9643ea8Slogwang {
1907a9643ea8Slogwang 	struct i40e_adapter *ad =
1908a9643ea8Slogwang 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1909a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
19102bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1911a9643ea8Slogwang 	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1912a9643ea8Slogwang 	int i, ret;
1913a9643ea8Slogwang 
19142bfe3f2eSlogwang 	ret = i40e_dev_sync_phy_type(hw);
19152bfe3f2eSlogwang 	if (ret)
19162bfe3f2eSlogwang 		return ret;
19172bfe3f2eSlogwang 
1918a9643ea8Slogwang 	/* Initialize to TRUE. If any of Rx queues doesn't meet the
1919a9643ea8Slogwang 	 * bulk allocation or vector Rx preconditions we will reset it.
1920a9643ea8Slogwang 	 */
1921a9643ea8Slogwang 	ad->rx_bulk_alloc_allowed = true;
1922a9643ea8Slogwang 	ad->rx_vec_allowed = true;
1923a9643ea8Slogwang 	ad->tx_simple_allowed = true;
1924a9643ea8Slogwang 	ad->tx_vec_allowed = true;
1925a9643ea8Slogwang 
19264418919fSjohnjiang 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
19274418919fSjohnjiang 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
19284418919fSjohnjiang 
1929d30ea906Sjfb8856606 	/* Only legacy filter API needs the following fdir config. So when the
1930d30ea906Sjfb8856606 	 * legacy filter API is deprecated, the following codes should also be
1931d30ea906Sjfb8856606 	 * removed.
1932d30ea906Sjfb8856606 	 */
1933a9643ea8Slogwang 	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1934a9643ea8Slogwang 		ret = i40e_fdir_setup(pf);
1935a9643ea8Slogwang 		if (ret != I40E_SUCCESS) {
1936a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1937a9643ea8Slogwang 			return -ENOTSUP;
1938a9643ea8Slogwang 		}
1939a9643ea8Slogwang 		ret = i40e_fdir_configure(dev);
1940a9643ea8Slogwang 		if (ret < 0) {
1941a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "failed to configure fdir.");
1942a9643ea8Slogwang 			goto err;
1943a9643ea8Slogwang 		}
1944a9643ea8Slogwang 	} else
1945a9643ea8Slogwang 		i40e_fdir_teardown(pf);
1946a9643ea8Slogwang 
1947a9643ea8Slogwang 	ret = i40e_dev_init_vlan(dev);
1948a9643ea8Slogwang 	if (ret < 0)
1949a9643ea8Slogwang 		goto err;
1950a9643ea8Slogwang 
1951a9643ea8Slogwang 	/* VMDQ setup.
1952a9643ea8Slogwang 	 *  General PMD driver call sequence are NIC init, configure,
1953a9643ea8Slogwang 	 *  rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
1954a9643ea8Slogwang 	 *  will try to lookup the VSI that specific queue belongs to if VMDQ
1955a9643ea8Slogwang 	 *  applicable. So, VMDQ setting has to be done before
1956a9643ea8Slogwang 	 *  rx/tx_queue_setup(). This function is good  to place vmdq_setup.
1957a9643ea8Slogwang 	 *  For RSS setting, it will try to calculate actual configured RX queue
1958a9643ea8Slogwang 	 *  number, which will be available after rx_queue_setup(). dev_start()
1959a9643ea8Slogwang 	 *  function is good to place RSS setup.
1960a9643ea8Slogwang 	 */
1961a9643ea8Slogwang 	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1962a9643ea8Slogwang 		ret = i40e_vmdq_setup(dev);
1963a9643ea8Slogwang 		if (ret)
1964a9643ea8Slogwang 			goto err;
1965a9643ea8Slogwang 	}
1966a9643ea8Slogwang 
1967a9643ea8Slogwang 	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1968a9643ea8Slogwang 		ret = i40e_dcb_setup(dev);
1969a9643ea8Slogwang 		if (ret) {
1970a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "failed to configure DCB.");
1971a9643ea8Slogwang 			goto err_dcb;
1972a9643ea8Slogwang 		}
1973a9643ea8Slogwang 	}
1974a9643ea8Slogwang 
19752bfe3f2eSlogwang 	TAILQ_INIT(&pf->flow_list);
19762bfe3f2eSlogwang 
1977a9643ea8Slogwang 	return 0;
1978a9643ea8Slogwang 
1979a9643ea8Slogwang err_dcb:
1980a9643ea8Slogwang 	/* need to release vmdq resource if exists */
1981a9643ea8Slogwang 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1982a9643ea8Slogwang 		i40e_vsi_release(pf->vmdq[i].vsi);
1983a9643ea8Slogwang 		pf->vmdq[i].vsi = NULL;
1984a9643ea8Slogwang 	}
1985a9643ea8Slogwang 	rte_free(pf->vmdq);
1986a9643ea8Slogwang 	pf->vmdq = NULL;
1987a9643ea8Slogwang err:
1988d30ea906Sjfb8856606 	/* Need to release fdir resource if exists.
1989d30ea906Sjfb8856606 	 * Only legacy filter API needs the following fdir config. So when the
1990d30ea906Sjfb8856606 	 * legacy filter API is deprecated, the following code should also be
1991d30ea906Sjfb8856606 	 * removed.
1992d30ea906Sjfb8856606 	 */
1993a9643ea8Slogwang 	i40e_fdir_teardown(pf);
1994a9643ea8Slogwang 	return ret;
1995a9643ea8Slogwang }
1996a9643ea8Slogwang 
1997a9643ea8Slogwang void
i40e_vsi_queues_unbind_intr(struct i40e_vsi * vsi)1998a9643ea8Slogwang i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1999a9643ea8Slogwang {
2000a9643ea8Slogwang 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
20012bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
20022bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2003a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2004a9643ea8Slogwang 	uint16_t msix_vect = vsi->msix_intr;
2005a9643ea8Slogwang 	uint16_t i;
2006a9643ea8Slogwang 
2007a9643ea8Slogwang 	for (i = 0; i < vsi->nb_qps; i++) {
2008a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2009a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2010a9643ea8Slogwang 		rte_wmb();
2011a9643ea8Slogwang 	}
2012a9643ea8Slogwang 
2013a9643ea8Slogwang 	if (vsi->type != I40E_VSI_SRIOV) {
2014a9643ea8Slogwang 		if (!rte_intr_allow_others(intr_handle)) {
2015a9643ea8Slogwang 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2016a9643ea8Slogwang 				       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
2017a9643ea8Slogwang 			I40E_WRITE_REG(hw,
2018a9643ea8Slogwang 				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2019a9643ea8Slogwang 				       0);
2020a9643ea8Slogwang 		} else {
2021a9643ea8Slogwang 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2022a9643ea8Slogwang 				       I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
2023a9643ea8Slogwang 			I40E_WRITE_REG(hw,
2024a9643ea8Slogwang 				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2025a9643ea8Slogwang 						       msix_vect - 1), 0);
2026a9643ea8Slogwang 		}
2027a9643ea8Slogwang 	} else {
2028a9643ea8Slogwang 		uint32_t reg;
2029a9643ea8Slogwang 		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2030a9643ea8Slogwang 			vsi->user_param + (msix_vect - 1);
2031a9643ea8Slogwang 
2032a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2033a9643ea8Slogwang 			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
2034a9643ea8Slogwang 	}
2035a9643ea8Slogwang 	I40E_WRITE_FLUSH(hw);
2036a9643ea8Slogwang }
2037a9643ea8Slogwang 
2038a9643ea8Slogwang static void
__vsi_queues_bind_intr(struct i40e_vsi * vsi,uint16_t msix_vect,int base_queue,int nb_queue,uint16_t itr_idx)2039a9643ea8Slogwang __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
20402bfe3f2eSlogwang 		       int base_queue, int nb_queue,
20412bfe3f2eSlogwang 		       uint16_t itr_idx)
2042a9643ea8Slogwang {
2043a9643ea8Slogwang 	int i;
2044a9643ea8Slogwang 	uint32_t val;
2045a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
20462bfe3f2eSlogwang 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2047a9643ea8Slogwang 
2048a9643ea8Slogwang 	/* Bind all RX queues to allocated MSIX interrupt */
2049a9643ea8Slogwang 	for (i = 0; i < nb_queue; i++) {
2050a9643ea8Slogwang 		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
20512bfe3f2eSlogwang 			itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
2052a9643ea8Slogwang 			((base_queue + i + 1) <<
2053a9643ea8Slogwang 			 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2054a9643ea8Slogwang 			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2055a9643ea8Slogwang 			I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2056a9643ea8Slogwang 
2057a9643ea8Slogwang 		if (i == nb_queue - 1)
2058a9643ea8Slogwang 			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
2059a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
2060a9643ea8Slogwang 	}
2061a9643ea8Slogwang 
2062a9643ea8Slogwang 	/* Write first RX queue to Link list register as the head element */
2063a9643ea8Slogwang 	if (vsi->type != I40E_VSI_SRIOV) {
2064a9643ea8Slogwang 		uint16_t interval =
2065d30ea906Sjfb8856606 			i40e_calc_itr_interval(1, pf->support_multi_driver);
2066a9643ea8Slogwang 
2067a9643ea8Slogwang 		if (msix_vect == I40E_MISC_VEC_ID) {
2068a9643ea8Slogwang 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2069a9643ea8Slogwang 				       (base_queue <<
2070a9643ea8Slogwang 					I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2071a9643ea8Slogwang 				       (0x0 <<
2072a9643ea8Slogwang 					I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2073a9643ea8Slogwang 			I40E_WRITE_REG(hw,
2074a9643ea8Slogwang 				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2075a9643ea8Slogwang 				       interval);
2076a9643ea8Slogwang 		} else {
2077a9643ea8Slogwang 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2078a9643ea8Slogwang 				       (base_queue <<
2079a9643ea8Slogwang 					I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2080a9643ea8Slogwang 				       (0x0 <<
2081a9643ea8Slogwang 					I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2082a9643ea8Slogwang 			I40E_WRITE_REG(hw,
2083a9643ea8Slogwang 				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2084a9643ea8Slogwang 						       msix_vect - 1),
2085a9643ea8Slogwang 				       interval);
2086a9643ea8Slogwang 		}
2087a9643ea8Slogwang 	} else {
2088a9643ea8Slogwang 		uint32_t reg;
2089a9643ea8Slogwang 
2090a9643ea8Slogwang 		if (msix_vect == I40E_MISC_VEC_ID) {
2091a9643ea8Slogwang 			I40E_WRITE_REG(hw,
2092a9643ea8Slogwang 				       I40E_VPINT_LNKLST0(vsi->user_param),
2093a9643ea8Slogwang 				       (base_queue <<
2094a9643ea8Slogwang 					I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2095a9643ea8Slogwang 				       (0x0 <<
2096a9643ea8Slogwang 					I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2097a9643ea8Slogwang 		} else {
2098a9643ea8Slogwang 			/* num_msix_vectors_vf needs to minus irq0 */
2099a9643ea8Slogwang 			reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2100a9643ea8Slogwang 				vsi->user_param + (msix_vect - 1);
2101a9643ea8Slogwang 
2102a9643ea8Slogwang 			I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2103a9643ea8Slogwang 				       (base_queue <<
2104a9643ea8Slogwang 					I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2105a9643ea8Slogwang 				       (0x0 <<
2106a9643ea8Slogwang 					I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2107a9643ea8Slogwang 		}
2108a9643ea8Slogwang 	}
2109a9643ea8Slogwang 
2110a9643ea8Slogwang 	I40E_WRITE_FLUSH(hw);
2111a9643ea8Slogwang }
2112a9643ea8Slogwang 
21130c6bd470Sfengbojiang int
i40e_vsi_queues_bind_intr(struct i40e_vsi * vsi,uint16_t itr_idx)21142bfe3f2eSlogwang i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
2115a9643ea8Slogwang {
2116a9643ea8Slogwang 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
21172bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
21182bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2119a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2120a9643ea8Slogwang 	uint16_t msix_vect = vsi->msix_intr;
2121a9643ea8Slogwang 	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
2122a9643ea8Slogwang 	uint16_t queue_idx = 0;
2123a9643ea8Slogwang 	int record = 0;
2124a9643ea8Slogwang 	int i;
2125a9643ea8Slogwang 
2126a9643ea8Slogwang 	for (i = 0; i < vsi->nb_qps; i++) {
2127a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2128a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2129a9643ea8Slogwang 	}
2130a9643ea8Slogwang 
2131a9643ea8Slogwang 	/* VF bind interrupt */
2132a9643ea8Slogwang 	if (vsi->type == I40E_VSI_SRIOV) {
21330c6bd470Sfengbojiang 		if (vsi->nb_msix == 0) {
21340c6bd470Sfengbojiang 			PMD_DRV_LOG(ERR, "No msix resource");
21350c6bd470Sfengbojiang 			return -EINVAL;
21360c6bd470Sfengbojiang 		}
2137a9643ea8Slogwang 		__vsi_queues_bind_intr(vsi, msix_vect,
21382bfe3f2eSlogwang 				       vsi->base_queue, vsi->nb_qps,
21392bfe3f2eSlogwang 				       itr_idx);
21400c6bd470Sfengbojiang 		return 0;
2141a9643ea8Slogwang 	}
2142a9643ea8Slogwang 
2143a9643ea8Slogwang 	/* PF & VMDq bind interrupt */
2144a9643ea8Slogwang 	if (rte_intr_dp_is_en(intr_handle)) {
2145a9643ea8Slogwang 		if (vsi->type == I40E_VSI_MAIN) {
2146a9643ea8Slogwang 			queue_idx = 0;
2147a9643ea8Slogwang 			record = 1;
2148a9643ea8Slogwang 		} else if (vsi->type == I40E_VSI_VMDQ2) {
2149a9643ea8Slogwang 			struct i40e_vsi *main_vsi =
2150a9643ea8Slogwang 				I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
2151a9643ea8Slogwang 			queue_idx = vsi->base_queue - main_vsi->nb_qps;
2152a9643ea8Slogwang 			record = 1;
2153a9643ea8Slogwang 		}
2154a9643ea8Slogwang 	}
2155a9643ea8Slogwang 
2156a9643ea8Slogwang 	for (i = 0; i < vsi->nb_used_qps; i++) {
21570c6bd470Sfengbojiang 		if (vsi->nb_msix == 0) {
21580c6bd470Sfengbojiang 			PMD_DRV_LOG(ERR, "No msix resource");
21590c6bd470Sfengbojiang 			return -EINVAL;
21600c6bd470Sfengbojiang 		} else if (nb_msix <= 1) {
2161a9643ea8Slogwang 			if (!rte_intr_allow_others(intr_handle))
2162a9643ea8Slogwang 				/* allow to share MISC_VEC_ID */
2163a9643ea8Slogwang 				msix_vect = I40E_MISC_VEC_ID;
2164a9643ea8Slogwang 
2165a9643ea8Slogwang 			/* no enough msix_vect, map all to one */
2166a9643ea8Slogwang 			__vsi_queues_bind_intr(vsi, msix_vect,
2167a9643ea8Slogwang 					       vsi->base_queue + i,
21682bfe3f2eSlogwang 					       vsi->nb_used_qps - i,
21692bfe3f2eSlogwang 					       itr_idx);
2170a9643ea8Slogwang 			for (; !!record && i < vsi->nb_used_qps; i++)
2171a9643ea8Slogwang 				intr_handle->intr_vec[queue_idx + i] =
2172a9643ea8Slogwang 					msix_vect;
2173a9643ea8Slogwang 			break;
2174a9643ea8Slogwang 		}
2175a9643ea8Slogwang 		/* 1:1 queue/msix_vect mapping */
2176a9643ea8Slogwang 		__vsi_queues_bind_intr(vsi, msix_vect,
21772bfe3f2eSlogwang 				       vsi->base_queue + i, 1,
21782bfe3f2eSlogwang 				       itr_idx);
2179a9643ea8Slogwang 		if (!!record)
2180a9643ea8Slogwang 			intr_handle->intr_vec[queue_idx + i] = msix_vect;
2181a9643ea8Slogwang 
2182a9643ea8Slogwang 		msix_vect++;
2183a9643ea8Slogwang 		nb_msix--;
2184a9643ea8Slogwang 	}
21850c6bd470Sfengbojiang 
21860c6bd470Sfengbojiang 	return 0;
2187a9643ea8Slogwang }
2188a9643ea8Slogwang 
21890c6bd470Sfengbojiang void
i40e_vsi_enable_queues_intr(struct i40e_vsi * vsi)2190a9643ea8Slogwang i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2191a9643ea8Slogwang {
2192a9643ea8Slogwang 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
21932bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
21942bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2195a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
21962bfe3f2eSlogwang 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2197a9643ea8Slogwang 	uint16_t msix_intr, i;
2198a9643ea8Slogwang 
21992bfe3f2eSlogwang 	if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2200a9643ea8Slogwang 		for (i = 0; i < vsi->nb_msix; i++) {
2201a9643ea8Slogwang 			msix_intr = vsi->msix_intr + i;
2202a9643ea8Slogwang 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2203a9643ea8Slogwang 				I40E_PFINT_DYN_CTLN_INTENA_MASK |
2204a9643ea8Slogwang 				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
22052bfe3f2eSlogwang 				I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2206a9643ea8Slogwang 		}
2207a9643ea8Slogwang 	else
2208a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2209a9643ea8Slogwang 			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
2210a9643ea8Slogwang 			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
22112bfe3f2eSlogwang 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2212a9643ea8Slogwang 
2213a9643ea8Slogwang 	I40E_WRITE_FLUSH(hw);
2214a9643ea8Slogwang }
2215a9643ea8Slogwang 
22160c6bd470Sfengbojiang void
i40e_vsi_disable_queues_intr(struct i40e_vsi * vsi)2217a9643ea8Slogwang i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2218a9643ea8Slogwang {
2219a9643ea8Slogwang 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
22202bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
22212bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2222a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
22232bfe3f2eSlogwang 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2224a9643ea8Slogwang 	uint16_t msix_intr, i;
2225a9643ea8Slogwang 
22262bfe3f2eSlogwang 	if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2227a9643ea8Slogwang 		for (i = 0; i < vsi->nb_msix; i++) {
2228a9643ea8Slogwang 			msix_intr = vsi->msix_intr + i;
2229a9643ea8Slogwang 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
22302bfe3f2eSlogwang 				       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2231a9643ea8Slogwang 		}
2232a9643ea8Slogwang 	else
22332bfe3f2eSlogwang 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
22342bfe3f2eSlogwang 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2235a9643ea8Slogwang 
2236a9643ea8Slogwang 	I40E_WRITE_FLUSH(hw);
2237a9643ea8Slogwang }
2238a9643ea8Slogwang 
2239a9643ea8Slogwang static inline uint8_t
i40e_parse_link_speeds(uint16_t link_speeds)2240a9643ea8Slogwang i40e_parse_link_speeds(uint16_t link_speeds)
2241a9643ea8Slogwang {
2242a9643ea8Slogwang 	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2243a9643ea8Slogwang 
2244a9643ea8Slogwang 	if (link_speeds & ETH_LINK_SPEED_40G)
2245a9643ea8Slogwang 		link_speed |= I40E_LINK_SPEED_40GB;
22462bfe3f2eSlogwang 	if (link_speeds & ETH_LINK_SPEED_25G)
22472bfe3f2eSlogwang 		link_speed |= I40E_LINK_SPEED_25GB;
2248a9643ea8Slogwang 	if (link_speeds & ETH_LINK_SPEED_20G)
2249a9643ea8Slogwang 		link_speed |= I40E_LINK_SPEED_20GB;
2250a9643ea8Slogwang 	if (link_speeds & ETH_LINK_SPEED_10G)
2251a9643ea8Slogwang 		link_speed |= I40E_LINK_SPEED_10GB;
2252a9643ea8Slogwang 	if (link_speeds & ETH_LINK_SPEED_1G)
2253a9643ea8Slogwang 		link_speed |= I40E_LINK_SPEED_1GB;
2254a9643ea8Slogwang 	if (link_speeds & ETH_LINK_SPEED_100M)
2255a9643ea8Slogwang 		link_speed |= I40E_LINK_SPEED_100MB;
2256a9643ea8Slogwang 
2257a9643ea8Slogwang 	return link_speed;
2258a9643ea8Slogwang }
2259a9643ea8Slogwang 
2260a9643ea8Slogwang static int
i40e_phy_conf_link(struct i40e_hw * hw,uint8_t abilities,uint8_t force_speed,bool is_up)2261a9643ea8Slogwang i40e_phy_conf_link(struct i40e_hw *hw,
2262a9643ea8Slogwang 		   uint8_t abilities,
22632bfe3f2eSlogwang 		   uint8_t force_speed,
22642bfe3f2eSlogwang 		   bool is_up)
2265a9643ea8Slogwang {
2266a9643ea8Slogwang 	enum i40e_status_code status;
2267a9643ea8Slogwang 	struct i40e_aq_get_phy_abilities_resp phy_ab;
2268a9643ea8Slogwang 	struct i40e_aq_set_phy_config phy_conf;
22692bfe3f2eSlogwang 	enum i40e_aq_phy_type cnt;
2270579bf1e2Sjfb8856606 	uint8_t avail_speed;
22712bfe3f2eSlogwang 	uint32_t phy_type_mask = 0;
22722bfe3f2eSlogwang 
2273a9643ea8Slogwang 	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2274a9643ea8Slogwang 			I40E_AQ_PHY_FLAG_PAUSE_RX |
2275a9643ea8Slogwang 			I40E_AQ_PHY_FLAG_PAUSE_RX |
2276a9643ea8Slogwang 			I40E_AQ_PHY_FLAG_LOW_POWER;
2277a9643ea8Slogwang 	int ret = -ENOTSUP;
2278a9643ea8Slogwang 
2279579bf1e2Sjfb8856606 	/* To get phy capabilities of available speeds. */
2280579bf1e2Sjfb8856606 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2281579bf1e2Sjfb8856606 					      NULL);
2282579bf1e2Sjfb8856606 	if (status) {
2283579bf1e2Sjfb8856606 		PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
2284579bf1e2Sjfb8856606 				status);
2285579bf1e2Sjfb8856606 		return ret;
2286579bf1e2Sjfb8856606 	}
2287579bf1e2Sjfb8856606 	avail_speed = phy_ab.link_speed;
2288a9643ea8Slogwang 
2289579bf1e2Sjfb8856606 	/* To get the current phy config. */
2290a9643ea8Slogwang 	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2291a9643ea8Slogwang 					      NULL);
2292579bf1e2Sjfb8856606 	if (status) {
2293579bf1e2Sjfb8856606 		PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
2294579bf1e2Sjfb8856606 				status);
2295a9643ea8Slogwang 		return ret;
2296579bf1e2Sjfb8856606 	}
2297a9643ea8Slogwang 
2298579bf1e2Sjfb8856606 	/* If link needs to go up and it is in autoneg mode the speed is OK,
2299579bf1e2Sjfb8856606 	 * no need to set up again.
2300579bf1e2Sjfb8856606 	 */
2301579bf1e2Sjfb8856606 	if (is_up && phy_ab.phy_type != 0 &&
2302579bf1e2Sjfb8856606 		     abilities & I40E_AQ_PHY_AN_ENABLED &&
2303579bf1e2Sjfb8856606 		     phy_ab.link_speed != 0)
23042bfe3f2eSlogwang 		return I40E_SUCCESS;
23052bfe3f2eSlogwang 
2306a9643ea8Slogwang 	memset(&phy_conf, 0, sizeof(phy_conf));
2307a9643ea8Slogwang 
2308a9643ea8Slogwang 	/* bits 0-2 use the values from get_phy_abilities_resp */
2309a9643ea8Slogwang 	abilities &= ~mask;
2310a9643ea8Slogwang 	abilities |= phy_ab.abilities & mask;
2311a9643ea8Slogwang 
2312a9643ea8Slogwang 	phy_conf.abilities = abilities;
2313a9643ea8Slogwang 
2314579bf1e2Sjfb8856606 	/* If link needs to go up, but the force speed is not supported,
2315579bf1e2Sjfb8856606 	 * Warn users and config the default available speeds.
2316579bf1e2Sjfb8856606 	 */
2317579bf1e2Sjfb8856606 	if (is_up && !(force_speed & avail_speed)) {
2318579bf1e2Sjfb8856606 		PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
2319579bf1e2Sjfb8856606 		phy_conf.link_speed = avail_speed;
2320579bf1e2Sjfb8856606 	} else {
2321579bf1e2Sjfb8856606 		phy_conf.link_speed = is_up ? force_speed : avail_speed;
2322579bf1e2Sjfb8856606 	}
23232bfe3f2eSlogwang 
2324579bf1e2Sjfb8856606 	/* PHY type mask needs to include each type except PHY type extension */
2325579bf1e2Sjfb8856606 	for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
23262bfe3f2eSlogwang 		phy_type_mask |= 1 << cnt;
23272bfe3f2eSlogwang 
2328a9643ea8Slogwang 	/* use get_phy_abilities_resp value for the rest */
23292bfe3f2eSlogwang 	phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
23302bfe3f2eSlogwang 	phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
23312bfe3f2eSlogwang 		I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
23322bfe3f2eSlogwang 		I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
23332bfe3f2eSlogwang 	phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2334a9643ea8Slogwang 	phy_conf.eee_capability = phy_ab.eee_capability;
2335a9643ea8Slogwang 	phy_conf.eeer = phy_ab.eeer_val;
2336a9643ea8Slogwang 	phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2337a9643ea8Slogwang 
2338a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2339a9643ea8Slogwang 		    phy_ab.abilities, phy_ab.link_speed);
2340a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2341a9643ea8Slogwang 		    phy_conf.abilities, phy_conf.link_speed);
2342a9643ea8Slogwang 
2343a9643ea8Slogwang 	status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2344a9643ea8Slogwang 	if (status)
2345a9643ea8Slogwang 		return ret;
2346a9643ea8Slogwang 
2347a9643ea8Slogwang 	return I40E_SUCCESS;
2348a9643ea8Slogwang }
2349a9643ea8Slogwang 
2350a9643ea8Slogwang static int
i40e_apply_link_speed(struct rte_eth_dev * dev)2351a9643ea8Slogwang i40e_apply_link_speed(struct rte_eth_dev *dev)
2352a9643ea8Slogwang {
2353a9643ea8Slogwang 	uint8_t speed;
2354a9643ea8Slogwang 	uint8_t abilities = 0;
2355a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2356a9643ea8Slogwang 	struct rte_eth_conf *conf = &dev->data->dev_conf;
2357a9643ea8Slogwang 
23584418919fSjohnjiang 	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
23594418919fSjohnjiang 		     I40E_AQ_PHY_LINK_ENABLED;
23604418919fSjohnjiang 
2361579bf1e2Sjfb8856606 	if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
2362579bf1e2Sjfb8856606 		conf->link_speeds = ETH_LINK_SPEED_40G |
2363579bf1e2Sjfb8856606 				    ETH_LINK_SPEED_25G |
2364579bf1e2Sjfb8856606 				    ETH_LINK_SPEED_20G |
2365579bf1e2Sjfb8856606 				    ETH_LINK_SPEED_10G |
2366579bf1e2Sjfb8856606 				    ETH_LINK_SPEED_1G |
2367579bf1e2Sjfb8856606 				    ETH_LINK_SPEED_100M;
23684418919fSjohnjiang 
23694418919fSjohnjiang 		abilities |= I40E_AQ_PHY_AN_ENABLED;
23704418919fSjohnjiang 	} else {
23714418919fSjohnjiang 		abilities &= ~I40E_AQ_PHY_AN_ENABLED;
2372579bf1e2Sjfb8856606 	}
2373a9643ea8Slogwang 	speed = i40e_parse_link_speeds(conf->link_speeds);
2374a9643ea8Slogwang 
23752bfe3f2eSlogwang 	return i40e_phy_conf_link(hw, abilities, speed, true);
2376a9643ea8Slogwang }
2377a9643ea8Slogwang 
2378a9643ea8Slogwang static int
i40e_dev_start(struct rte_eth_dev * dev)2379a9643ea8Slogwang i40e_dev_start(struct rte_eth_dev *dev)
2380a9643ea8Slogwang {
2381a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2382a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2383a9643ea8Slogwang 	struct i40e_vsi *main_vsi = pf->main_vsi;
2384a9643ea8Slogwang 	int ret, i;
23852bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
23862bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2387a9643ea8Slogwang 	uint32_t intr_vector = 0;
23882bfe3f2eSlogwang 	struct i40e_vsi *vsi;
23890c6bd470Sfengbojiang 	uint16_t nb_rxq, nb_txq;
2390a9643ea8Slogwang 
2391a9643ea8Slogwang 	hw->adapter_stopped = 0;
2392a9643ea8Slogwang 
2393a9643ea8Slogwang 	rte_intr_disable(intr_handle);
2394a9643ea8Slogwang 
2395a9643ea8Slogwang 	if ((rte_intr_cap_multiple(intr_handle) ||
2396a9643ea8Slogwang 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
2397a9643ea8Slogwang 	    dev->data->dev_conf.intr_conf.rxq != 0) {
2398a9643ea8Slogwang 		intr_vector = dev->data->nb_rx_queues;
23992bfe3f2eSlogwang 		ret = rte_intr_efd_enable(intr_handle, intr_vector);
24002bfe3f2eSlogwang 		if (ret)
24012bfe3f2eSlogwang 			return ret;
2402a9643ea8Slogwang 	}
2403a9643ea8Slogwang 
2404a9643ea8Slogwang 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2405a9643ea8Slogwang 		intr_handle->intr_vec =
2406a9643ea8Slogwang 			rte_zmalloc("intr_vec",
2407a9643ea8Slogwang 				    dev->data->nb_rx_queues * sizeof(int),
2408a9643ea8Slogwang 				    0);
2409a9643ea8Slogwang 		if (!intr_handle->intr_vec) {
24102bfe3f2eSlogwang 			PMD_INIT_LOG(ERR,
24112bfe3f2eSlogwang 				"Failed to allocate %d rx_queues intr_vec",
24122bfe3f2eSlogwang 				dev->data->nb_rx_queues);
2413a9643ea8Slogwang 			return -ENOMEM;
2414a9643ea8Slogwang 		}
2415a9643ea8Slogwang 	}
2416a9643ea8Slogwang 
2417a9643ea8Slogwang 	/* Initialize VSI */
2418a9643ea8Slogwang 	ret = i40e_dev_rxtx_init(pf);
2419a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
2420a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
24210c6bd470Sfengbojiang 		return ret;
2422a9643ea8Slogwang 	}
2423a9643ea8Slogwang 
2424a9643ea8Slogwang 	/* Map queues with MSIX interrupt */
2425a9643ea8Slogwang 	main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2426a9643ea8Slogwang 		pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
24270c6bd470Sfengbojiang 	ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
24280c6bd470Sfengbojiang 	if (ret < 0)
24290c6bd470Sfengbojiang 		return ret;
2430a9643ea8Slogwang 	i40e_vsi_enable_queues_intr(main_vsi);
2431a9643ea8Slogwang 
2432a9643ea8Slogwang 	/* Map VMDQ VSI queues with MSIX interrupt */
2433a9643ea8Slogwang 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2434a9643ea8Slogwang 		pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
24350c6bd470Sfengbojiang 		ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
24362bfe3f2eSlogwang 						I40E_ITR_INDEX_DEFAULT);
24370c6bd470Sfengbojiang 		if (ret < 0)
24380c6bd470Sfengbojiang 			return ret;
2439a9643ea8Slogwang 		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2440a9643ea8Slogwang 	}
2441a9643ea8Slogwang 
24420c6bd470Sfengbojiang 	/* Enable all queues which have been configured */
24430c6bd470Sfengbojiang 	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
24440c6bd470Sfengbojiang 		ret = i40e_dev_rx_queue_start(dev, nb_rxq);
24450c6bd470Sfengbojiang 		if (ret)
24460c6bd470Sfengbojiang 			goto rx_err;
2447a9643ea8Slogwang 	}
2448a9643ea8Slogwang 
24490c6bd470Sfengbojiang 	for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
24500c6bd470Sfengbojiang 		ret = i40e_dev_tx_queue_start(dev, nb_txq);
24510c6bd470Sfengbojiang 		if (ret)
24520c6bd470Sfengbojiang 			goto tx_err;
2453a9643ea8Slogwang 	}
2454a9643ea8Slogwang 
2455a9643ea8Slogwang 	/* Enable receiving broadcast packets */
2456a9643ea8Slogwang 	ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2457a9643ea8Slogwang 	if (ret != I40E_SUCCESS)
2458a9643ea8Slogwang 		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2459a9643ea8Slogwang 
2460a9643ea8Slogwang 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2461a9643ea8Slogwang 		ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2462a9643ea8Slogwang 						true, NULL);
2463a9643ea8Slogwang 		if (ret != I40E_SUCCESS)
2464a9643ea8Slogwang 			PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2465a9643ea8Slogwang 	}
2466a9643ea8Slogwang 
24672bfe3f2eSlogwang 	/* Enable the VLAN promiscuous mode. */
24682bfe3f2eSlogwang 	if (pf->vfs) {
24692bfe3f2eSlogwang 		for (i = 0; i < pf->vf_num; i++) {
24702bfe3f2eSlogwang 			vsi = pf->vfs[i].vsi;
24712bfe3f2eSlogwang 			i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
24722bfe3f2eSlogwang 						     true, NULL);
24732bfe3f2eSlogwang 		}
24742bfe3f2eSlogwang 	}
24752bfe3f2eSlogwang 
2476d30ea906Sjfb8856606 	/* Enable mac loopback mode */
2477d30ea906Sjfb8856606 	if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2478d30ea906Sjfb8856606 	    dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2479d30ea906Sjfb8856606 		ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2480d30ea906Sjfb8856606 		if (ret != I40E_SUCCESS) {
2481d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR, "fail to set loopback link");
24820c6bd470Sfengbojiang 			goto tx_err;
2483d30ea906Sjfb8856606 		}
2484d30ea906Sjfb8856606 	}
2485d30ea906Sjfb8856606 
2486a9643ea8Slogwang 	/* Apply link configure */
2487a9643ea8Slogwang 	ret = i40e_apply_link_speed(dev);
2488a9643ea8Slogwang 	if (I40E_SUCCESS != ret) {
2489a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Fail to apply link setting");
24900c6bd470Sfengbojiang 		goto tx_err;
2491a9643ea8Slogwang 	}
2492a9643ea8Slogwang 
2493a9643ea8Slogwang 	if (!rte_intr_allow_others(intr_handle)) {
2494a9643ea8Slogwang 		rte_intr_callback_unregister(intr_handle,
2495a9643ea8Slogwang 					     i40e_dev_interrupt_handler,
2496a9643ea8Slogwang 					     (void *)dev);
2497a9643ea8Slogwang 		/* configure and enable device interrupt */
2498a9643ea8Slogwang 		i40e_pf_config_irq0(hw, FALSE);
2499a9643ea8Slogwang 		i40e_pf_enable_irq0(hw);
2500a9643ea8Slogwang 
2501a9643ea8Slogwang 		if (dev->data->dev_conf.intr_conf.lsc != 0)
25022bfe3f2eSlogwang 			PMD_INIT_LOG(INFO,
25032bfe3f2eSlogwang 				"lsc won't enable because of no intr multiplex");
25042bfe3f2eSlogwang 	} else {
2505a9643ea8Slogwang 		ret = i40e_aq_set_phy_int_mask(hw,
2506a9643ea8Slogwang 					       ~(I40E_AQ_EVENT_LINK_UPDOWN |
2507a9643ea8Slogwang 					       I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2508a9643ea8Slogwang 					       I40E_AQ_EVENT_MEDIA_NA), NULL);
2509a9643ea8Slogwang 		if (ret != I40E_SUCCESS)
2510a9643ea8Slogwang 			PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2511a9643ea8Slogwang 
25122bfe3f2eSlogwang 		/* Call get_link_info aq commond to enable/disable LSE */
2513a9643ea8Slogwang 		i40e_dev_link_update(dev, 0);
2514a9643ea8Slogwang 	}
2515a9643ea8Slogwang 
2516d30ea906Sjfb8856606 	if (dev->data->dev_conf.intr_conf.rxq == 0) {
2517d30ea906Sjfb8856606 		rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2518d30ea906Sjfb8856606 				  i40e_dev_alarm_handler, dev);
2519d30ea906Sjfb8856606 	} else {
2520a9643ea8Slogwang 		/* enable uio intr after callback register */
2521a9643ea8Slogwang 		rte_intr_enable(intr_handle);
2522d30ea906Sjfb8856606 	}
2523a9643ea8Slogwang 
25242bfe3f2eSlogwang 	i40e_filter_restore(pf);
25252bfe3f2eSlogwang 
25262bfe3f2eSlogwang 	if (pf->tm_conf.root && !pf->tm_conf.committed)
25272bfe3f2eSlogwang 		PMD_DRV_LOG(WARNING,
25282bfe3f2eSlogwang 			    "please call hierarchy_commit() "
25292bfe3f2eSlogwang 			    "before starting the port");
25302bfe3f2eSlogwang 
2531a9643ea8Slogwang 	return I40E_SUCCESS;
2532a9643ea8Slogwang 
25330c6bd470Sfengbojiang tx_err:
25340c6bd470Sfengbojiang 	for (i = 0; i < nb_txq; i++)
25350c6bd470Sfengbojiang 		i40e_dev_tx_queue_stop(dev, i);
25360c6bd470Sfengbojiang rx_err:
25370c6bd470Sfengbojiang 	for (i = 0; i < nb_rxq; i++)
25380c6bd470Sfengbojiang 		i40e_dev_rx_queue_stop(dev, i);
2539a9643ea8Slogwang 
2540a9643ea8Slogwang 	return ret;
2541a9643ea8Slogwang }
2542a9643ea8Slogwang 
2543*2d9fd380Sjfb8856606 static int
i40e_dev_stop(struct rte_eth_dev * dev)2544a9643ea8Slogwang i40e_dev_stop(struct rte_eth_dev *dev)
2545a9643ea8Slogwang {
2546a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
25472bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2548a9643ea8Slogwang 	struct i40e_vsi *main_vsi = pf->main_vsi;
25492bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
25502bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2551a9643ea8Slogwang 	int i;
2552a9643ea8Slogwang 
25532bfe3f2eSlogwang 	if (hw->adapter_stopped == 1)
2554*2d9fd380Sjfb8856606 		return 0;
2555d30ea906Sjfb8856606 
2556d30ea906Sjfb8856606 	if (dev->data->dev_conf.intr_conf.rxq == 0) {
2557d30ea906Sjfb8856606 		rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
2558d30ea906Sjfb8856606 		rte_intr_enable(intr_handle);
2559d30ea906Sjfb8856606 	}
2560d30ea906Sjfb8856606 
2561a9643ea8Slogwang 	/* Disable all queues */
25620c6bd470Sfengbojiang 	for (i = 0; i < dev->data->nb_tx_queues; i++)
25630c6bd470Sfengbojiang 		i40e_dev_tx_queue_stop(dev, i);
25640c6bd470Sfengbojiang 
25650c6bd470Sfengbojiang 	for (i = 0; i < dev->data->nb_rx_queues; i++)
25660c6bd470Sfengbojiang 		i40e_dev_rx_queue_stop(dev, i);
2567a9643ea8Slogwang 
2568a9643ea8Slogwang 	/* un-map queues with interrupt registers */
2569a9643ea8Slogwang 	i40e_vsi_disable_queues_intr(main_vsi);
2570a9643ea8Slogwang 	i40e_vsi_queues_unbind_intr(main_vsi);
2571a9643ea8Slogwang 
2572a9643ea8Slogwang 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2573a9643ea8Slogwang 		i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2574a9643ea8Slogwang 		i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2575a9643ea8Slogwang 	}
2576a9643ea8Slogwang 
2577a9643ea8Slogwang 	/* Clear all queues and release memory */
2578a9643ea8Slogwang 	i40e_dev_clear_queues(dev);
2579a9643ea8Slogwang 
2580a9643ea8Slogwang 	/* Set link down */
2581a9643ea8Slogwang 	i40e_dev_set_link_down(dev);
2582a9643ea8Slogwang 
2583a9643ea8Slogwang 	if (!rte_intr_allow_others(intr_handle))
2584a9643ea8Slogwang 		/* resume to the default handler */
2585a9643ea8Slogwang 		rte_intr_callback_register(intr_handle,
2586a9643ea8Slogwang 					   i40e_dev_interrupt_handler,
2587a9643ea8Slogwang 					   (void *)dev);
2588a9643ea8Slogwang 
2589a9643ea8Slogwang 	/* Clean datapath event and queue/vec mapping */
2590a9643ea8Slogwang 	rte_intr_efd_disable(intr_handle);
2591a9643ea8Slogwang 	if (intr_handle->intr_vec) {
2592a9643ea8Slogwang 		rte_free(intr_handle->intr_vec);
2593a9643ea8Slogwang 		intr_handle->intr_vec = NULL;
2594a9643ea8Slogwang 	}
25952bfe3f2eSlogwang 
25962bfe3f2eSlogwang 	/* reset hierarchy commit */
25972bfe3f2eSlogwang 	pf->tm_conf.committed = false;
25982bfe3f2eSlogwang 
25992bfe3f2eSlogwang 	hw->adapter_stopped = 1;
2600*2d9fd380Sjfb8856606 	dev->data->dev_started = 0;
26011646932aSjfb8856606 
26021646932aSjfb8856606 	pf->adapter->rss_reta_updated = 0;
2603*2d9fd380Sjfb8856606 
2604*2d9fd380Sjfb8856606 	return 0;
2605a9643ea8Slogwang }
2606a9643ea8Slogwang 
2607*2d9fd380Sjfb8856606 static int
i40e_dev_close(struct rte_eth_dev * dev)2608a9643ea8Slogwang i40e_dev_close(struct rte_eth_dev *dev)
2609a9643ea8Slogwang {
2610a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2611a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
26122bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
26132bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
26142bfe3f2eSlogwang 	struct i40e_mirror_rule *p_mirror;
26154418919fSjohnjiang 	struct i40e_filter_control_settings settings;
26164418919fSjohnjiang 	struct rte_flow *p_flow;
2617a9643ea8Slogwang 	uint32_t reg;
2618a9643ea8Slogwang 	int i;
26192bfe3f2eSlogwang 	int ret;
26204418919fSjohnjiang 	uint8_t aq_fail = 0;
26214418919fSjohnjiang 	int retries = 0;
2622a9643ea8Slogwang 
2623a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
2624*2d9fd380Sjfb8856606 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2625*2d9fd380Sjfb8856606 		return 0;
2626a9643ea8Slogwang 
26274418919fSjohnjiang 	ret = rte_eth_switch_domain_free(pf->switch_domain_id);
26284418919fSjohnjiang 	if (ret)
26294418919fSjohnjiang 		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
26304418919fSjohnjiang 
26314418919fSjohnjiang 
2632*2d9fd380Sjfb8856606 	ret = i40e_dev_stop(dev);
26332bfe3f2eSlogwang 
26342bfe3f2eSlogwang 	/* Remove all mirror rules */
26352bfe3f2eSlogwang 	while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
26362bfe3f2eSlogwang 		ret = i40e_aq_del_mirror_rule(hw,
26372bfe3f2eSlogwang 					      pf->main_vsi->veb->seid,
26382bfe3f2eSlogwang 					      p_mirror->rule_type,
26392bfe3f2eSlogwang 					      p_mirror->entries,
26402bfe3f2eSlogwang 					      p_mirror->num_entries,
26412bfe3f2eSlogwang 					      p_mirror->id);
26422bfe3f2eSlogwang 		if (ret < 0)
26432bfe3f2eSlogwang 			PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
26442bfe3f2eSlogwang 				    "status = %d, aq_err = %d.", ret,
26452bfe3f2eSlogwang 				    hw->aq.asq_last_status);
26462bfe3f2eSlogwang 
26472bfe3f2eSlogwang 		/* remove mirror software resource anyway */
26482bfe3f2eSlogwang 		TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
26492bfe3f2eSlogwang 		rte_free(p_mirror);
26502bfe3f2eSlogwang 		pf->nb_mirror_rule--;
26512bfe3f2eSlogwang 	}
26522bfe3f2eSlogwang 
2653a9643ea8Slogwang 	i40e_dev_free_queues(dev);
2654a9643ea8Slogwang 
2655a9643ea8Slogwang 	/* Disable interrupt */
2656a9643ea8Slogwang 	i40e_pf_disable_irq0(hw);
26572bfe3f2eSlogwang 	rte_intr_disable(intr_handle);
2658a9643ea8Slogwang 
2659d30ea906Sjfb8856606 	/*
2660d30ea906Sjfb8856606 	 * Only legacy filter API needs the following fdir config. So when the
2661d30ea906Sjfb8856606 	 * legacy filter API is deprecated, the following code should also be
2662d30ea906Sjfb8856606 	 * removed.
2663d30ea906Sjfb8856606 	 */
2664579bf1e2Sjfb8856606 	i40e_fdir_teardown(pf);
2665579bf1e2Sjfb8856606 
2666a9643ea8Slogwang 	/* shutdown and destroy the HMC */
2667a9643ea8Slogwang 	i40e_shutdown_lan_hmc(hw);
2668a9643ea8Slogwang 
2669a9643ea8Slogwang 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2670a9643ea8Slogwang 		i40e_vsi_release(pf->vmdq[i].vsi);
2671a9643ea8Slogwang 		pf->vmdq[i].vsi = NULL;
2672a9643ea8Slogwang 	}
2673a9643ea8Slogwang 	rte_free(pf->vmdq);
2674a9643ea8Slogwang 	pf->vmdq = NULL;
2675a9643ea8Slogwang 
26762bfe3f2eSlogwang 	/* release all the existing VSIs and VEBs */
26772bfe3f2eSlogwang 	i40e_vsi_release(pf->main_vsi);
26782bfe3f2eSlogwang 
2679a9643ea8Slogwang 	/* shutdown the adminq */
2680a9643ea8Slogwang 	i40e_aq_queue_shutdown(hw, true);
2681a9643ea8Slogwang 	i40e_shutdown_adminq(hw);
2682a9643ea8Slogwang 
2683a9643ea8Slogwang 	i40e_res_pool_destroy(&pf->qp_pool);
2684a9643ea8Slogwang 	i40e_res_pool_destroy(&pf->msix_pool);
2685a9643ea8Slogwang 
2686d30ea906Sjfb8856606 	/* Disable flexible payload in global configuration */
2687d30ea906Sjfb8856606 	if (!pf->support_multi_driver)
2688d30ea906Sjfb8856606 		i40e_flex_payload_reg_set_default(hw);
2689d30ea906Sjfb8856606 
2690a9643ea8Slogwang 	/* force a PF reset to clean anything leftover */
2691a9643ea8Slogwang 	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2692a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2693a9643ea8Slogwang 			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2694a9643ea8Slogwang 	I40E_WRITE_FLUSH(hw);
26951646932aSjfb8856606 
26964418919fSjohnjiang 	/* Clear PXE mode */
26974418919fSjohnjiang 	i40e_clear_pxe_mode(hw);
26984418919fSjohnjiang 
26994418919fSjohnjiang 	/* Unconfigure filter control */
27004418919fSjohnjiang 	memset(&settings, 0, sizeof(settings));
27014418919fSjohnjiang 	ret = i40e_set_filter_control(hw, &settings);
27024418919fSjohnjiang 	if (ret)
27034418919fSjohnjiang 		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
27044418919fSjohnjiang 					ret);
27054418919fSjohnjiang 
27064418919fSjohnjiang 	/* Disable flow control */
27074418919fSjohnjiang 	hw->fc.requested_mode = I40E_FC_NONE;
27084418919fSjohnjiang 	i40e_set_fc(hw, &aq_fail, TRUE);
27094418919fSjohnjiang 
27104418919fSjohnjiang 	/* uninitialize pf host driver */
27114418919fSjohnjiang 	i40e_pf_host_uninit(dev);
27124418919fSjohnjiang 
27134418919fSjohnjiang 	do {
27144418919fSjohnjiang 		ret = rte_intr_callback_unregister(intr_handle,
27154418919fSjohnjiang 				i40e_dev_interrupt_handler, dev);
27164418919fSjohnjiang 		if (ret >= 0 || ret == -ENOENT) {
27174418919fSjohnjiang 			break;
27184418919fSjohnjiang 		} else if (ret != -EAGAIN) {
27194418919fSjohnjiang 			PMD_INIT_LOG(ERR,
27204418919fSjohnjiang 				 "intr callback unregister failed: %d",
27214418919fSjohnjiang 				 ret);
27224418919fSjohnjiang 		}
27234418919fSjohnjiang 		i40e_msec_delay(500);
27244418919fSjohnjiang 	} while (retries++ < 5);
27254418919fSjohnjiang 
27264418919fSjohnjiang 	i40e_rm_ethtype_filter_list(pf);
27274418919fSjohnjiang 	i40e_rm_tunnel_filter_list(pf);
27284418919fSjohnjiang 	i40e_rm_fdir_filter_list(pf);
27294418919fSjohnjiang 
27304418919fSjohnjiang 	/* Remove all flows */
27314418919fSjohnjiang 	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
27324418919fSjohnjiang 		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
2733*2d9fd380Sjfb8856606 		/* Do not free FDIR flows since they are static allocated */
2734*2d9fd380Sjfb8856606 		if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
27354418919fSjohnjiang 			rte_free(p_flow);
27364418919fSjohnjiang 	}
27374418919fSjohnjiang 
2738*2d9fd380Sjfb8856606 	/* release the fdir static allocated memory */
2739*2d9fd380Sjfb8856606 	i40e_fdir_memory_cleanup(pf);
2740*2d9fd380Sjfb8856606 
27414418919fSjohnjiang 	/* Remove all Traffic Manager configuration */
27424418919fSjohnjiang 	i40e_tm_conf_uninit(dev);
27434418919fSjohnjiang 
27441646932aSjfb8856606 	hw->adapter_closed = 1;
2745*2d9fd380Sjfb8856606 	return ret;
2746a9643ea8Slogwang }
2747a9643ea8Slogwang 
27482bfe3f2eSlogwang /*
27492bfe3f2eSlogwang  * Reset PF device only to re-initialize resources in PMD layer
27502bfe3f2eSlogwang  */
27512bfe3f2eSlogwang static int
i40e_dev_reset(struct rte_eth_dev * dev)27522bfe3f2eSlogwang i40e_dev_reset(struct rte_eth_dev *dev)
27532bfe3f2eSlogwang {
27542bfe3f2eSlogwang 	int ret;
27552bfe3f2eSlogwang 
27562bfe3f2eSlogwang 	/* When a DPDK PMD PF begin to reset PF port, it should notify all
27572bfe3f2eSlogwang 	 * its VF to make them align with it. The detailed notification
27582bfe3f2eSlogwang 	 * mechanism is PMD specific. As to i40e PF, it is rather complex.
27592bfe3f2eSlogwang 	 * To avoid unexpected behavior in VF, currently reset of PF with
27602bfe3f2eSlogwang 	 * SR-IOV activation is not supported. It might be supported later.
27612bfe3f2eSlogwang 	 */
27622bfe3f2eSlogwang 	if (dev->data->sriov.active)
27632bfe3f2eSlogwang 		return -ENOTSUP;
27642bfe3f2eSlogwang 
27652bfe3f2eSlogwang 	ret = eth_i40e_dev_uninit(dev);
27662bfe3f2eSlogwang 	if (ret)
27672bfe3f2eSlogwang 		return ret;
27682bfe3f2eSlogwang 
2769d30ea906Sjfb8856606 	ret = eth_i40e_dev_init(dev, NULL);
27702bfe3f2eSlogwang 
27712bfe3f2eSlogwang 	return ret;
27722bfe3f2eSlogwang }
27732bfe3f2eSlogwang 
27744418919fSjohnjiang static int
i40e_dev_promiscuous_enable(struct rte_eth_dev * dev)2775a9643ea8Slogwang i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2776a9643ea8Slogwang {
2777a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2778a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2779a9643ea8Slogwang 	struct i40e_vsi *vsi = pf->main_vsi;
2780a9643ea8Slogwang 	int status;
2781a9643ea8Slogwang 
2782a9643ea8Slogwang 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2783a9643ea8Slogwang 						     true, NULL, true);
27844418919fSjohnjiang 	if (status != I40E_SUCCESS) {
2785a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
27864418919fSjohnjiang 		return -EAGAIN;
27874418919fSjohnjiang 	}
2788a9643ea8Slogwang 
2789a9643ea8Slogwang 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2790a9643ea8Slogwang 							TRUE, NULL);
27914418919fSjohnjiang 	if (status != I40E_SUCCESS) {
2792a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
27934418919fSjohnjiang 		/* Rollback unicast promiscuous mode */
27944418919fSjohnjiang 		i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
27954418919fSjohnjiang 						    false, NULL, true);
27964418919fSjohnjiang 		return -EAGAIN;
2797a9643ea8Slogwang 	}
2798a9643ea8Slogwang 
27994418919fSjohnjiang 	return 0;
28004418919fSjohnjiang }
28014418919fSjohnjiang 
28024418919fSjohnjiang static int
i40e_dev_promiscuous_disable(struct rte_eth_dev * dev)2803a9643ea8Slogwang i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2804a9643ea8Slogwang {
2805a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2806a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2807a9643ea8Slogwang 	struct i40e_vsi *vsi = pf->main_vsi;
2808a9643ea8Slogwang 	int status;
2809a9643ea8Slogwang 
2810a9643ea8Slogwang 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2811a9643ea8Slogwang 						     false, NULL, true);
28124418919fSjohnjiang 	if (status != I40E_SUCCESS) {
2813a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
28144418919fSjohnjiang 		return -EAGAIN;
28154418919fSjohnjiang 	}
2816a9643ea8Slogwang 
2817d30ea906Sjfb8856606 	/* must remain in all_multicast mode */
2818d30ea906Sjfb8856606 	if (dev->data->all_multicast == 1)
28194418919fSjohnjiang 		return 0;
2820d30ea906Sjfb8856606 
2821a9643ea8Slogwang 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2822a9643ea8Slogwang 							false, NULL);
28234418919fSjohnjiang 	if (status != I40E_SUCCESS) {
2824a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
28254418919fSjohnjiang 		/* Rollback unicast promiscuous mode */
28264418919fSjohnjiang 		i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
28274418919fSjohnjiang 						    true, NULL, true);
28284418919fSjohnjiang 		return -EAGAIN;
2829a9643ea8Slogwang 	}
2830a9643ea8Slogwang 
28314418919fSjohnjiang 	return 0;
28324418919fSjohnjiang }
28334418919fSjohnjiang 
28344418919fSjohnjiang static int
i40e_dev_allmulticast_enable(struct rte_eth_dev * dev)2835a9643ea8Slogwang i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2836a9643ea8Slogwang {
2837a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2838a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2839a9643ea8Slogwang 	struct i40e_vsi *vsi = pf->main_vsi;
2840a9643ea8Slogwang 	int ret;
2841a9643ea8Slogwang 
2842a9643ea8Slogwang 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
28434418919fSjohnjiang 	if (ret != I40E_SUCCESS) {
2844a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
28454418919fSjohnjiang 		return -EAGAIN;
2846a9643ea8Slogwang 	}
2847a9643ea8Slogwang 
28484418919fSjohnjiang 	return 0;
28494418919fSjohnjiang }
28504418919fSjohnjiang 
28514418919fSjohnjiang static int
i40e_dev_allmulticast_disable(struct rte_eth_dev * dev)2852a9643ea8Slogwang i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2853a9643ea8Slogwang {
2854a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2855a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2856a9643ea8Slogwang 	struct i40e_vsi *vsi = pf->main_vsi;
2857a9643ea8Slogwang 	int ret;
2858a9643ea8Slogwang 
2859a9643ea8Slogwang 	if (dev->data->promiscuous == 1)
28604418919fSjohnjiang 		return 0; /* must remain in all_multicast mode */
2861a9643ea8Slogwang 
2862a9643ea8Slogwang 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2863a9643ea8Slogwang 				vsi->seid, FALSE, NULL);
28644418919fSjohnjiang 	if (ret != I40E_SUCCESS) {
2865a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
28664418919fSjohnjiang 		return -EAGAIN;
28674418919fSjohnjiang 	}
28684418919fSjohnjiang 
28694418919fSjohnjiang 	return 0;
2870a9643ea8Slogwang }
2871a9643ea8Slogwang 
2872a9643ea8Slogwang /*
2873a9643ea8Slogwang  * Set device link up.
2874a9643ea8Slogwang  */
2875a9643ea8Slogwang static int
i40e_dev_set_link_up(struct rte_eth_dev * dev)2876a9643ea8Slogwang i40e_dev_set_link_up(struct rte_eth_dev *dev)
2877a9643ea8Slogwang {
2878a9643ea8Slogwang 	/* re-apply link speed setting */
2879a9643ea8Slogwang 	return i40e_apply_link_speed(dev);
2880a9643ea8Slogwang }
2881a9643ea8Slogwang 
2882a9643ea8Slogwang /*
2883a9643ea8Slogwang  * Set device link down.
2884a9643ea8Slogwang  */
2885a9643ea8Slogwang static int
i40e_dev_set_link_down(struct rte_eth_dev * dev)2886a9643ea8Slogwang i40e_dev_set_link_down(struct rte_eth_dev *dev)
2887a9643ea8Slogwang {
2888a9643ea8Slogwang 	uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
28892bfe3f2eSlogwang 	uint8_t abilities = 0;
2890a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2891a9643ea8Slogwang 
28922bfe3f2eSlogwang 	abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
28932bfe3f2eSlogwang 	return i40e_phy_conf_link(hw, abilities, speed, false);
2894a9643ea8Slogwang }
2895a9643ea8Slogwang 
2896579bf1e2Sjfb8856606 static __rte_always_inline void
update_link_reg(struct i40e_hw * hw,struct rte_eth_link * link)2897d30ea906Sjfb8856606 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2898579bf1e2Sjfb8856606 {
2899579bf1e2Sjfb8856606 /* Link status registers and values*/
2900579bf1e2Sjfb8856606 #define I40E_PRTMAC_LINKSTA		0x001E2420
2901579bf1e2Sjfb8856606 #define I40E_REG_LINK_UP		0x40000080
2902579bf1e2Sjfb8856606 #define I40E_PRTMAC_MACC		0x001E24E0
2903579bf1e2Sjfb8856606 #define I40E_REG_MACC_25GB		0x00020000
2904579bf1e2Sjfb8856606 #define I40E_REG_SPEED_MASK		0x38000000
29051646932aSjfb8856606 #define I40E_REG_SPEED_0		0x00000000
29061646932aSjfb8856606 #define I40E_REG_SPEED_1		0x08000000
29071646932aSjfb8856606 #define I40E_REG_SPEED_2		0x10000000
29081646932aSjfb8856606 #define I40E_REG_SPEED_3		0x18000000
29091646932aSjfb8856606 #define I40E_REG_SPEED_4		0x20000000
2910579bf1e2Sjfb8856606 	uint32_t link_speed;
2911579bf1e2Sjfb8856606 	uint32_t reg_val;
2912579bf1e2Sjfb8856606 
2913579bf1e2Sjfb8856606 	reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2914579bf1e2Sjfb8856606 	link_speed = reg_val & I40E_REG_SPEED_MASK;
2915579bf1e2Sjfb8856606 	reg_val &= I40E_REG_LINK_UP;
2916579bf1e2Sjfb8856606 	link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2917579bf1e2Sjfb8856606 
2918579bf1e2Sjfb8856606 	if (unlikely(link->link_status == 0))
2919579bf1e2Sjfb8856606 		return;
2920579bf1e2Sjfb8856606 
2921579bf1e2Sjfb8856606 	/* Parse the link status */
2922579bf1e2Sjfb8856606 	switch (link_speed) {
29231646932aSjfb8856606 	case I40E_REG_SPEED_0:
2924579bf1e2Sjfb8856606 		link->link_speed = ETH_SPEED_NUM_100M;
2925579bf1e2Sjfb8856606 		break;
29261646932aSjfb8856606 	case I40E_REG_SPEED_1:
2927579bf1e2Sjfb8856606 		link->link_speed = ETH_SPEED_NUM_1G;
2928579bf1e2Sjfb8856606 		break;
29291646932aSjfb8856606 	case I40E_REG_SPEED_2:
29301646932aSjfb8856606 		if (hw->mac.type == I40E_MAC_X722)
29311646932aSjfb8856606 			link->link_speed = ETH_SPEED_NUM_2_5G;
29321646932aSjfb8856606 		else
2933579bf1e2Sjfb8856606 			link->link_speed = ETH_SPEED_NUM_10G;
2934579bf1e2Sjfb8856606 		break;
29351646932aSjfb8856606 	case I40E_REG_SPEED_3:
29361646932aSjfb8856606 		if (hw->mac.type == I40E_MAC_X722) {
29371646932aSjfb8856606 			link->link_speed = ETH_SPEED_NUM_5G;
29381646932aSjfb8856606 		} else {
2939579bf1e2Sjfb8856606 			reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2940579bf1e2Sjfb8856606 
2941579bf1e2Sjfb8856606 			if (reg_val & I40E_REG_MACC_25GB)
2942579bf1e2Sjfb8856606 				link->link_speed = ETH_SPEED_NUM_25G;
2943579bf1e2Sjfb8856606 			else
2944579bf1e2Sjfb8856606 				link->link_speed = ETH_SPEED_NUM_40G;
29451646932aSjfb8856606 		}
29461646932aSjfb8856606 		break;
29471646932aSjfb8856606 	case I40E_REG_SPEED_4:
29481646932aSjfb8856606 		if (hw->mac.type == I40E_MAC_X722)
29491646932aSjfb8856606 			link->link_speed = ETH_SPEED_NUM_10G;
29501646932aSjfb8856606 		else
29511646932aSjfb8856606 			link->link_speed = ETH_SPEED_NUM_20G;
2952579bf1e2Sjfb8856606 		break;
2953579bf1e2Sjfb8856606 	default:
2954579bf1e2Sjfb8856606 		PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2955579bf1e2Sjfb8856606 		break;
2956579bf1e2Sjfb8856606 	}
2957579bf1e2Sjfb8856606 }
2958579bf1e2Sjfb8856606 
2959579bf1e2Sjfb8856606 static __rte_always_inline void
update_link_aq(struct i40e_hw * hw,struct rte_eth_link * link,bool enable_lse,int wait_to_complete)2960d30ea906Sjfb8856606 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2961d30ea906Sjfb8856606 	bool enable_lse, int wait_to_complete)
2962a9643ea8Slogwang {
2963a9643ea8Slogwang #define CHECK_INTERVAL             100  /* 100ms */
2964a9643ea8Slogwang #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2965579bf1e2Sjfb8856606 	uint32_t rep_cnt = MAX_REPEAT_TIME;
2966a9643ea8Slogwang 	struct i40e_link_status link_status;
2967a9643ea8Slogwang 	int status;
2968a9643ea8Slogwang 
2969a9643ea8Slogwang 	memset(&link_status, 0, sizeof(link_status));
2970a9643ea8Slogwang 
2971a9643ea8Slogwang 	do {
2972d30ea906Sjfb8856606 		memset(&link_status, 0, sizeof(link_status));
2973d30ea906Sjfb8856606 
2974a9643ea8Slogwang 		/* Get link status information from hardware */
2975a9643ea8Slogwang 		status = i40e_aq_get_link_info(hw, enable_lse,
2976a9643ea8Slogwang 						&link_status, NULL);
2977579bf1e2Sjfb8856606 		if (unlikely(status != I40E_SUCCESS)) {
29784418919fSjohnjiang 			link->link_speed = ETH_SPEED_NUM_NONE;
2979579bf1e2Sjfb8856606 			link->link_duplex = ETH_LINK_FULL_DUPLEX;
2980a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Failed to get link info");
2981579bf1e2Sjfb8856606 			return;
2982a9643ea8Slogwang 		}
2983a9643ea8Slogwang 
2984579bf1e2Sjfb8856606 		link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2985d30ea906Sjfb8856606 		if (!wait_to_complete || link->link_status)
2986a9643ea8Slogwang 			break;
2987a9643ea8Slogwang 
2988a9643ea8Slogwang 		rte_delay_ms(CHECK_INTERVAL);
29892bfe3f2eSlogwang 	} while (--rep_cnt);
2990a9643ea8Slogwang 
2991a9643ea8Slogwang 	/* Parse the link status */
2992a9643ea8Slogwang 	switch (link_status.link_speed) {
2993a9643ea8Slogwang 	case I40E_LINK_SPEED_100MB:
2994579bf1e2Sjfb8856606 		link->link_speed = ETH_SPEED_NUM_100M;
2995a9643ea8Slogwang 		break;
2996a9643ea8Slogwang 	case I40E_LINK_SPEED_1GB:
2997579bf1e2Sjfb8856606 		link->link_speed = ETH_SPEED_NUM_1G;
2998a9643ea8Slogwang 		break;
2999a9643ea8Slogwang 	case I40E_LINK_SPEED_10GB:
3000579bf1e2Sjfb8856606 		link->link_speed = ETH_SPEED_NUM_10G;
3001a9643ea8Slogwang 		break;
3002a9643ea8Slogwang 	case I40E_LINK_SPEED_20GB:
3003579bf1e2Sjfb8856606 		link->link_speed = ETH_SPEED_NUM_20G;
3004a9643ea8Slogwang 		break;
30052bfe3f2eSlogwang 	case I40E_LINK_SPEED_25GB:
3006579bf1e2Sjfb8856606 		link->link_speed = ETH_SPEED_NUM_25G;
30072bfe3f2eSlogwang 		break;
3008a9643ea8Slogwang 	case I40E_LINK_SPEED_40GB:
3009579bf1e2Sjfb8856606 		link->link_speed = ETH_SPEED_NUM_40G;
3010a9643ea8Slogwang 		break;
3011a9643ea8Slogwang 	default:
3012*2d9fd380Sjfb8856606 		if (link->link_status)
3013*2d9fd380Sjfb8856606 			link->link_speed = ETH_SPEED_NUM_UNKNOWN;
3014*2d9fd380Sjfb8856606 		else
30154418919fSjohnjiang 			link->link_speed = ETH_SPEED_NUM_NONE;
3016a9643ea8Slogwang 		break;
3017a9643ea8Slogwang 	}
3018579bf1e2Sjfb8856606 }
3019a9643ea8Slogwang 
3020579bf1e2Sjfb8856606 int
i40e_dev_link_update(struct rte_eth_dev * dev,int wait_to_complete)3021579bf1e2Sjfb8856606 i40e_dev_link_update(struct rte_eth_dev *dev,
3022579bf1e2Sjfb8856606 		     int wait_to_complete)
3023579bf1e2Sjfb8856606 {
3024579bf1e2Sjfb8856606 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3025d30ea906Sjfb8856606 	struct rte_eth_link link;
3026579bf1e2Sjfb8856606 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3027d30ea906Sjfb8856606 	int ret;
3028579bf1e2Sjfb8856606 
3029579bf1e2Sjfb8856606 	memset(&link, 0, sizeof(link));
3030579bf1e2Sjfb8856606 
3031579bf1e2Sjfb8856606 	/* i40e uses full duplex only */
3032579bf1e2Sjfb8856606 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
3033a9643ea8Slogwang 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3034a9643ea8Slogwang 			ETH_LINK_SPEED_FIXED);
3035a9643ea8Slogwang 
3036d30ea906Sjfb8856606 	if (!wait_to_complete && !enable_lse)
3037d30ea906Sjfb8856606 		update_link_reg(hw, &link);
3038579bf1e2Sjfb8856606 	else
3039d30ea906Sjfb8856606 		update_link_aq(hw, &link, enable_lse, wait_to_complete);
3040579bf1e2Sjfb8856606 
30414418919fSjohnjiang 	if (hw->switch_dev)
30424418919fSjohnjiang 		rte_eth_linkstatus_get(hw->switch_dev, &link);
30434418919fSjohnjiang 
3044d30ea906Sjfb8856606 	ret = rte_eth_linkstatus_set(dev, &link);
30452bfe3f2eSlogwang 	i40e_notify_all_vfs_link_status(dev);
30462bfe3f2eSlogwang 
3047d30ea906Sjfb8856606 	return ret;
3048a9643ea8Slogwang }
3049a9643ea8Slogwang 
30500c6bd470Sfengbojiang static void
i40e_stat_update_48_in_64(struct i40e_hw * hw,uint32_t hireg,uint32_t loreg,bool offset_loaded,uint64_t * offset,uint64_t * stat,uint64_t * prev_stat)30510c6bd470Sfengbojiang i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
30520c6bd470Sfengbojiang 			  uint32_t loreg, bool offset_loaded, uint64_t *offset,
30530c6bd470Sfengbojiang 			  uint64_t *stat, uint64_t *prev_stat)
30540c6bd470Sfengbojiang {
30550c6bd470Sfengbojiang 	i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
30560c6bd470Sfengbojiang 	/* enlarge the limitation when statistics counters overflowed */
30570c6bd470Sfengbojiang 	if (offset_loaded) {
30580c6bd470Sfengbojiang 		if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
30590c6bd470Sfengbojiang 			*stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
30600c6bd470Sfengbojiang 		*stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
30610c6bd470Sfengbojiang 	}
30620c6bd470Sfengbojiang 	*prev_stat = *stat;
30630c6bd470Sfengbojiang }
30640c6bd470Sfengbojiang 
3065a9643ea8Slogwang /* Get all the statistics of a VSI */
3066a9643ea8Slogwang void
i40e_update_vsi_stats(struct i40e_vsi * vsi)3067a9643ea8Slogwang i40e_update_vsi_stats(struct i40e_vsi *vsi)
3068a9643ea8Slogwang {
3069a9643ea8Slogwang 	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
3070a9643ea8Slogwang 	struct i40e_eth_stats *nes = &vsi->eth_stats;
3071a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3072a9643ea8Slogwang 	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
3073a9643ea8Slogwang 
30740c6bd470Sfengbojiang 	i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
3075a9643ea8Slogwang 				  vsi->offset_loaded, &oes->rx_bytes,
30760c6bd470Sfengbojiang 				  &nes->rx_bytes, &vsi->prev_rx_bytes);
3077a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
3078a9643ea8Slogwang 			    vsi->offset_loaded, &oes->rx_unicast,
3079a9643ea8Slogwang 			    &nes->rx_unicast);
3080a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
3081a9643ea8Slogwang 			    vsi->offset_loaded, &oes->rx_multicast,
3082a9643ea8Slogwang 			    &nes->rx_multicast);
3083a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
3084a9643ea8Slogwang 			    vsi->offset_loaded, &oes->rx_broadcast,
3085a9643ea8Slogwang 			    &nes->rx_broadcast);
30862bfe3f2eSlogwang 	/* exclude CRC bytes */
30872bfe3f2eSlogwang 	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
30884418919fSjohnjiang 		nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
30892bfe3f2eSlogwang 
3090a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
3091a9643ea8Slogwang 			    &oes->rx_discards, &nes->rx_discards);
3092a9643ea8Slogwang 	/* GLV_REPC not supported */
3093a9643ea8Slogwang 	/* GLV_RMPC not supported */
3094a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
3095a9643ea8Slogwang 			    &oes->rx_unknown_protocol,
3096a9643ea8Slogwang 			    &nes->rx_unknown_protocol);
30970c6bd470Sfengbojiang 	i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
3098a9643ea8Slogwang 				  vsi->offset_loaded, &oes->tx_bytes,
30990c6bd470Sfengbojiang 				  &nes->tx_bytes, &vsi->prev_tx_bytes);
3100a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
3101a9643ea8Slogwang 			    vsi->offset_loaded, &oes->tx_unicast,
3102a9643ea8Slogwang 			    &nes->tx_unicast);
3103a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
3104a9643ea8Slogwang 			    vsi->offset_loaded, &oes->tx_multicast,
3105a9643ea8Slogwang 			    &nes->tx_multicast);
3106a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
3107a9643ea8Slogwang 			    vsi->offset_loaded,  &oes->tx_broadcast,
3108a9643ea8Slogwang 			    &nes->tx_broadcast);
3109a9643ea8Slogwang 	/* GLV_TDPC not supported */
3110a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
3111a9643ea8Slogwang 			    &oes->tx_errors, &nes->tx_errors);
3112a9643ea8Slogwang 	vsi->offset_loaded = true;
3113a9643ea8Slogwang 
3114a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
3115a9643ea8Slogwang 		    vsi->vsi_id);
3116a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
3117a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
3118a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
3119a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
3120a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
3121a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3122a9643ea8Slogwang 		    nes->rx_unknown_protocol);
3123a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
3124a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
3125a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
3126a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
3127a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
3128a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
3129a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
3130a9643ea8Slogwang 		    vsi->vsi_id);
3131a9643ea8Slogwang }
3132a9643ea8Slogwang 
3133a9643ea8Slogwang static void
i40e_read_stats_registers(struct i40e_pf * pf,struct i40e_hw * hw)3134a9643ea8Slogwang i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
3135a9643ea8Slogwang {
3136a9643ea8Slogwang 	unsigned int i;
3137a9643ea8Slogwang 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3138a9643ea8Slogwang 	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
3139a9643ea8Slogwang 
31402bfe3f2eSlogwang 	/* Get rx/tx bytes of internal transfer packets */
31410c6bd470Sfengbojiang 	i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
31422bfe3f2eSlogwang 				  I40E_GLV_GORCL(hw->port),
31432bfe3f2eSlogwang 				  pf->offset_loaded,
31442bfe3f2eSlogwang 				  &pf->internal_stats_offset.rx_bytes,
31450c6bd470Sfengbojiang 				  &pf->internal_stats.rx_bytes,
31460c6bd470Sfengbojiang 				  &pf->internal_prev_rx_bytes);
31470c6bd470Sfengbojiang 	i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
31482bfe3f2eSlogwang 				  I40E_GLV_GOTCL(hw->port),
31492bfe3f2eSlogwang 				  pf->offset_loaded,
31502bfe3f2eSlogwang 				  &pf->internal_stats_offset.tx_bytes,
31510c6bd470Sfengbojiang 				  &pf->internal_stats.tx_bytes,
31520c6bd470Sfengbojiang 				  &pf->internal_prev_tx_bytes);
31532bfe3f2eSlogwang 	/* Get total internal rx packet count */
31542bfe3f2eSlogwang 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
31552bfe3f2eSlogwang 			    I40E_GLV_UPRCL(hw->port),
31562bfe3f2eSlogwang 			    pf->offset_loaded,
31572bfe3f2eSlogwang 			    &pf->internal_stats_offset.rx_unicast,
31582bfe3f2eSlogwang 			    &pf->internal_stats.rx_unicast);
31592bfe3f2eSlogwang 	i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
31602bfe3f2eSlogwang 			    I40E_GLV_MPRCL(hw->port),
31612bfe3f2eSlogwang 			    pf->offset_loaded,
31622bfe3f2eSlogwang 			    &pf->internal_stats_offset.rx_multicast,
31632bfe3f2eSlogwang 			    &pf->internal_stats.rx_multicast);
31642bfe3f2eSlogwang 	i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
31652bfe3f2eSlogwang 			    I40E_GLV_BPRCL(hw->port),
31662bfe3f2eSlogwang 			    pf->offset_loaded,
31672bfe3f2eSlogwang 			    &pf->internal_stats_offset.rx_broadcast,
31682bfe3f2eSlogwang 			    &pf->internal_stats.rx_broadcast);
31692bfe3f2eSlogwang 	/* Get total internal tx packet count */
31702bfe3f2eSlogwang 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
31712bfe3f2eSlogwang 			    I40E_GLV_UPTCL(hw->port),
31722bfe3f2eSlogwang 			    pf->offset_loaded,
31732bfe3f2eSlogwang 			    &pf->internal_stats_offset.tx_unicast,
31742bfe3f2eSlogwang 			    &pf->internal_stats.tx_unicast);
31752bfe3f2eSlogwang 	i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
31762bfe3f2eSlogwang 			    I40E_GLV_MPTCL(hw->port),
31772bfe3f2eSlogwang 			    pf->offset_loaded,
31782bfe3f2eSlogwang 			    &pf->internal_stats_offset.tx_multicast,
31792bfe3f2eSlogwang 			    &pf->internal_stats.tx_multicast);
31802bfe3f2eSlogwang 	i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
31812bfe3f2eSlogwang 			    I40E_GLV_BPTCL(hw->port),
31822bfe3f2eSlogwang 			    pf->offset_loaded,
31832bfe3f2eSlogwang 			    &pf->internal_stats_offset.tx_broadcast,
31842bfe3f2eSlogwang 			    &pf->internal_stats.tx_broadcast);
31852bfe3f2eSlogwang 
31862bfe3f2eSlogwang 	/* exclude CRC size */
31872bfe3f2eSlogwang 	pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
31882bfe3f2eSlogwang 		pf->internal_stats.rx_multicast +
31894418919fSjohnjiang 		pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
31902bfe3f2eSlogwang 
3191a9643ea8Slogwang 	/* Get statistics of struct i40e_eth_stats */
31920c6bd470Sfengbojiang 	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
3193a9643ea8Slogwang 				  I40E_GLPRT_GORCL(hw->port),
3194a9643ea8Slogwang 				  pf->offset_loaded, &os->eth.rx_bytes,
31950c6bd470Sfengbojiang 				  &ns->eth.rx_bytes, &pf->prev_rx_bytes);
3196a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
3197a9643ea8Slogwang 			    I40E_GLPRT_UPRCL(hw->port),
3198a9643ea8Slogwang 			    pf->offset_loaded, &os->eth.rx_unicast,
3199a9643ea8Slogwang 			    &ns->eth.rx_unicast);
3200a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
3201a9643ea8Slogwang 			    I40E_GLPRT_MPRCL(hw->port),
3202a9643ea8Slogwang 			    pf->offset_loaded, &os->eth.rx_multicast,
3203a9643ea8Slogwang 			    &ns->eth.rx_multicast);
3204a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
3205a9643ea8Slogwang 			    I40E_GLPRT_BPRCL(hw->port),
3206a9643ea8Slogwang 			    pf->offset_loaded, &os->eth.rx_broadcast,
3207a9643ea8Slogwang 			    &ns->eth.rx_broadcast);
3208a9643ea8Slogwang 	/* Workaround: CRC size should not be included in byte statistics,
32094418919fSjohnjiang 	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
32104418919fSjohnjiang 	 * packet.
3211a9643ea8Slogwang 	 */
3212a9643ea8Slogwang 	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
32134418919fSjohnjiang 		ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
3214a9643ea8Slogwang 
32152bfe3f2eSlogwang 	/* exclude internal rx bytes
32162bfe3f2eSlogwang 	 * Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before
32172bfe3f2eSlogwang 	 * I40E_GLPRT_GORCH[H/L], so there is a small window that cause negative
32182bfe3f2eSlogwang 	 * value.
32192bfe3f2eSlogwang 	 * same to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L], I40E_GLV_BPRC[H/L].
32202bfe3f2eSlogwang 	 */
32212bfe3f2eSlogwang 	if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
32222bfe3f2eSlogwang 		ns->eth.rx_bytes = 0;
32232bfe3f2eSlogwang 	else
32242bfe3f2eSlogwang 		ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
32252bfe3f2eSlogwang 
32262bfe3f2eSlogwang 	if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
32272bfe3f2eSlogwang 		ns->eth.rx_unicast = 0;
32282bfe3f2eSlogwang 	else
32292bfe3f2eSlogwang 		ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
32302bfe3f2eSlogwang 
32312bfe3f2eSlogwang 	if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
32322bfe3f2eSlogwang 		ns->eth.rx_multicast = 0;
32332bfe3f2eSlogwang 	else
32342bfe3f2eSlogwang 		ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
32352bfe3f2eSlogwang 
32362bfe3f2eSlogwang 	if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
32372bfe3f2eSlogwang 		ns->eth.rx_broadcast = 0;
32382bfe3f2eSlogwang 	else
32392bfe3f2eSlogwang 		ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
32402bfe3f2eSlogwang 
3241a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
3242a9643ea8Slogwang 			    pf->offset_loaded, &os->eth.rx_discards,
3243a9643ea8Slogwang 			    &ns->eth.rx_discards);
3244a9643ea8Slogwang 	/* GLPRT_REPC not supported */
3245a9643ea8Slogwang 	/* GLPRT_RMPC not supported */
3246a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
3247a9643ea8Slogwang 			    pf->offset_loaded,
3248a9643ea8Slogwang 			    &os->eth.rx_unknown_protocol,
3249a9643ea8Slogwang 			    &ns->eth.rx_unknown_protocol);
32500c6bd470Sfengbojiang 	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
3251a9643ea8Slogwang 				  I40E_GLPRT_GOTCL(hw->port),
3252a9643ea8Slogwang 				  pf->offset_loaded, &os->eth.tx_bytes,
32530c6bd470Sfengbojiang 				  &ns->eth.tx_bytes, &pf->prev_tx_bytes);
3254a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
3255a9643ea8Slogwang 			    I40E_GLPRT_UPTCL(hw->port),
3256a9643ea8Slogwang 			    pf->offset_loaded, &os->eth.tx_unicast,
3257a9643ea8Slogwang 			    &ns->eth.tx_unicast);
3258a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
3259a9643ea8Slogwang 			    I40E_GLPRT_MPTCL(hw->port),
3260a9643ea8Slogwang 			    pf->offset_loaded, &os->eth.tx_multicast,
3261a9643ea8Slogwang 			    &ns->eth.tx_multicast);
3262a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
3263a9643ea8Slogwang 			    I40E_GLPRT_BPTCL(hw->port),
3264a9643ea8Slogwang 			    pf->offset_loaded, &os->eth.tx_broadcast,
3265a9643ea8Slogwang 			    &ns->eth.tx_broadcast);
3266a9643ea8Slogwang 	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
32674418919fSjohnjiang 		ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
32682bfe3f2eSlogwang 
32692bfe3f2eSlogwang 	/* exclude internal tx bytes
32702bfe3f2eSlogwang 	 * Workaround: it is possible I40E_GLV_GOTCH[H/L] is updated before
32712bfe3f2eSlogwang 	 * I40E_GLPRT_GOTCH[H/L], so there is a small window that cause negative
32722bfe3f2eSlogwang 	 * value.
32732bfe3f2eSlogwang 	 * same to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L], I40E_GLV_BPTC[H/L].
32742bfe3f2eSlogwang 	 */
32752bfe3f2eSlogwang 	if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
32762bfe3f2eSlogwang 		ns->eth.tx_bytes = 0;
32772bfe3f2eSlogwang 	else
32782bfe3f2eSlogwang 		ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
32792bfe3f2eSlogwang 
32802bfe3f2eSlogwang 	if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
32812bfe3f2eSlogwang 		ns->eth.tx_unicast = 0;
32822bfe3f2eSlogwang 	else
32832bfe3f2eSlogwang 		ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
32842bfe3f2eSlogwang 
32852bfe3f2eSlogwang 	if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
32862bfe3f2eSlogwang 		ns->eth.tx_multicast = 0;
32872bfe3f2eSlogwang 	else
32882bfe3f2eSlogwang 		ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
32892bfe3f2eSlogwang 
32902bfe3f2eSlogwang 	if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
32912bfe3f2eSlogwang 		ns->eth.tx_broadcast = 0;
32922bfe3f2eSlogwang 	else
32932bfe3f2eSlogwang 		ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
32942bfe3f2eSlogwang 
3295a9643ea8Slogwang 	/* GLPRT_TEPC not supported */
3296a9643ea8Slogwang 
3297a9643ea8Slogwang 	/* additional port specific stats */
3298a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
3299a9643ea8Slogwang 			    pf->offset_loaded, &os->tx_dropped_link_down,
3300a9643ea8Slogwang 			    &ns->tx_dropped_link_down);
3301a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
3302a9643ea8Slogwang 			    pf->offset_loaded, &os->crc_errors,
3303a9643ea8Slogwang 			    &ns->crc_errors);
3304a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
3305a9643ea8Slogwang 			    pf->offset_loaded, &os->illegal_bytes,
3306a9643ea8Slogwang 			    &ns->illegal_bytes);
3307a9643ea8Slogwang 	/* GLPRT_ERRBC not supported */
3308a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
3309a9643ea8Slogwang 			    pf->offset_loaded, &os->mac_local_faults,
3310a9643ea8Slogwang 			    &ns->mac_local_faults);
3311a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
3312a9643ea8Slogwang 			    pf->offset_loaded, &os->mac_remote_faults,
3313a9643ea8Slogwang 			    &ns->mac_remote_faults);
3314a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
3315a9643ea8Slogwang 			    pf->offset_loaded, &os->rx_length_errors,
3316a9643ea8Slogwang 			    &ns->rx_length_errors);
3317a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
3318a9643ea8Slogwang 			    pf->offset_loaded, &os->link_xon_rx,
3319a9643ea8Slogwang 			    &ns->link_xon_rx);
3320a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3321a9643ea8Slogwang 			    pf->offset_loaded, &os->link_xoff_rx,
3322a9643ea8Slogwang 			    &ns->link_xoff_rx);
3323a9643ea8Slogwang 	for (i = 0; i < 8; i++) {
3324a9643ea8Slogwang 		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3325a9643ea8Slogwang 				    pf->offset_loaded,
3326a9643ea8Slogwang 				    &os->priority_xon_rx[i],
3327a9643ea8Slogwang 				    &ns->priority_xon_rx[i]);
3328a9643ea8Slogwang 		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
3329a9643ea8Slogwang 				    pf->offset_loaded,
3330a9643ea8Slogwang 				    &os->priority_xoff_rx[i],
3331a9643ea8Slogwang 				    &ns->priority_xoff_rx[i]);
3332a9643ea8Slogwang 	}
3333a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
3334a9643ea8Slogwang 			    pf->offset_loaded, &os->link_xon_tx,
3335a9643ea8Slogwang 			    &ns->link_xon_tx);
3336a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3337a9643ea8Slogwang 			    pf->offset_loaded, &os->link_xoff_tx,
3338a9643ea8Slogwang 			    &ns->link_xoff_tx);
3339a9643ea8Slogwang 	for (i = 0; i < 8; i++) {
3340a9643ea8Slogwang 		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3341a9643ea8Slogwang 				    pf->offset_loaded,
3342a9643ea8Slogwang 				    &os->priority_xon_tx[i],
3343a9643ea8Slogwang 				    &ns->priority_xon_tx[i]);
3344a9643ea8Slogwang 		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3345a9643ea8Slogwang 				    pf->offset_loaded,
3346a9643ea8Slogwang 				    &os->priority_xoff_tx[i],
3347a9643ea8Slogwang 				    &ns->priority_xoff_tx[i]);
3348a9643ea8Slogwang 		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3349a9643ea8Slogwang 				    pf->offset_loaded,
3350a9643ea8Slogwang 				    &os->priority_xon_2_xoff[i],
3351a9643ea8Slogwang 				    &ns->priority_xon_2_xoff[i]);
3352a9643ea8Slogwang 	}
3353a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
3354a9643ea8Slogwang 			    I40E_GLPRT_PRC64L(hw->port),
3355a9643ea8Slogwang 			    pf->offset_loaded, &os->rx_size_64,
3356a9643ea8Slogwang 			    &ns->rx_size_64);
3357a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
3358a9643ea8Slogwang 			    I40E_GLPRT_PRC127L(hw->port),
3359a9643ea8Slogwang 			    pf->offset_loaded, &os->rx_size_127,
3360a9643ea8Slogwang 			    &ns->rx_size_127);
3361a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
3362a9643ea8Slogwang 			    I40E_GLPRT_PRC255L(hw->port),
3363a9643ea8Slogwang 			    pf->offset_loaded, &os->rx_size_255,
3364a9643ea8Slogwang 			    &ns->rx_size_255);
3365a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
3366a9643ea8Slogwang 			    I40E_GLPRT_PRC511L(hw->port),
3367a9643ea8Slogwang 			    pf->offset_loaded, &os->rx_size_511,
3368a9643ea8Slogwang 			    &ns->rx_size_511);
3369a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3370a9643ea8Slogwang 			    I40E_GLPRT_PRC1023L(hw->port),
3371a9643ea8Slogwang 			    pf->offset_loaded, &os->rx_size_1023,
3372a9643ea8Slogwang 			    &ns->rx_size_1023);
3373a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3374a9643ea8Slogwang 			    I40E_GLPRT_PRC1522L(hw->port),
3375a9643ea8Slogwang 			    pf->offset_loaded, &os->rx_size_1522,
3376a9643ea8Slogwang 			    &ns->rx_size_1522);
3377a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3378a9643ea8Slogwang 			    I40E_GLPRT_PRC9522L(hw->port),
3379a9643ea8Slogwang 			    pf->offset_loaded, &os->rx_size_big,
3380a9643ea8Slogwang 			    &ns->rx_size_big);
3381a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3382a9643ea8Slogwang 			    pf->offset_loaded, &os->rx_undersize,
3383a9643ea8Slogwang 			    &ns->rx_undersize);
3384a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3385a9643ea8Slogwang 			    pf->offset_loaded, &os->rx_fragments,
3386a9643ea8Slogwang 			    &ns->rx_fragments);
3387a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3388a9643ea8Slogwang 			    pf->offset_loaded, &os->rx_oversize,
3389a9643ea8Slogwang 			    &ns->rx_oversize);
3390a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3391a9643ea8Slogwang 			    pf->offset_loaded, &os->rx_jabber,
3392a9643ea8Slogwang 			    &ns->rx_jabber);
3393a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3394a9643ea8Slogwang 			    I40E_GLPRT_PTC64L(hw->port),
3395a9643ea8Slogwang 			    pf->offset_loaded, &os->tx_size_64,
3396a9643ea8Slogwang 			    &ns->tx_size_64);
3397a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3398a9643ea8Slogwang 			    I40E_GLPRT_PTC127L(hw->port),
3399a9643ea8Slogwang 			    pf->offset_loaded, &os->tx_size_127,
3400a9643ea8Slogwang 			    &ns->tx_size_127);
3401a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3402a9643ea8Slogwang 			    I40E_GLPRT_PTC255L(hw->port),
3403a9643ea8Slogwang 			    pf->offset_loaded, &os->tx_size_255,
3404a9643ea8Slogwang 			    &ns->tx_size_255);
3405a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3406a9643ea8Slogwang 			    I40E_GLPRT_PTC511L(hw->port),
3407a9643ea8Slogwang 			    pf->offset_loaded, &os->tx_size_511,
3408a9643ea8Slogwang 			    &ns->tx_size_511);
3409a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3410a9643ea8Slogwang 			    I40E_GLPRT_PTC1023L(hw->port),
3411a9643ea8Slogwang 			    pf->offset_loaded, &os->tx_size_1023,
3412a9643ea8Slogwang 			    &ns->tx_size_1023);
3413a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3414a9643ea8Slogwang 			    I40E_GLPRT_PTC1522L(hw->port),
3415a9643ea8Slogwang 			    pf->offset_loaded, &os->tx_size_1522,
3416a9643ea8Slogwang 			    &ns->tx_size_1522);
3417a9643ea8Slogwang 	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3418a9643ea8Slogwang 			    I40E_GLPRT_PTC9522L(hw->port),
3419a9643ea8Slogwang 			    pf->offset_loaded, &os->tx_size_big,
3420a9643ea8Slogwang 			    &ns->tx_size_big);
3421a9643ea8Slogwang 	i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3422a9643ea8Slogwang 			   pf->offset_loaded,
3423a9643ea8Slogwang 			   &os->fd_sb_match, &ns->fd_sb_match);
3424a9643ea8Slogwang 	/* GLPRT_MSPDC not supported */
3425a9643ea8Slogwang 	/* GLPRT_XEC not supported */
3426a9643ea8Slogwang 
3427a9643ea8Slogwang 	pf->offset_loaded = true;
3428a9643ea8Slogwang 
3429a9643ea8Slogwang 	if (pf->main_vsi)
3430a9643ea8Slogwang 		i40e_update_vsi_stats(pf->main_vsi);
3431a9643ea8Slogwang }
3432a9643ea8Slogwang 
3433a9643ea8Slogwang /* Get all statistics of a port */
34342bfe3f2eSlogwang static int
i40e_dev_stats_get(struct rte_eth_dev * dev,struct rte_eth_stats * stats)3435a9643ea8Slogwang i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3436a9643ea8Slogwang {
3437a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3438a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3439a9643ea8Slogwang 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
34401646932aSjfb8856606 	struct i40e_vsi *vsi;
3441a9643ea8Slogwang 	unsigned i;
3442a9643ea8Slogwang 
3443a9643ea8Slogwang 	/* call read registers - updates values, now write them to struct */
3444a9643ea8Slogwang 	i40e_read_stats_registers(pf, hw);
3445a9643ea8Slogwang 
34461646932aSjfb8856606 	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
34471646932aSjfb8856606 			pf->main_vsi->eth_stats.rx_multicast +
34481646932aSjfb8856606 			pf->main_vsi->eth_stats.rx_broadcast -
3449a9643ea8Slogwang 			pf->main_vsi->eth_stats.rx_discards;
34502bfe3f2eSlogwang 	stats->opackets = ns->eth.tx_unicast +
34512bfe3f2eSlogwang 			ns->eth.tx_multicast +
34522bfe3f2eSlogwang 			ns->eth.tx_broadcast;
34531646932aSjfb8856606 	stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
3454a9643ea8Slogwang 	stats->obytes   = ns->eth.tx_bytes;
3455a9643ea8Slogwang 	stats->oerrors  = ns->eth.tx_errors +
3456a9643ea8Slogwang 			pf->main_vsi->eth_stats.tx_errors;
3457a9643ea8Slogwang 
3458a9643ea8Slogwang 	/* Rx Errors */
3459a9643ea8Slogwang 	stats->imissed  = ns->eth.rx_discards +
3460a9643ea8Slogwang 			pf->main_vsi->eth_stats.rx_discards;
3461a9643ea8Slogwang 	stats->ierrors  = ns->crc_errors +
3462a9643ea8Slogwang 			ns->rx_length_errors + ns->rx_undersize +
3463a9643ea8Slogwang 			ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3464a9643ea8Slogwang 
34651646932aSjfb8856606 	if (pf->vfs) {
34661646932aSjfb8856606 		for (i = 0; i < pf->vf_num; i++) {
34671646932aSjfb8856606 			vsi = pf->vfs[i].vsi;
34681646932aSjfb8856606 			i40e_update_vsi_stats(vsi);
34691646932aSjfb8856606 
34701646932aSjfb8856606 			stats->ipackets += (vsi->eth_stats.rx_unicast +
34711646932aSjfb8856606 					vsi->eth_stats.rx_multicast +
34721646932aSjfb8856606 					vsi->eth_stats.rx_broadcast -
34731646932aSjfb8856606 					vsi->eth_stats.rx_discards);
34741646932aSjfb8856606 			stats->ibytes   += vsi->eth_stats.rx_bytes;
34751646932aSjfb8856606 			stats->oerrors  += vsi->eth_stats.tx_errors;
34761646932aSjfb8856606 			stats->imissed  += vsi->eth_stats.rx_discards;
34771646932aSjfb8856606 		}
34781646932aSjfb8856606 	}
34791646932aSjfb8856606 
3480a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3481a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3482a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3483a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3484a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3485a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3486a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3487a9643ea8Slogwang 		    ns->eth.rx_unknown_protocol);
3488a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3489a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3490a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3491a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3492a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3493a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3494a9643ea8Slogwang 
3495a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3496a9643ea8Slogwang 		    ns->tx_dropped_link_down);
3497a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3498a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3499a9643ea8Slogwang 		    ns->illegal_bytes);
3500a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3501a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3502a9643ea8Slogwang 		    ns->mac_local_faults);
3503a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3504a9643ea8Slogwang 		    ns->mac_remote_faults);
3505a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3506a9643ea8Slogwang 		    ns->rx_length_errors);
3507a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3508a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3509a9643ea8Slogwang 	for (i = 0; i < 8; i++) {
3510a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3511a9643ea8Slogwang 				i, ns->priority_xon_rx[i]);
3512a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3513a9643ea8Slogwang 				i, ns->priority_xoff_rx[i]);
3514a9643ea8Slogwang 	}
3515a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3516a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3517a9643ea8Slogwang 	for (i = 0; i < 8; i++) {
3518a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3519a9643ea8Slogwang 				i, ns->priority_xon_tx[i]);
3520a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3521a9643ea8Slogwang 				i, ns->priority_xoff_tx[i]);
3522a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3523a9643ea8Slogwang 				i, ns->priority_xon_2_xoff[i]);
3524a9643ea8Slogwang 	}
3525a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3526a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3527a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3528a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3529a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3530a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3531a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3532a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3533a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3534a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3535a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3536a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3537a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3538a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3539a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3540a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3541a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3542a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3543a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3544a9643ea8Slogwang 			ns->mac_short_packet_dropped);
3545a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3546a9643ea8Slogwang 		    ns->checksum_error);
3547a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3548a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
35492bfe3f2eSlogwang 	return 0;
3550a9643ea8Slogwang }
3551a9643ea8Slogwang 
3552a9643ea8Slogwang /* Reset the statistics */
35534418919fSjohnjiang static int
i40e_dev_stats_reset(struct rte_eth_dev * dev)3554a9643ea8Slogwang i40e_dev_stats_reset(struct rte_eth_dev *dev)
3555a9643ea8Slogwang {
3556a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3557a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3558a9643ea8Slogwang 
3559a9643ea8Slogwang 	/* Mark PF and VSI stats to update the offset, aka "reset" */
3560a9643ea8Slogwang 	pf->offset_loaded = false;
3561a9643ea8Slogwang 	if (pf->main_vsi)
3562a9643ea8Slogwang 		pf->main_vsi->offset_loaded = false;
3563a9643ea8Slogwang 
3564a9643ea8Slogwang 	/* read the stats, reading current register values into offset */
3565a9643ea8Slogwang 	i40e_read_stats_registers(pf, hw);
35664418919fSjohnjiang 
35674418919fSjohnjiang 	return 0;
3568a9643ea8Slogwang }
3569a9643ea8Slogwang 
3570a9643ea8Slogwang static uint32_t
i40e_xstats_calc_num(void)3571a9643ea8Slogwang i40e_xstats_calc_num(void)
3572a9643ea8Slogwang {
3573a9643ea8Slogwang 	return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3574a9643ea8Slogwang 		(I40E_NB_RXQ_PRIO_XSTATS * 8) +
3575a9643ea8Slogwang 		(I40E_NB_TXQ_PRIO_XSTATS * 8);
3576a9643ea8Slogwang }
3577a9643ea8Slogwang 
i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev * dev,struct rte_eth_xstat_name * xstats_names,__rte_unused unsigned limit)3578a9643ea8Slogwang static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3579a9643ea8Slogwang 				     struct rte_eth_xstat_name *xstats_names,
3580a9643ea8Slogwang 				     __rte_unused unsigned limit)
3581a9643ea8Slogwang {
3582a9643ea8Slogwang 	unsigned count = 0;
3583a9643ea8Slogwang 	unsigned i, prio;
3584a9643ea8Slogwang 
3585a9643ea8Slogwang 	if (xstats_names == NULL)
3586a9643ea8Slogwang 		return i40e_xstats_calc_num();
3587a9643ea8Slogwang 
3588a9643ea8Slogwang 	/* Note: limit checked in rte_eth_xstats_names() */
3589a9643ea8Slogwang 
3590a9643ea8Slogwang 	/* Get stats from i40e_eth_stats struct */
3591a9643ea8Slogwang 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
35924418919fSjohnjiang 		strlcpy(xstats_names[count].name,
35934418919fSjohnjiang 			rte_i40e_stats_strings[i].name,
35944418919fSjohnjiang 			sizeof(xstats_names[count].name));
3595a9643ea8Slogwang 		count++;
3596a9643ea8Slogwang 	}
3597a9643ea8Slogwang 
3598a9643ea8Slogwang 	/* Get individiual stats from i40e_hw_port struct */
3599a9643ea8Slogwang 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
36004418919fSjohnjiang 		strlcpy(xstats_names[count].name,
36014418919fSjohnjiang 			rte_i40e_hw_port_strings[i].name,
36024418919fSjohnjiang 			sizeof(xstats_names[count].name));
3603a9643ea8Slogwang 		count++;
3604a9643ea8Slogwang 	}
3605a9643ea8Slogwang 
3606a9643ea8Slogwang 	for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3607a9643ea8Slogwang 		for (prio = 0; prio < 8; prio++) {
3608a9643ea8Slogwang 			snprintf(xstats_names[count].name,
3609a9643ea8Slogwang 				 sizeof(xstats_names[count].name),
3610a9643ea8Slogwang 				 "rx_priority%u_%s", prio,
3611a9643ea8Slogwang 				 rte_i40e_rxq_prio_strings[i].name);
3612a9643ea8Slogwang 			count++;
3613a9643ea8Slogwang 		}
3614a9643ea8Slogwang 	}
3615a9643ea8Slogwang 
3616a9643ea8Slogwang 	for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3617a9643ea8Slogwang 		for (prio = 0; prio < 8; prio++) {
3618a9643ea8Slogwang 			snprintf(xstats_names[count].name,
3619a9643ea8Slogwang 				 sizeof(xstats_names[count].name),
3620a9643ea8Slogwang 				 "tx_priority%u_%s", prio,
3621a9643ea8Slogwang 				 rte_i40e_txq_prio_strings[i].name);
3622a9643ea8Slogwang 			count++;
3623a9643ea8Slogwang 		}
3624a9643ea8Slogwang 	}
3625a9643ea8Slogwang 	return count;
3626a9643ea8Slogwang }
3627a9643ea8Slogwang 
3628a9643ea8Slogwang static int
i40e_dev_xstats_get(struct rte_eth_dev * dev,struct rte_eth_xstat * xstats,unsigned n)3629a9643ea8Slogwang i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3630a9643ea8Slogwang 		    unsigned n)
3631a9643ea8Slogwang {
3632a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3633a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3634a9643ea8Slogwang 	unsigned i, count, prio;
3635a9643ea8Slogwang 	struct i40e_hw_port_stats *hw_stats = &pf->stats;
3636a9643ea8Slogwang 
3637a9643ea8Slogwang 	count = i40e_xstats_calc_num();
3638a9643ea8Slogwang 	if (n < count)
3639a9643ea8Slogwang 		return count;
3640a9643ea8Slogwang 
3641a9643ea8Slogwang 	i40e_read_stats_registers(pf, hw);
3642a9643ea8Slogwang 
3643a9643ea8Slogwang 	if (xstats == NULL)
3644a9643ea8Slogwang 		return 0;
3645a9643ea8Slogwang 
3646a9643ea8Slogwang 	count = 0;
3647a9643ea8Slogwang 
3648a9643ea8Slogwang 	/* Get stats from i40e_eth_stats struct */
3649a9643ea8Slogwang 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3650a9643ea8Slogwang 		xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3651a9643ea8Slogwang 			rte_i40e_stats_strings[i].offset);
36522bfe3f2eSlogwang 		xstats[count].id = count;
3653a9643ea8Slogwang 		count++;
3654a9643ea8Slogwang 	}
3655a9643ea8Slogwang 
3656a9643ea8Slogwang 	/* Get individiual stats from i40e_hw_port struct */
3657a9643ea8Slogwang 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3658a9643ea8Slogwang 		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3659a9643ea8Slogwang 			rte_i40e_hw_port_strings[i].offset);
36602bfe3f2eSlogwang 		xstats[count].id = count;
3661a9643ea8Slogwang 		count++;
3662a9643ea8Slogwang 	}
3663a9643ea8Slogwang 
3664a9643ea8Slogwang 	for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3665a9643ea8Slogwang 		for (prio = 0; prio < 8; prio++) {
3666a9643ea8Slogwang 			xstats[count].value =
3667a9643ea8Slogwang 				*(uint64_t *)(((char *)hw_stats) +
3668a9643ea8Slogwang 				rte_i40e_rxq_prio_strings[i].offset +
3669a9643ea8Slogwang 				(sizeof(uint64_t) * prio));
36702bfe3f2eSlogwang 			xstats[count].id = count;
3671a9643ea8Slogwang 			count++;
3672a9643ea8Slogwang 		}
3673a9643ea8Slogwang 	}
3674a9643ea8Slogwang 
3675a9643ea8Slogwang 	for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3676a9643ea8Slogwang 		for (prio = 0; prio < 8; prio++) {
3677a9643ea8Slogwang 			xstats[count].value =
3678a9643ea8Slogwang 				*(uint64_t *)(((char *)hw_stats) +
3679a9643ea8Slogwang 				rte_i40e_txq_prio_strings[i].offset +
3680a9643ea8Slogwang 				(sizeof(uint64_t) * prio));
36812bfe3f2eSlogwang 			xstats[count].id = count;
3682a9643ea8Slogwang 			count++;
3683a9643ea8Slogwang 		}
3684a9643ea8Slogwang 	}
3685a9643ea8Slogwang 
3686a9643ea8Slogwang 	return count;
3687a9643ea8Slogwang }
3688a9643ea8Slogwang 
3689a9643ea8Slogwang static int
i40e_fw_version_get(struct rte_eth_dev * dev,char * fw_version,size_t fw_size)36902bfe3f2eSlogwang i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
36912bfe3f2eSlogwang {
36922bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
36932bfe3f2eSlogwang 	u32 full_ver;
36942bfe3f2eSlogwang 	u8 ver, patch;
36952bfe3f2eSlogwang 	u16 build;
36962bfe3f2eSlogwang 	int ret;
36972bfe3f2eSlogwang 
36982bfe3f2eSlogwang 	full_ver = hw->nvm.oem_ver;
36992bfe3f2eSlogwang 	ver = (u8)(full_ver >> 24);
37002bfe3f2eSlogwang 	build = (u16)((full_ver >> 8) & 0xffff);
37012bfe3f2eSlogwang 	patch = (u8)(full_ver & 0xff);
37022bfe3f2eSlogwang 
37032bfe3f2eSlogwang 	ret = snprintf(fw_version, fw_size,
37042bfe3f2eSlogwang 		 "%d.%d%d 0x%08x %d.%d.%d",
37052bfe3f2eSlogwang 		 ((hw->nvm.version >> 12) & 0xf),
37062bfe3f2eSlogwang 		 ((hw->nvm.version >> 4) & 0xff),
37072bfe3f2eSlogwang 		 (hw->nvm.version & 0xf), hw->nvm.eetrack,
37082bfe3f2eSlogwang 		 ver, build, patch);
37092bfe3f2eSlogwang 
37102bfe3f2eSlogwang 	ret += 1; /* add the size of '\0' */
37112bfe3f2eSlogwang 	if (fw_size < (u32)ret)
37122bfe3f2eSlogwang 		return ret;
37132bfe3f2eSlogwang 	else
37142bfe3f2eSlogwang 		return 0;
37152bfe3f2eSlogwang }
37162bfe3f2eSlogwang 
37171646932aSjfb8856606 /*
37181646932aSjfb8856606  * When using NVM 6.01(for X710 XL710 XXV710)/3.33(for X722) or later,
37191646932aSjfb8856606  * the Rx data path does not hang if the FW LLDP is stopped.
37201646932aSjfb8856606  * return true if lldp need to stop
37211646932aSjfb8856606  * return false if we cannot disable the LLDP to avoid Rx data path blocking.
37221646932aSjfb8856606  */
37231646932aSjfb8856606 static bool
i40e_need_stop_lldp(struct rte_eth_dev * dev)37241646932aSjfb8856606 i40e_need_stop_lldp(struct rte_eth_dev *dev)
37251646932aSjfb8856606 {
37261646932aSjfb8856606 	double nvm_ver;
37271646932aSjfb8856606 	char ver_str[64] = {0};
37281646932aSjfb8856606 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
37291646932aSjfb8856606 
37301646932aSjfb8856606 	i40e_fw_version_get(dev, ver_str, 64);
37311646932aSjfb8856606 	nvm_ver = atof(ver_str);
37321646932aSjfb8856606 	if ((hw->mac.type == I40E_MAC_X722 ||
37331646932aSjfb8856606 	     hw->mac.type == I40E_MAC_X722_VF) &&
37341646932aSjfb8856606 	     ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
37351646932aSjfb8856606 		return true;
37361646932aSjfb8856606 	else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
37371646932aSjfb8856606 		return true;
37381646932aSjfb8856606 
37391646932aSjfb8856606 	return false;
37401646932aSjfb8856606 }
37411646932aSjfb8856606 
37424418919fSjohnjiang static int
i40e_dev_info_get(struct rte_eth_dev * dev,struct rte_eth_dev_info * dev_info)3743a9643ea8Slogwang i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3744a9643ea8Slogwang {
3745a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3746a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3747a9643ea8Slogwang 	struct i40e_vsi *vsi = pf->main_vsi;
37482bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3749a9643ea8Slogwang 
3750a9643ea8Slogwang 	dev_info->max_rx_queues = vsi->nb_qps;
3751a9643ea8Slogwang 	dev_info->max_tx_queues = vsi->nb_qps;
3752a9643ea8Slogwang 	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3753a9643ea8Slogwang 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3754a9643ea8Slogwang 	dev_info->max_mac_addrs = vsi->max_macaddrs;
37552bfe3f2eSlogwang 	dev_info->max_vfs = pci_dev->max_vfs;
37564418919fSjohnjiang 	dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
37574418919fSjohnjiang 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3758d30ea906Sjfb8856606 	dev_info->rx_queue_offload_capa = 0;
3759a9643ea8Slogwang 	dev_info->rx_offload_capa =
3760a9643ea8Slogwang 		DEV_RX_OFFLOAD_VLAN_STRIP |
3761a9643ea8Slogwang 		DEV_RX_OFFLOAD_QINQ_STRIP |
3762a9643ea8Slogwang 		DEV_RX_OFFLOAD_IPV4_CKSUM |
3763a9643ea8Slogwang 		DEV_RX_OFFLOAD_UDP_CKSUM |
3764d30ea906Sjfb8856606 		DEV_RX_OFFLOAD_TCP_CKSUM |
3765d30ea906Sjfb8856606 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3766d30ea906Sjfb8856606 		DEV_RX_OFFLOAD_KEEP_CRC |
3767d30ea906Sjfb8856606 		DEV_RX_OFFLOAD_SCATTER |
3768d30ea906Sjfb8856606 		DEV_RX_OFFLOAD_VLAN_EXTEND |
3769d30ea906Sjfb8856606 		DEV_RX_OFFLOAD_VLAN_FILTER |
37704418919fSjohnjiang 		DEV_RX_OFFLOAD_JUMBO_FRAME |
37714418919fSjohnjiang 		DEV_RX_OFFLOAD_RSS_HASH;
3772d30ea906Sjfb8856606 
3773d30ea906Sjfb8856606 	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3774a9643ea8Slogwang 	dev_info->tx_offload_capa =
3775a9643ea8Slogwang 		DEV_TX_OFFLOAD_VLAN_INSERT |
3776a9643ea8Slogwang 		DEV_TX_OFFLOAD_QINQ_INSERT |
3777a9643ea8Slogwang 		DEV_TX_OFFLOAD_IPV4_CKSUM |
3778a9643ea8Slogwang 		DEV_TX_OFFLOAD_UDP_CKSUM |
3779a9643ea8Slogwang 		DEV_TX_OFFLOAD_TCP_CKSUM |
3780a9643ea8Slogwang 		DEV_TX_OFFLOAD_SCTP_CKSUM |
3781a9643ea8Slogwang 		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
37822bfe3f2eSlogwang 		DEV_TX_OFFLOAD_TCP_TSO |
37832bfe3f2eSlogwang 		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
37842bfe3f2eSlogwang 		DEV_TX_OFFLOAD_GRE_TNL_TSO |
37852bfe3f2eSlogwang 		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3786d30ea906Sjfb8856606 		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
3787d30ea906Sjfb8856606 		DEV_TX_OFFLOAD_MULTI_SEGS |
3788d30ea906Sjfb8856606 		dev_info->tx_queue_offload_capa;
3789d30ea906Sjfb8856606 	dev_info->dev_capa =
3790d30ea906Sjfb8856606 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3791d30ea906Sjfb8856606 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3792d30ea906Sjfb8856606 
3793a9643ea8Slogwang 	dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3794a9643ea8Slogwang 						sizeof(uint32_t);
3795a9643ea8Slogwang 	dev_info->reta_size = pf->hash_lut_size;
37962bfe3f2eSlogwang 	dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3797a9643ea8Slogwang 
3798a9643ea8Slogwang 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3799a9643ea8Slogwang 		.rx_thresh = {
3800a9643ea8Slogwang 			.pthresh = I40E_DEFAULT_RX_PTHRESH,
3801a9643ea8Slogwang 			.hthresh = I40E_DEFAULT_RX_HTHRESH,
3802a9643ea8Slogwang 			.wthresh = I40E_DEFAULT_RX_WTHRESH,
3803a9643ea8Slogwang 		},
3804a9643ea8Slogwang 		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3805a9643ea8Slogwang 		.rx_drop_en = 0,
3806d30ea906Sjfb8856606 		.offloads = 0,
3807a9643ea8Slogwang 	};
3808a9643ea8Slogwang 
3809a9643ea8Slogwang 	dev_info->default_txconf = (struct rte_eth_txconf) {
3810a9643ea8Slogwang 		.tx_thresh = {
3811a9643ea8Slogwang 			.pthresh = I40E_DEFAULT_TX_PTHRESH,
3812a9643ea8Slogwang 			.hthresh = I40E_DEFAULT_TX_HTHRESH,
3813a9643ea8Slogwang 			.wthresh = I40E_DEFAULT_TX_WTHRESH,
3814a9643ea8Slogwang 		},
3815a9643ea8Slogwang 		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3816a9643ea8Slogwang 		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3817d30ea906Sjfb8856606 		.offloads = 0,
3818a9643ea8Slogwang 	};
3819a9643ea8Slogwang 
3820a9643ea8Slogwang 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3821a9643ea8Slogwang 		.nb_max = I40E_MAX_RING_DESC,
3822a9643ea8Slogwang 		.nb_min = I40E_MIN_RING_DESC,
3823a9643ea8Slogwang 		.nb_align = I40E_ALIGN_RING_DESC,
3824a9643ea8Slogwang 	};
3825a9643ea8Slogwang 
3826a9643ea8Slogwang 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3827a9643ea8Slogwang 		.nb_max = I40E_MAX_RING_DESC,
3828a9643ea8Slogwang 		.nb_min = I40E_MIN_RING_DESC,
3829a9643ea8Slogwang 		.nb_align = I40E_ALIGN_RING_DESC,
38302bfe3f2eSlogwang 		.nb_seg_max = I40E_TX_MAX_SEG,
38312bfe3f2eSlogwang 		.nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3832a9643ea8Slogwang 	};
3833a9643ea8Slogwang 
3834a9643ea8Slogwang 	if (pf->flags & I40E_FLAG_VMDQ) {
3835a9643ea8Slogwang 		dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3836a9643ea8Slogwang 		dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3837a9643ea8Slogwang 		dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3838a9643ea8Slogwang 						pf->max_nb_vmdq_vsi;
3839a9643ea8Slogwang 		dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3840a9643ea8Slogwang 		dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3841a9643ea8Slogwang 		dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3842a9643ea8Slogwang 	}
3843a9643ea8Slogwang 
3844d30ea906Sjfb8856606 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3845a9643ea8Slogwang 		/* For XL710 */
3846a9643ea8Slogwang 		dev_info->speed_capa = ETH_LINK_SPEED_40G;
3847d30ea906Sjfb8856606 		dev_info->default_rxportconf.nb_queues = 2;
3848d30ea906Sjfb8856606 		dev_info->default_txportconf.nb_queues = 2;
3849d30ea906Sjfb8856606 		if (dev->data->nb_rx_queues == 1)
3850d30ea906Sjfb8856606 			dev_info->default_rxportconf.ring_size = 2048;
3851d30ea906Sjfb8856606 		else
3852d30ea906Sjfb8856606 			dev_info->default_rxportconf.ring_size = 1024;
3853d30ea906Sjfb8856606 		if (dev->data->nb_tx_queues == 1)
3854d30ea906Sjfb8856606 			dev_info->default_txportconf.ring_size = 1024;
3855d30ea906Sjfb8856606 		else
3856d30ea906Sjfb8856606 			dev_info->default_txportconf.ring_size = 512;
3857d30ea906Sjfb8856606 
3858d30ea906Sjfb8856606 	} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
38592bfe3f2eSlogwang 		/* For XXV710 */
38602bfe3f2eSlogwang 		dev_info->speed_capa = ETH_LINK_SPEED_25G;
3861d30ea906Sjfb8856606 		dev_info->default_rxportconf.nb_queues = 1;
3862d30ea906Sjfb8856606 		dev_info->default_txportconf.nb_queues = 1;
3863d30ea906Sjfb8856606 		dev_info->default_rxportconf.ring_size = 256;
3864d30ea906Sjfb8856606 		dev_info->default_txportconf.ring_size = 256;
3865d30ea906Sjfb8856606 	} else {
3866a9643ea8Slogwang 		/* For X710 */
3867a9643ea8Slogwang 		dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3868d30ea906Sjfb8856606 		dev_info->default_rxportconf.nb_queues = 1;
3869d30ea906Sjfb8856606 		dev_info->default_txportconf.nb_queues = 1;
3870d30ea906Sjfb8856606 		if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3871d30ea906Sjfb8856606 			dev_info->default_rxportconf.ring_size = 512;
3872d30ea906Sjfb8856606 			dev_info->default_txportconf.ring_size = 256;
3873d30ea906Sjfb8856606 		} else {
3874d30ea906Sjfb8856606 			dev_info->default_rxportconf.ring_size = 256;
3875d30ea906Sjfb8856606 			dev_info->default_txportconf.ring_size = 256;
3876d30ea906Sjfb8856606 		}
3877d30ea906Sjfb8856606 	}
3878d30ea906Sjfb8856606 	dev_info->default_rxportconf.burst_size = 32;
3879d30ea906Sjfb8856606 	dev_info->default_txportconf.burst_size = 32;
38804418919fSjohnjiang 
38814418919fSjohnjiang 	return 0;
3882a9643ea8Slogwang }
3883a9643ea8Slogwang 
3884a9643ea8Slogwang static int
i40e_vlan_filter_set(struct rte_eth_dev * dev,uint16_t vlan_id,int on)3885a9643ea8Slogwang i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3886a9643ea8Slogwang {
3887a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3888a9643ea8Slogwang 	struct i40e_vsi *vsi = pf->main_vsi;
3889a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
3890a9643ea8Slogwang 
3891a9643ea8Slogwang 	if (on)
3892a9643ea8Slogwang 		return i40e_vsi_add_vlan(vsi, vlan_id);
3893a9643ea8Slogwang 	else
3894a9643ea8Slogwang 		return i40e_vsi_delete_vlan(vsi, vlan_id);
3895a9643ea8Slogwang }
3896a9643ea8Slogwang 
3897a9643ea8Slogwang static int
i40e_vlan_tpid_set_by_registers(struct rte_eth_dev * dev,enum rte_vlan_type vlan_type,uint16_t tpid,int qinq)38982bfe3f2eSlogwang i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3899a9643ea8Slogwang 				enum rte_vlan_type vlan_type,
39002bfe3f2eSlogwang 				uint16_t tpid, int qinq)
3901a9643ea8Slogwang {
3902a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
39032bfe3f2eSlogwang 	uint64_t reg_r = 0;
39042bfe3f2eSlogwang 	uint64_t reg_w = 0;
39052bfe3f2eSlogwang 	uint16_t reg_id = 3;
39062bfe3f2eSlogwang 	int ret;
3907a9643ea8Slogwang 
39082bfe3f2eSlogwang 	if (qinq) {
39092bfe3f2eSlogwang 		if (vlan_type == ETH_VLAN_TYPE_OUTER)
3910a9643ea8Slogwang 			reg_id = 2;
3911a9643ea8Slogwang 	}
39122bfe3f2eSlogwang 
3913a9643ea8Slogwang 	ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3914a9643ea8Slogwang 					  &reg_r, NULL);
3915a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
39162bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
39172bfe3f2eSlogwang 			   "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
39182bfe3f2eSlogwang 			   reg_id);
39192bfe3f2eSlogwang 		return -EIO;
3920a9643ea8Slogwang 	}
39212bfe3f2eSlogwang 	PMD_DRV_LOG(DEBUG,
39222bfe3f2eSlogwang 		    "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
39232bfe3f2eSlogwang 		    reg_id, reg_r);
3924a9643ea8Slogwang 
3925a9643ea8Slogwang 	reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3926a9643ea8Slogwang 	reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3927a9643ea8Slogwang 	if (reg_r == reg_w) {
3928a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "No need to write");
39292bfe3f2eSlogwang 		return 0;
3930a9643ea8Slogwang 	}
3931a9643ea8Slogwang 
3932d30ea906Sjfb8856606 	ret = i40e_aq_debug_write_global_register(hw,
3933d30ea906Sjfb8856606 					   I40E_GL_SWT_L2TAGCTRL(reg_id),
3934a9643ea8Slogwang 					   reg_w, NULL);
3935a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
39362bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
39372bfe3f2eSlogwang 			    "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
39382bfe3f2eSlogwang 			    reg_id);
39392bfe3f2eSlogwang 		return -EIO;
39402bfe3f2eSlogwang 	}
39412bfe3f2eSlogwang 	PMD_DRV_LOG(DEBUG,
39422bfe3f2eSlogwang 		    "Global register 0x%08x is changed with value 0x%08x",
39432bfe3f2eSlogwang 		    I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
39442bfe3f2eSlogwang 
39452bfe3f2eSlogwang 	return 0;
39462bfe3f2eSlogwang }
39472bfe3f2eSlogwang 
39482bfe3f2eSlogwang static int
i40e_vlan_tpid_set(struct rte_eth_dev * dev,enum rte_vlan_type vlan_type,uint16_t tpid)39492bfe3f2eSlogwang i40e_vlan_tpid_set(struct rte_eth_dev *dev,
39502bfe3f2eSlogwang 		   enum rte_vlan_type vlan_type,
39512bfe3f2eSlogwang 		   uint16_t tpid)
39522bfe3f2eSlogwang {
39532bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
39542bfe3f2eSlogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3955d30ea906Sjfb8856606 	int qinq = dev->data->dev_conf.rxmode.offloads &
3956d30ea906Sjfb8856606 		   DEV_RX_OFFLOAD_VLAN_EXTEND;
39572bfe3f2eSlogwang 	int ret = 0;
39582bfe3f2eSlogwang 
39592bfe3f2eSlogwang 	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
39602bfe3f2eSlogwang 	     vlan_type != ETH_VLAN_TYPE_OUTER) ||
39612bfe3f2eSlogwang 	    (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
39622bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
39632bfe3f2eSlogwang 			    "Unsupported vlan type.");
39642bfe3f2eSlogwang 		return -EINVAL;
39652bfe3f2eSlogwang 	}
39662bfe3f2eSlogwang 
39672bfe3f2eSlogwang 	if (pf->support_multi_driver) {
39682bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
39692bfe3f2eSlogwang 		return -ENOTSUP;
39702bfe3f2eSlogwang 	}
39712bfe3f2eSlogwang 
39722bfe3f2eSlogwang 	/* 802.1ad frames ability is added in NVM API 1.7*/
39732bfe3f2eSlogwang 	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
39742bfe3f2eSlogwang 		if (qinq) {
39752bfe3f2eSlogwang 			if (vlan_type == ETH_VLAN_TYPE_OUTER)
39762bfe3f2eSlogwang 				hw->first_tag = rte_cpu_to_le_16(tpid);
39772bfe3f2eSlogwang 			else if (vlan_type == ETH_VLAN_TYPE_INNER)
39782bfe3f2eSlogwang 				hw->second_tag = rte_cpu_to_le_16(tpid);
39792bfe3f2eSlogwang 		} else {
39802bfe3f2eSlogwang 			if (vlan_type == ETH_VLAN_TYPE_OUTER)
39812bfe3f2eSlogwang 				hw->second_tag = rte_cpu_to_le_16(tpid);
39822bfe3f2eSlogwang 		}
3983d30ea906Sjfb8856606 		ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
39842bfe3f2eSlogwang 		if (ret != I40E_SUCCESS) {
39852bfe3f2eSlogwang 			PMD_DRV_LOG(ERR,
39862bfe3f2eSlogwang 				    "Set switch config failed aq_err: %d",
39872bfe3f2eSlogwang 				    hw->aq.asq_last_status);
3988a9643ea8Slogwang 			ret = -EIO;
3989a9643ea8Slogwang 		}
39902bfe3f2eSlogwang 	} else
39912bfe3f2eSlogwang 		/* If NVM API < 1.7, keep the register setting */
39922bfe3f2eSlogwang 		ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
39932bfe3f2eSlogwang 						      tpid, qinq);
3994a9643ea8Slogwang 
3995a9643ea8Slogwang 	return ret;
3996a9643ea8Slogwang }
3997a9643ea8Slogwang 
39980c6bd470Sfengbojiang /* Configure outer vlan stripping on or off in QinQ mode */
39990c6bd470Sfengbojiang static int
i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi * vsi,bool on)40000c6bd470Sfengbojiang i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
40010c6bd470Sfengbojiang {
40020c6bd470Sfengbojiang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
40030c6bd470Sfengbojiang 	int ret = I40E_SUCCESS;
40040c6bd470Sfengbojiang 	uint32_t reg;
40050c6bd470Sfengbojiang 
40060c6bd470Sfengbojiang 	if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
40070c6bd470Sfengbojiang 		PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
40080c6bd470Sfengbojiang 		return -EINVAL;
40090c6bd470Sfengbojiang 	}
40100c6bd470Sfengbojiang 
40110c6bd470Sfengbojiang 	/* Configure for outer VLAN RX stripping */
40120c6bd470Sfengbojiang 	reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
40130c6bd470Sfengbojiang 
40140c6bd470Sfengbojiang 	if (on)
40150c6bd470Sfengbojiang 		reg |= I40E_VSI_TSR_QINQ_STRIP;
40160c6bd470Sfengbojiang 	else
40170c6bd470Sfengbojiang 		reg &= ~I40E_VSI_TSR_QINQ_STRIP;
40180c6bd470Sfengbojiang 
40190c6bd470Sfengbojiang 	ret = i40e_aq_debug_write_register(hw,
40200c6bd470Sfengbojiang 						   I40E_VSI_TSR(vsi->vsi_id),
40210c6bd470Sfengbojiang 						   reg, NULL);
40220c6bd470Sfengbojiang 	if (ret < 0) {
40230c6bd470Sfengbojiang 		PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
40240c6bd470Sfengbojiang 				    vsi->vsi_id);
40250c6bd470Sfengbojiang 		return I40E_ERR_CONFIG;
40260c6bd470Sfengbojiang 	}
40270c6bd470Sfengbojiang 
40280c6bd470Sfengbojiang 	return ret;
40290c6bd470Sfengbojiang }
40300c6bd470Sfengbojiang 
40312bfe3f2eSlogwang static int
i40e_vlan_offload_set(struct rte_eth_dev * dev,int mask)4032a9643ea8Slogwang i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4033a9643ea8Slogwang {
4034a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4035a9643ea8Slogwang 	struct i40e_vsi *vsi = pf->main_vsi;
4036d30ea906Sjfb8856606 	struct rte_eth_rxmode *rxmode;
4037a9643ea8Slogwang 
4038d30ea906Sjfb8856606 	rxmode = &dev->data->dev_conf.rxmode;
4039a9643ea8Slogwang 	if (mask & ETH_VLAN_FILTER_MASK) {
4040d30ea906Sjfb8856606 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4041a9643ea8Slogwang 			i40e_vsi_config_vlan_filter(vsi, TRUE);
4042a9643ea8Slogwang 		else
4043a9643ea8Slogwang 			i40e_vsi_config_vlan_filter(vsi, FALSE);
4044a9643ea8Slogwang 	}
4045a9643ea8Slogwang 
4046a9643ea8Slogwang 	if (mask & ETH_VLAN_STRIP_MASK) {
4047a9643ea8Slogwang 		/* Enable or disable VLAN stripping */
4048d30ea906Sjfb8856606 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4049a9643ea8Slogwang 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
4050a9643ea8Slogwang 		else
4051a9643ea8Slogwang 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
4052a9643ea8Slogwang 	}
4053a9643ea8Slogwang 
4054a9643ea8Slogwang 	if (mask & ETH_VLAN_EXTEND_MASK) {
4055d30ea906Sjfb8856606 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
4056a9643ea8Slogwang 			i40e_vsi_config_double_vlan(vsi, TRUE);
40572bfe3f2eSlogwang 			/* Set global registers with default ethertype. */
4058a9643ea8Slogwang 			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
40594418919fSjohnjiang 					   RTE_ETHER_TYPE_VLAN);
4060a9643ea8Slogwang 			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
40614418919fSjohnjiang 					   RTE_ETHER_TYPE_VLAN);
4062a9643ea8Slogwang 		}
4063a9643ea8Slogwang 		else
4064a9643ea8Slogwang 			i40e_vsi_config_double_vlan(vsi, FALSE);
4065a9643ea8Slogwang 	}
40662bfe3f2eSlogwang 
40670c6bd470Sfengbojiang 	if (mask & ETH_QINQ_STRIP_MASK) {
40680c6bd470Sfengbojiang 		/* Enable or disable outer VLAN stripping */
40690c6bd470Sfengbojiang 		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
40700c6bd470Sfengbojiang 			i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
40710c6bd470Sfengbojiang 		else
40720c6bd470Sfengbojiang 			i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
40730c6bd470Sfengbojiang 	}
40740c6bd470Sfengbojiang 
40752bfe3f2eSlogwang 	return 0;
4076a9643ea8Slogwang }
4077a9643ea8Slogwang 
4078a9643ea8Slogwang static void
i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev * dev,__rte_unused uint16_t queue,__rte_unused int on)4079a9643ea8Slogwang i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
4080a9643ea8Slogwang 			  __rte_unused uint16_t queue,
4081a9643ea8Slogwang 			  __rte_unused int on)
4082a9643ea8Slogwang {
4083a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
4084a9643ea8Slogwang }
4085a9643ea8Slogwang 
4086a9643ea8Slogwang static int
i40e_vlan_pvid_set(struct rte_eth_dev * dev,uint16_t pvid,int on)4087a9643ea8Slogwang i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4088a9643ea8Slogwang {
4089a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4090a9643ea8Slogwang 	struct i40e_vsi *vsi = pf->main_vsi;
4091a9643ea8Slogwang 	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
4092a9643ea8Slogwang 	struct i40e_vsi_vlan_pvid_info info;
4093a9643ea8Slogwang 
4094a9643ea8Slogwang 	memset(&info, 0, sizeof(info));
4095a9643ea8Slogwang 	info.on = on;
4096a9643ea8Slogwang 	if (info.on)
4097a9643ea8Slogwang 		info.config.pvid = pvid;
4098a9643ea8Slogwang 	else {
4099a9643ea8Slogwang 		info.config.reject.tagged =
4100a9643ea8Slogwang 				data->dev_conf.txmode.hw_vlan_reject_tagged;
4101a9643ea8Slogwang 		info.config.reject.untagged =
4102a9643ea8Slogwang 				data->dev_conf.txmode.hw_vlan_reject_untagged;
4103a9643ea8Slogwang 	}
4104a9643ea8Slogwang 
4105a9643ea8Slogwang 	return i40e_vsi_vlan_pvid_set(vsi, &info);
4106a9643ea8Slogwang }
4107a9643ea8Slogwang 
4108a9643ea8Slogwang static int
i40e_dev_led_on(struct rte_eth_dev * dev)4109a9643ea8Slogwang i40e_dev_led_on(struct rte_eth_dev *dev)
4110a9643ea8Slogwang {
4111a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4112a9643ea8Slogwang 	uint32_t mode = i40e_led_get(hw);
4113a9643ea8Slogwang 
4114a9643ea8Slogwang 	if (mode == 0)
4115a9643ea8Slogwang 		i40e_led_set(hw, 0xf, true); /* 0xf means led always true */
4116a9643ea8Slogwang 
4117a9643ea8Slogwang 	return 0;
4118a9643ea8Slogwang }
4119a9643ea8Slogwang 
4120a9643ea8Slogwang static int
i40e_dev_led_off(struct rte_eth_dev * dev)4121a9643ea8Slogwang i40e_dev_led_off(struct rte_eth_dev *dev)
4122a9643ea8Slogwang {
4123a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4124a9643ea8Slogwang 	uint32_t mode = i40e_led_get(hw);
4125a9643ea8Slogwang 
4126a9643ea8Slogwang 	if (mode != 0)
4127a9643ea8Slogwang 		i40e_led_set(hw, 0, false);
4128a9643ea8Slogwang 
4129a9643ea8Slogwang 	return 0;
4130a9643ea8Slogwang }
4131a9643ea8Slogwang 
4132a9643ea8Slogwang static int
i40e_flow_ctrl_get(struct rte_eth_dev * dev,struct rte_eth_fc_conf * fc_conf)4133a9643ea8Slogwang i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4134a9643ea8Slogwang {
4135a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4136a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4137a9643ea8Slogwang 
4138a9643ea8Slogwang 	fc_conf->pause_time = pf->fc_conf.pause_time;
41392bfe3f2eSlogwang 
41402bfe3f2eSlogwang 	/* read out from register, in case they are modified by other port */
41412bfe3f2eSlogwang 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
41422bfe3f2eSlogwang 		I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
41432bfe3f2eSlogwang 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
41442bfe3f2eSlogwang 		I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
41452bfe3f2eSlogwang 
4146a9643ea8Slogwang 	fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
4147a9643ea8Slogwang 	fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
4148a9643ea8Slogwang 
4149a9643ea8Slogwang 	 /* Return current mode according to actual setting*/
4150a9643ea8Slogwang 	switch (hw->fc.current_mode) {
4151a9643ea8Slogwang 	case I40E_FC_FULL:
4152a9643ea8Slogwang 		fc_conf->mode = RTE_FC_FULL;
4153a9643ea8Slogwang 		break;
4154a9643ea8Slogwang 	case I40E_FC_TX_PAUSE:
4155a9643ea8Slogwang 		fc_conf->mode = RTE_FC_TX_PAUSE;
4156a9643ea8Slogwang 		break;
4157a9643ea8Slogwang 	case I40E_FC_RX_PAUSE:
4158a9643ea8Slogwang 		fc_conf->mode = RTE_FC_RX_PAUSE;
4159a9643ea8Slogwang 		break;
4160a9643ea8Slogwang 	case I40E_FC_NONE:
4161a9643ea8Slogwang 	default:
4162a9643ea8Slogwang 		fc_conf->mode = RTE_FC_NONE;
4163a9643ea8Slogwang 	};
4164a9643ea8Slogwang 
4165a9643ea8Slogwang 	return 0;
4166a9643ea8Slogwang }
4167a9643ea8Slogwang 
4168a9643ea8Slogwang static int
i40e_flow_ctrl_set(struct rte_eth_dev * dev,struct rte_eth_fc_conf * fc_conf)4169a9643ea8Slogwang i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4170a9643ea8Slogwang {
4171a9643ea8Slogwang 	uint32_t mflcn_reg, fctrl_reg, reg;
4172a9643ea8Slogwang 	uint32_t max_high_water;
4173a9643ea8Slogwang 	uint8_t i, aq_failure;
4174a9643ea8Slogwang 	int err;
4175a9643ea8Slogwang 	struct i40e_hw *hw;
4176a9643ea8Slogwang 	struct i40e_pf *pf;
4177a9643ea8Slogwang 	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
4178a9643ea8Slogwang 		[RTE_FC_NONE] = I40E_FC_NONE,
4179a9643ea8Slogwang 		[RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
4180a9643ea8Slogwang 		[RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
4181a9643ea8Slogwang 		[RTE_FC_FULL] = I40E_FC_FULL
4182a9643ea8Slogwang 	};
4183a9643ea8Slogwang 
4184a9643ea8Slogwang 	/* high_water field in the rte_eth_fc_conf using the kilobytes unit */
4185a9643ea8Slogwang 
4186a9643ea8Slogwang 	max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
4187a9643ea8Slogwang 	if ((fc_conf->high_water > max_high_water) ||
4188a9643ea8Slogwang 			(fc_conf->high_water < fc_conf->low_water)) {
41892bfe3f2eSlogwang 		PMD_INIT_LOG(ERR,
41902bfe3f2eSlogwang 			"Invalid high/low water setup value in KB, High_water must be <= %d.",
41912bfe3f2eSlogwang 			max_high_water);
4192a9643ea8Slogwang 		return -EINVAL;
4193a9643ea8Slogwang 	}
4194a9643ea8Slogwang 
4195a9643ea8Slogwang 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4196a9643ea8Slogwang 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4197a9643ea8Slogwang 	hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
4198a9643ea8Slogwang 
4199a9643ea8Slogwang 	pf->fc_conf.pause_time = fc_conf->pause_time;
4200a9643ea8Slogwang 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
4201a9643ea8Slogwang 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
4202a9643ea8Slogwang 
4203a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
4204a9643ea8Slogwang 
4205a9643ea8Slogwang 	/* All the link flow control related enable/disable register
4206a9643ea8Slogwang 	 * configuration is handle by the F/W
4207a9643ea8Slogwang 	 */
4208a9643ea8Slogwang 	err = i40e_set_fc(hw, &aq_failure, true);
4209a9643ea8Slogwang 	if (err < 0)
4210a9643ea8Slogwang 		return -ENOSYS;
4211a9643ea8Slogwang 
42122bfe3f2eSlogwang 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
4213a9643ea8Slogwang 		/* Configure flow control refresh threshold,
4214a9643ea8Slogwang 		 * the value for stat_tx_pause_refresh_timer[8]
4215a9643ea8Slogwang 		 * is used for global pause operation.
4216a9643ea8Slogwang 		 */
4217a9643ea8Slogwang 
4218a9643ea8Slogwang 		I40E_WRITE_REG(hw,
4219a9643ea8Slogwang 			       I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
4220a9643ea8Slogwang 			       pf->fc_conf.pause_time);
4221a9643ea8Slogwang 
4222a9643ea8Slogwang 		/* configure the timer value included in transmitted pause
4223a9643ea8Slogwang 		 * frame,
4224a9643ea8Slogwang 		 * the value for stat_tx_pause_quanta[8] is used for global
4225a9643ea8Slogwang 		 * pause operation
4226a9643ea8Slogwang 		 */
4227a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
4228a9643ea8Slogwang 			       pf->fc_conf.pause_time);
4229a9643ea8Slogwang 
4230a9643ea8Slogwang 		fctrl_reg = I40E_READ_REG(hw,
4231a9643ea8Slogwang 					  I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
4232a9643ea8Slogwang 
4233a9643ea8Slogwang 		if (fc_conf->mac_ctrl_frame_fwd != 0)
4234a9643ea8Slogwang 			fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
4235a9643ea8Slogwang 		else
4236a9643ea8Slogwang 			fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
4237a9643ea8Slogwang 
4238a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
4239a9643ea8Slogwang 			       fctrl_reg);
4240a9643ea8Slogwang 	} else {
4241a9643ea8Slogwang 		/* Configure pause time (2 TCs per register) */
4242a9643ea8Slogwang 		reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
4243a9643ea8Slogwang 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
4244a9643ea8Slogwang 			I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
4245a9643ea8Slogwang 
4246a9643ea8Slogwang 		/* Configure flow control refresh threshold value */
4247a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
4248a9643ea8Slogwang 			       pf->fc_conf.pause_time / 2);
4249a9643ea8Slogwang 
4250a9643ea8Slogwang 		mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4251a9643ea8Slogwang 
4252a9643ea8Slogwang 		/* set or clear MFLCN.PMCF & MFLCN.DPF bits
4253a9643ea8Slogwang 		 *depending on configuration
4254a9643ea8Slogwang 		 */
4255a9643ea8Slogwang 		if (fc_conf->mac_ctrl_frame_fwd != 0) {
4256a9643ea8Slogwang 			mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
4257a9643ea8Slogwang 			mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
4258a9643ea8Slogwang 		} else {
4259a9643ea8Slogwang 			mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
4260a9643ea8Slogwang 			mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
4261a9643ea8Slogwang 		}
4262a9643ea8Slogwang 
4263a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
4264a9643ea8Slogwang 	}
4265a9643ea8Slogwang 
42662bfe3f2eSlogwang 	if (!pf->support_multi_driver) {
42672bfe3f2eSlogwang 		/* config water marker both based on the packets and bytes */
42682bfe3f2eSlogwang 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
4269a9643ea8Slogwang 				 (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4270a9643ea8Slogwang 				 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
42712bfe3f2eSlogwang 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
4272a9643ea8Slogwang 				  (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4273a9643ea8Slogwang 				 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
42742bfe3f2eSlogwang 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
4275a9643ea8Slogwang 				  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4276a9643ea8Slogwang 				  << I40E_KILOSHIFT);
42772bfe3f2eSlogwang 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
4278a9643ea8Slogwang 				   pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4279a9643ea8Slogwang 				   << I40E_KILOSHIFT);
42802bfe3f2eSlogwang 	} else {
42812bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
42822bfe3f2eSlogwang 			    "Water marker configuration is not supported.");
42832bfe3f2eSlogwang 	}
4284a9643ea8Slogwang 
4285a9643ea8Slogwang 	I40E_WRITE_FLUSH(hw);
4286a9643ea8Slogwang 
4287a9643ea8Slogwang 	return 0;
4288a9643ea8Slogwang }
4289a9643ea8Slogwang 
4290a9643ea8Slogwang static int
i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev * dev,__rte_unused struct rte_eth_pfc_conf * pfc_conf)4291a9643ea8Slogwang i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
4292a9643ea8Slogwang 			    __rte_unused struct rte_eth_pfc_conf *pfc_conf)
4293a9643ea8Slogwang {
4294a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
4295a9643ea8Slogwang 
4296a9643ea8Slogwang 	return -ENOSYS;
4297a9643ea8Slogwang }
4298a9643ea8Slogwang 
4299a9643ea8Slogwang /* Add a MAC address, and update filters */
43002bfe3f2eSlogwang static int
i40e_macaddr_add(struct rte_eth_dev * dev,struct rte_ether_addr * mac_addr,__rte_unused uint32_t index,uint32_t pool)4301a9643ea8Slogwang i40e_macaddr_add(struct rte_eth_dev *dev,
43024418919fSjohnjiang 		 struct rte_ether_addr *mac_addr,
4303a9643ea8Slogwang 		 __rte_unused uint32_t index,
4304a9643ea8Slogwang 		 uint32_t pool)
4305a9643ea8Slogwang {
4306a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4307a9643ea8Slogwang 	struct i40e_mac_filter_info mac_filter;
4308a9643ea8Slogwang 	struct i40e_vsi *vsi;
4309d30ea906Sjfb8856606 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4310a9643ea8Slogwang 	int ret;
4311a9643ea8Slogwang 
4312a9643ea8Slogwang 	/* If VMDQ not enabled or configured, return */
4313a9643ea8Slogwang 	if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4314a9643ea8Slogwang 			  !pf->nb_cfg_vmdq_vsi)) {
4315a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4316a9643ea8Slogwang 			pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4317a9643ea8Slogwang 			pool);
43182bfe3f2eSlogwang 		return -ENOTSUP;
4319a9643ea8Slogwang 	}
4320a9643ea8Slogwang 
4321a9643ea8Slogwang 	if (pool > pf->nb_cfg_vmdq_vsi) {
4322a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4323a9643ea8Slogwang 				pool, pf->nb_cfg_vmdq_vsi);
43242bfe3f2eSlogwang 		return -EINVAL;
4325a9643ea8Slogwang 	}
4326a9643ea8Slogwang 
43274418919fSjohnjiang 	rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
4328d30ea906Sjfb8856606 	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4329*2d9fd380Sjfb8856606 		mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
4330a9643ea8Slogwang 	else
4331*2d9fd380Sjfb8856606 		mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
4332a9643ea8Slogwang 
4333a9643ea8Slogwang 	if (pool == 0)
4334a9643ea8Slogwang 		vsi = pf->main_vsi;
4335a9643ea8Slogwang 	else
4336a9643ea8Slogwang 		vsi = pf->vmdq[pool - 1].vsi;
4337a9643ea8Slogwang 
4338a9643ea8Slogwang 	ret = i40e_vsi_add_mac(vsi, &mac_filter);
4339a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
4340a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
43412bfe3f2eSlogwang 		return -ENODEV;
4342a9643ea8Slogwang 	}
43432bfe3f2eSlogwang 	return 0;
4344a9643ea8Slogwang }
4345a9643ea8Slogwang 
4346a9643ea8Slogwang /* Remove a MAC address, and update filters */
4347a9643ea8Slogwang static void
i40e_macaddr_remove(struct rte_eth_dev * dev,uint32_t index)4348a9643ea8Slogwang i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4349a9643ea8Slogwang {
4350a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4351a9643ea8Slogwang 	struct i40e_vsi *vsi;
4352a9643ea8Slogwang 	struct rte_eth_dev_data *data = dev->data;
43534418919fSjohnjiang 	struct rte_ether_addr *macaddr;
4354a9643ea8Slogwang 	int ret;
4355a9643ea8Slogwang 	uint32_t i;
4356a9643ea8Slogwang 	uint64_t pool_sel;
4357a9643ea8Slogwang 
4358a9643ea8Slogwang 	macaddr = &(data->mac_addrs[index]);
4359a9643ea8Slogwang 
4360a9643ea8Slogwang 	pool_sel = dev->data->mac_pool_sel[index];
4361a9643ea8Slogwang 
4362a9643ea8Slogwang 	for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4363a9643ea8Slogwang 		if (pool_sel & (1ULL << i)) {
4364a9643ea8Slogwang 			if (i == 0)
4365a9643ea8Slogwang 				vsi = pf->main_vsi;
4366a9643ea8Slogwang 			else {
4367a9643ea8Slogwang 				/* No VMDQ pool enabled or configured */
4368a9643ea8Slogwang 				if (!(pf->flags & I40E_FLAG_VMDQ) ||
4369a9643ea8Slogwang 					(i > pf->nb_cfg_vmdq_vsi)) {
43702bfe3f2eSlogwang 					PMD_DRV_LOG(ERR,
43712bfe3f2eSlogwang 						"No VMDQ pool enabled/configured");
4372a9643ea8Slogwang 					return;
4373a9643ea8Slogwang 				}
4374a9643ea8Slogwang 				vsi = pf->vmdq[i - 1].vsi;
4375a9643ea8Slogwang 			}
4376a9643ea8Slogwang 			ret = i40e_vsi_delete_mac(vsi, macaddr);
4377a9643ea8Slogwang 
4378a9643ea8Slogwang 			if (ret) {
4379a9643ea8Slogwang 				PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4380a9643ea8Slogwang 				return;
4381a9643ea8Slogwang 			}
4382a9643ea8Slogwang 		}
4383a9643ea8Slogwang 	}
4384a9643ea8Slogwang }
4385a9643ea8Slogwang 
4386a9643ea8Slogwang static int
i40e_get_rss_lut(struct i40e_vsi * vsi,uint8_t * lut,uint16_t lut_size)4387a9643ea8Slogwang i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4388a9643ea8Slogwang {
4389a9643ea8Slogwang 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4390a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4391d30ea906Sjfb8856606 	uint32_t reg;
4392a9643ea8Slogwang 	int ret;
4393a9643ea8Slogwang 
4394a9643ea8Slogwang 	if (!lut)
4395a9643ea8Slogwang 		return -EINVAL;
4396a9643ea8Slogwang 
4397a9643ea8Slogwang 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
43981646932aSjfb8856606 		ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
43991646932aSjfb8856606 					  vsi->type != I40E_VSI_SRIOV,
4400a9643ea8Slogwang 					  lut, lut_size);
4401a9643ea8Slogwang 		if (ret) {
4402a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4403a9643ea8Slogwang 			return ret;
4404a9643ea8Slogwang 		}
4405a9643ea8Slogwang 	} else {
4406a9643ea8Slogwang 		uint32_t *lut_dw = (uint32_t *)lut;
4407a9643ea8Slogwang 		uint16_t i, lut_size_dw = lut_size / 4;
4408a9643ea8Slogwang 
4409d30ea906Sjfb8856606 		if (vsi->type == I40E_VSI_SRIOV) {
4410d30ea906Sjfb8856606 			for (i = 0; i <= lut_size_dw; i++) {
4411d30ea906Sjfb8856606 				reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4412d30ea906Sjfb8856606 				lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4413d30ea906Sjfb8856606 			}
4414d30ea906Sjfb8856606 		} else {
4415a9643ea8Slogwang 			for (i = 0; i < lut_size_dw; i++)
4416d30ea906Sjfb8856606 				lut_dw[i] = I40E_READ_REG(hw,
4417d30ea906Sjfb8856606 							  I40E_PFQF_HLUT(i));
4418d30ea906Sjfb8856606 		}
4419a9643ea8Slogwang 	}
4420a9643ea8Slogwang 
4421a9643ea8Slogwang 	return 0;
4422a9643ea8Slogwang }
4423a9643ea8Slogwang 
4424d30ea906Sjfb8856606 int
i40e_set_rss_lut(struct i40e_vsi * vsi,uint8_t * lut,uint16_t lut_size)4425a9643ea8Slogwang i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4426a9643ea8Slogwang {
4427a9643ea8Slogwang 	struct i40e_pf *pf;
4428a9643ea8Slogwang 	struct i40e_hw *hw;
4429a9643ea8Slogwang 	int ret;
4430a9643ea8Slogwang 
4431a9643ea8Slogwang 	if (!vsi || !lut)
4432a9643ea8Slogwang 		return -EINVAL;
4433a9643ea8Slogwang 
4434a9643ea8Slogwang 	pf = I40E_VSI_TO_PF(vsi);
4435a9643ea8Slogwang 	hw = I40E_VSI_TO_HW(vsi);
4436a9643ea8Slogwang 
4437a9643ea8Slogwang 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
44381646932aSjfb8856606 		ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
44391646932aSjfb8856606 					  vsi->type != I40E_VSI_SRIOV,
4440a9643ea8Slogwang 					  lut, lut_size);
4441a9643ea8Slogwang 		if (ret) {
4442a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4443a9643ea8Slogwang 			return ret;
4444a9643ea8Slogwang 		}
4445a9643ea8Slogwang 	} else {
4446a9643ea8Slogwang 		uint32_t *lut_dw = (uint32_t *)lut;
4447a9643ea8Slogwang 		uint16_t i, lut_size_dw = lut_size / 4;
4448a9643ea8Slogwang 
4449d30ea906Sjfb8856606 		if (vsi->type == I40E_VSI_SRIOV) {
4450a9643ea8Slogwang 			for (i = 0; i < lut_size_dw; i++)
4451d30ea906Sjfb8856606 				I40E_WRITE_REG(
4452d30ea906Sjfb8856606 					hw,
4453d30ea906Sjfb8856606 					I40E_VFQF_HLUT1(i, vsi->user_param),
4454d30ea906Sjfb8856606 					lut_dw[i]);
4455d30ea906Sjfb8856606 		} else {
4456d30ea906Sjfb8856606 			for (i = 0; i < lut_size_dw; i++)
4457d30ea906Sjfb8856606 				I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4458d30ea906Sjfb8856606 					       lut_dw[i]);
4459d30ea906Sjfb8856606 		}
4460a9643ea8Slogwang 		I40E_WRITE_FLUSH(hw);
4461a9643ea8Slogwang 	}
4462a9643ea8Slogwang 
4463a9643ea8Slogwang 	return 0;
4464a9643ea8Slogwang }
4465a9643ea8Slogwang 
4466a9643ea8Slogwang static int
i40e_dev_rss_reta_update(struct rte_eth_dev * dev,struct rte_eth_rss_reta_entry64 * reta_conf,uint16_t reta_size)4467a9643ea8Slogwang i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4468a9643ea8Slogwang 			 struct rte_eth_rss_reta_entry64 *reta_conf,
4469a9643ea8Slogwang 			 uint16_t reta_size)
4470a9643ea8Slogwang {
4471a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4472a9643ea8Slogwang 	uint16_t i, lut_size = pf->hash_lut_size;
4473a9643ea8Slogwang 	uint16_t idx, shift;
4474a9643ea8Slogwang 	uint8_t *lut;
4475a9643ea8Slogwang 	int ret;
4476a9643ea8Slogwang 
4477a9643ea8Slogwang 	if (reta_size != lut_size ||
4478a9643ea8Slogwang 		reta_size > ETH_RSS_RETA_SIZE_512) {
44792bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
44802bfe3f2eSlogwang 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
44812bfe3f2eSlogwang 			reta_size, lut_size);
4482a9643ea8Slogwang 		return -EINVAL;
4483a9643ea8Slogwang 	}
4484a9643ea8Slogwang 
4485a9643ea8Slogwang 	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4486a9643ea8Slogwang 	if (!lut) {
4487a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "No memory can be allocated");
4488a9643ea8Slogwang 		return -ENOMEM;
4489a9643ea8Slogwang 	}
4490a9643ea8Slogwang 	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4491a9643ea8Slogwang 	if (ret)
4492a9643ea8Slogwang 		goto out;
4493a9643ea8Slogwang 	for (i = 0; i < reta_size; i++) {
4494a9643ea8Slogwang 		idx = i / RTE_RETA_GROUP_SIZE;
4495a9643ea8Slogwang 		shift = i % RTE_RETA_GROUP_SIZE;
4496a9643ea8Slogwang 		if (reta_conf[idx].mask & (1ULL << shift))
4497a9643ea8Slogwang 			lut[i] = reta_conf[idx].reta[shift];
4498a9643ea8Slogwang 	}
4499a9643ea8Slogwang 	ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4500a9643ea8Slogwang 
45011646932aSjfb8856606 	pf->adapter->rss_reta_updated = 1;
45021646932aSjfb8856606 
4503a9643ea8Slogwang out:
4504a9643ea8Slogwang 	rte_free(lut);
4505a9643ea8Slogwang 
4506a9643ea8Slogwang 	return ret;
4507a9643ea8Slogwang }
4508a9643ea8Slogwang 
4509a9643ea8Slogwang static int
i40e_dev_rss_reta_query(struct rte_eth_dev * dev,struct rte_eth_rss_reta_entry64 * reta_conf,uint16_t reta_size)4510a9643ea8Slogwang i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4511a9643ea8Slogwang 			struct rte_eth_rss_reta_entry64 *reta_conf,
4512a9643ea8Slogwang 			uint16_t reta_size)
4513a9643ea8Slogwang {
4514a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4515a9643ea8Slogwang 	uint16_t i, lut_size = pf->hash_lut_size;
4516a9643ea8Slogwang 	uint16_t idx, shift;
4517a9643ea8Slogwang 	uint8_t *lut;
4518a9643ea8Slogwang 	int ret;
4519a9643ea8Slogwang 
4520a9643ea8Slogwang 	if (reta_size != lut_size ||
4521a9643ea8Slogwang 		reta_size > ETH_RSS_RETA_SIZE_512) {
45222bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
45232bfe3f2eSlogwang 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
45242bfe3f2eSlogwang 			reta_size, lut_size);
4525a9643ea8Slogwang 		return -EINVAL;
4526a9643ea8Slogwang 	}
4527a9643ea8Slogwang 
4528a9643ea8Slogwang 	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4529a9643ea8Slogwang 	if (!lut) {
4530a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "No memory can be allocated");
4531a9643ea8Slogwang 		return -ENOMEM;
4532a9643ea8Slogwang 	}
4533a9643ea8Slogwang 
4534a9643ea8Slogwang 	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4535a9643ea8Slogwang 	if (ret)
4536a9643ea8Slogwang 		goto out;
4537a9643ea8Slogwang 	for (i = 0; i < reta_size; i++) {
4538a9643ea8Slogwang 		idx = i / RTE_RETA_GROUP_SIZE;
4539a9643ea8Slogwang 		shift = i % RTE_RETA_GROUP_SIZE;
4540a9643ea8Slogwang 		if (reta_conf[idx].mask & (1ULL << shift))
4541a9643ea8Slogwang 			reta_conf[idx].reta[shift] = lut[i];
4542a9643ea8Slogwang 	}
4543a9643ea8Slogwang 
4544a9643ea8Slogwang out:
4545a9643ea8Slogwang 	rte_free(lut);
4546a9643ea8Slogwang 
4547a9643ea8Slogwang 	return ret;
4548a9643ea8Slogwang }
4549a9643ea8Slogwang 
4550a9643ea8Slogwang /**
4551a9643ea8Slogwang  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4552a9643ea8Slogwang  * @hw:   pointer to the HW structure
4553a9643ea8Slogwang  * @mem:  pointer to mem struct to fill out
4554a9643ea8Slogwang  * @size: size of memory requested
4555a9643ea8Slogwang  * @alignment: what to align the allocation to
4556a9643ea8Slogwang  **/
4557a9643ea8Slogwang enum i40e_status_code
i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw * hw,struct i40e_dma_mem * mem,u64 size,u32 alignment)4558*2d9fd380Sjfb8856606 i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
4559a9643ea8Slogwang 			struct i40e_dma_mem *mem,
4560a9643ea8Slogwang 			u64 size,
4561a9643ea8Slogwang 			u32 alignment)
4562a9643ea8Slogwang {
4563a9643ea8Slogwang 	const struct rte_memzone *mz = NULL;
4564a9643ea8Slogwang 	char z_name[RTE_MEMZONE_NAMESIZE];
4565a9643ea8Slogwang 
4566a9643ea8Slogwang 	if (!mem)
4567a9643ea8Slogwang 		return I40E_ERR_PARAM;
4568a9643ea8Slogwang 
4569a9643ea8Slogwang 	snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4570d30ea906Sjfb8856606 	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4571d30ea906Sjfb8856606 			RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4572a9643ea8Slogwang 	if (!mz)
4573a9643ea8Slogwang 		return I40E_ERR_NO_MEMORY;
4574a9643ea8Slogwang 
4575a9643ea8Slogwang 	mem->size = size;
4576a9643ea8Slogwang 	mem->va = mz->addr;
45772bfe3f2eSlogwang 	mem->pa = mz->iova;
4578a9643ea8Slogwang 	mem->zone = (const void *)mz;
45792bfe3f2eSlogwang 	PMD_DRV_LOG(DEBUG,
45802bfe3f2eSlogwang 		"memzone %s allocated with physical address: %"PRIu64,
45812bfe3f2eSlogwang 		mz->name, mem->pa);
4582a9643ea8Slogwang 
4583a9643ea8Slogwang 	return I40E_SUCCESS;
4584a9643ea8Slogwang }
4585a9643ea8Slogwang 
4586a9643ea8Slogwang /**
4587a9643ea8Slogwang  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4588a9643ea8Slogwang  * @hw:   pointer to the HW structure
4589a9643ea8Slogwang  * @mem:  ptr to mem struct to free
4590a9643ea8Slogwang  **/
4591a9643ea8Slogwang enum i40e_status_code
i40e_free_dma_mem_d(__rte_unused struct i40e_hw * hw,struct i40e_dma_mem * mem)4592*2d9fd380Sjfb8856606 i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
4593a9643ea8Slogwang 		    struct i40e_dma_mem *mem)
4594a9643ea8Slogwang {
4595a9643ea8Slogwang 	if (!mem)
4596a9643ea8Slogwang 		return I40E_ERR_PARAM;
4597a9643ea8Slogwang 
45982bfe3f2eSlogwang 	PMD_DRV_LOG(DEBUG,
45992bfe3f2eSlogwang 		"memzone %s to be freed with physical address: %"PRIu64,
46002bfe3f2eSlogwang 		((const struct rte_memzone *)mem->zone)->name, mem->pa);
4601a9643ea8Slogwang 	rte_memzone_free((const struct rte_memzone *)mem->zone);
4602a9643ea8Slogwang 	mem->zone = NULL;
4603a9643ea8Slogwang 	mem->va = NULL;
4604a9643ea8Slogwang 	mem->pa = (u64)0;
4605a9643ea8Slogwang 
4606a9643ea8Slogwang 	return I40E_SUCCESS;
4607a9643ea8Slogwang }
4608a9643ea8Slogwang 
4609a9643ea8Slogwang /**
4610a9643ea8Slogwang  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4611a9643ea8Slogwang  * @hw:   pointer to the HW structure
4612a9643ea8Slogwang  * @mem:  pointer to mem struct to fill out
4613a9643ea8Slogwang  * @size: size of memory requested
4614a9643ea8Slogwang  **/
4615a9643ea8Slogwang enum i40e_status_code
i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw * hw,struct i40e_virt_mem * mem,u32 size)4616*2d9fd380Sjfb8856606 i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
4617a9643ea8Slogwang 			 struct i40e_virt_mem *mem,
4618a9643ea8Slogwang 			 u32 size)
4619a9643ea8Slogwang {
4620a9643ea8Slogwang 	if (!mem)
4621a9643ea8Slogwang 		return I40E_ERR_PARAM;
4622a9643ea8Slogwang 
4623a9643ea8Slogwang 	mem->size = size;
4624a9643ea8Slogwang 	mem->va = rte_zmalloc("i40e", size, 0);
4625a9643ea8Slogwang 
4626a9643ea8Slogwang 	if (mem->va)
4627a9643ea8Slogwang 		return I40E_SUCCESS;
4628a9643ea8Slogwang 	else
4629a9643ea8Slogwang 		return I40E_ERR_NO_MEMORY;
4630a9643ea8Slogwang }
4631a9643ea8Slogwang 
4632a9643ea8Slogwang /**
4633a9643ea8Slogwang  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4634a9643ea8Slogwang  * @hw:   pointer to the HW structure
4635a9643ea8Slogwang  * @mem:  pointer to mem struct to free
4636a9643ea8Slogwang  **/
4637a9643ea8Slogwang enum i40e_status_code
i40e_free_virt_mem_d(__rte_unused struct i40e_hw * hw,struct i40e_virt_mem * mem)4638*2d9fd380Sjfb8856606 i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
4639a9643ea8Slogwang 		     struct i40e_virt_mem *mem)
4640a9643ea8Slogwang {
4641a9643ea8Slogwang 	if (!mem)
4642a9643ea8Slogwang 		return I40E_ERR_PARAM;
4643a9643ea8Slogwang 
4644a9643ea8Slogwang 	rte_free(mem->va);
4645a9643ea8Slogwang 	mem->va = NULL;
4646a9643ea8Slogwang 
4647a9643ea8Slogwang 	return I40E_SUCCESS;
4648a9643ea8Slogwang }
4649a9643ea8Slogwang 
4650a9643ea8Slogwang void
i40e_init_spinlock_d(struct i40e_spinlock * sp)4651a9643ea8Slogwang i40e_init_spinlock_d(struct i40e_spinlock *sp)
4652a9643ea8Slogwang {
4653a9643ea8Slogwang 	rte_spinlock_init(&sp->spinlock);
4654a9643ea8Slogwang }
4655a9643ea8Slogwang 
4656a9643ea8Slogwang void
i40e_acquire_spinlock_d(struct i40e_spinlock * sp)4657a9643ea8Slogwang i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4658a9643ea8Slogwang {
4659a9643ea8Slogwang 	rte_spinlock_lock(&sp->spinlock);
4660a9643ea8Slogwang }
4661a9643ea8Slogwang 
4662a9643ea8Slogwang void
i40e_release_spinlock_d(struct i40e_spinlock * sp)4663a9643ea8Slogwang i40e_release_spinlock_d(struct i40e_spinlock *sp)
4664a9643ea8Slogwang {
4665a9643ea8Slogwang 	rte_spinlock_unlock(&sp->spinlock);
4666a9643ea8Slogwang }
4667a9643ea8Slogwang 
4668a9643ea8Slogwang void
i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock * sp)4669*2d9fd380Sjfb8856606 i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
4670a9643ea8Slogwang {
4671a9643ea8Slogwang 	return;
4672a9643ea8Slogwang }
4673a9643ea8Slogwang 
4674a9643ea8Slogwang /**
4675a9643ea8Slogwang  * Get the hardware capabilities, which will be parsed
4676a9643ea8Slogwang  * and saved into struct i40e_hw.
4677a9643ea8Slogwang  */
4678a9643ea8Slogwang static int
i40e_get_cap(struct i40e_hw * hw)4679a9643ea8Slogwang i40e_get_cap(struct i40e_hw *hw)
4680a9643ea8Slogwang {
4681a9643ea8Slogwang 	struct i40e_aqc_list_capabilities_element_resp *buf;
4682a9643ea8Slogwang 	uint16_t len, size = 0;
4683a9643ea8Slogwang 	int ret;
4684a9643ea8Slogwang 
4685a9643ea8Slogwang 	/* Calculate a huge enough buff for saving response data temporarily */
4686a9643ea8Slogwang 	len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4687a9643ea8Slogwang 						I40E_MAX_CAP_ELE_NUM;
4688a9643ea8Slogwang 	buf = rte_zmalloc("i40e", len, 0);
4689a9643ea8Slogwang 	if (!buf) {
4690a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
4691a9643ea8Slogwang 		return I40E_ERR_NO_MEMORY;
4692a9643ea8Slogwang 	}
4693a9643ea8Slogwang 
4694a9643ea8Slogwang 	/* Get, parse the capabilities and save it to hw */
4695a9643ea8Slogwang 	ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4696a9643ea8Slogwang 			i40e_aqc_opc_list_func_capabilities, NULL);
4697a9643ea8Slogwang 	if (ret != I40E_SUCCESS)
4698a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4699a9643ea8Slogwang 
4700a9643ea8Slogwang 	/* Free the temporary buffer after being used */
4701a9643ea8Slogwang 	rte_free(buf);
4702a9643ea8Slogwang 
4703a9643ea8Slogwang 	return ret;
4704a9643ea8Slogwang }
4705a9643ea8Slogwang 
4706d30ea906Sjfb8856606 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF	4
4707d30ea906Sjfb8856606 
i40e_pf_parse_vf_queue_number_handler(const char * key,const char * value,void * opaque)4708d30ea906Sjfb8856606 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4709d30ea906Sjfb8856606 		const char *value,
4710d30ea906Sjfb8856606 		void *opaque)
4711d30ea906Sjfb8856606 {
4712d30ea906Sjfb8856606 	struct i40e_pf *pf;
4713d30ea906Sjfb8856606 	unsigned long num;
4714d30ea906Sjfb8856606 	char *end;
4715d30ea906Sjfb8856606 
4716d30ea906Sjfb8856606 	pf = (struct i40e_pf *)opaque;
4717d30ea906Sjfb8856606 	RTE_SET_USED(key);
4718d30ea906Sjfb8856606 
4719d30ea906Sjfb8856606 	errno = 0;
4720d30ea906Sjfb8856606 	num = strtoul(value, &end, 0);
4721d30ea906Sjfb8856606 	if (errno != 0 || end == value || *end != 0) {
4722d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, Now it is "
4723d30ea906Sjfb8856606 			    "kept the value = %hu", value, pf->vf_nb_qp_max);
4724d30ea906Sjfb8856606 		return -(EINVAL);
4725d30ea906Sjfb8856606 	}
4726d30ea906Sjfb8856606 
4727d30ea906Sjfb8856606 	if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4728d30ea906Sjfb8856606 		pf->vf_nb_qp_max = (uint16_t)num;
4729d30ea906Sjfb8856606 	else
4730d30ea906Sjfb8856606 		/* here return 0 to make next valid same argument work */
4731d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING, "Wrong VF queue number = %lu, it must be "
4732d30ea906Sjfb8856606 			    "power of 2 and equal or less than 16 !, Now it is "
4733d30ea906Sjfb8856606 			    "kept the value = %hu", num, pf->vf_nb_qp_max);
4734d30ea906Sjfb8856606 
4735d30ea906Sjfb8856606 	return 0;
4736d30ea906Sjfb8856606 }
4737d30ea906Sjfb8856606 
i40e_pf_config_vf_rxq_number(struct rte_eth_dev * dev)4738d30ea906Sjfb8856606 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4739d30ea906Sjfb8856606 {
4740d30ea906Sjfb8856606 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4741d30ea906Sjfb8856606 	struct rte_kvargs *kvlist;
4742d30ea906Sjfb8856606 	int kvargs_count;
4743d30ea906Sjfb8856606 
4744d30ea906Sjfb8856606 	/* set default queue number per VF as 4 */
4745d30ea906Sjfb8856606 	pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4746d30ea906Sjfb8856606 
4747d30ea906Sjfb8856606 	if (dev->device->devargs == NULL)
4748d30ea906Sjfb8856606 		return 0;
4749d30ea906Sjfb8856606 
4750d30ea906Sjfb8856606 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4751d30ea906Sjfb8856606 	if (kvlist == NULL)
4752d30ea906Sjfb8856606 		return -(EINVAL);
4753d30ea906Sjfb8856606 
4754d30ea906Sjfb8856606 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4755d30ea906Sjfb8856606 	if (!kvargs_count) {
4756d30ea906Sjfb8856606 		rte_kvargs_free(kvlist);
4757d30ea906Sjfb8856606 		return 0;
4758d30ea906Sjfb8856606 	}
4759d30ea906Sjfb8856606 
4760d30ea906Sjfb8856606 	if (kvargs_count > 1)
4761d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
4762d30ea906Sjfb8856606 			    "the first invalid or last valid one is used !",
4763d30ea906Sjfb8856606 			    ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4764d30ea906Sjfb8856606 
4765d30ea906Sjfb8856606 	rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4766d30ea906Sjfb8856606 			   i40e_pf_parse_vf_queue_number_handler, pf);
4767d30ea906Sjfb8856606 
4768d30ea906Sjfb8856606 	rte_kvargs_free(kvlist);
4769d30ea906Sjfb8856606 
4770d30ea906Sjfb8856606 	return 0;
4771d30ea906Sjfb8856606 }
4772d30ea906Sjfb8856606 
4773a9643ea8Slogwang static int
i40e_pf_parameter_init(struct rte_eth_dev * dev)4774a9643ea8Slogwang i40e_pf_parameter_init(struct rte_eth_dev *dev)
4775a9643ea8Slogwang {
4776a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4777a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
47782bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4779a9643ea8Slogwang 	uint16_t qp_count = 0, vsi_count = 0;
4780a9643ea8Slogwang 
47812bfe3f2eSlogwang 	if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4782a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4783a9643ea8Slogwang 		return -EINVAL;
4784a9643ea8Slogwang 	}
4785d30ea906Sjfb8856606 
4786d30ea906Sjfb8856606 	i40e_pf_config_vf_rxq_number(dev);
4787d30ea906Sjfb8856606 
4788a9643ea8Slogwang 	/* Add the parameter init for LFC */
4789a9643ea8Slogwang 	pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4790a9643ea8Slogwang 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4791a9643ea8Slogwang 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4792a9643ea8Slogwang 
4793a9643ea8Slogwang 	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4794a9643ea8Slogwang 	pf->max_num_vsi = hw->func_caps.num_vsis;
4795a9643ea8Slogwang 	pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4796a9643ea8Slogwang 	pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4797a9643ea8Slogwang 
4798a9643ea8Slogwang 	/* FDir queue/VSI allocation */
4799a9643ea8Slogwang 	pf->fdir_qp_offset = 0;
4800a9643ea8Slogwang 	if (hw->func_caps.fd) {
4801a9643ea8Slogwang 		pf->flags |= I40E_FLAG_FDIR;
4802a9643ea8Slogwang 		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4803a9643ea8Slogwang 	} else {
4804a9643ea8Slogwang 		pf->fdir_nb_qps = 0;
4805a9643ea8Slogwang 	}
4806a9643ea8Slogwang 	qp_count += pf->fdir_nb_qps;
4807a9643ea8Slogwang 	vsi_count += 1;
4808a9643ea8Slogwang 
4809a9643ea8Slogwang 	/* LAN queue/VSI allocation */
4810a9643ea8Slogwang 	pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4811a9643ea8Slogwang 	if (!hw->func_caps.rss) {
4812a9643ea8Slogwang 		pf->lan_nb_qps = 1;
4813a9643ea8Slogwang 	} else {
4814a9643ea8Slogwang 		pf->flags |= I40E_FLAG_RSS;
4815a9643ea8Slogwang 		if (hw->mac.type == I40E_MAC_X722)
4816a9643ea8Slogwang 			pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4817a9643ea8Slogwang 		pf->lan_nb_qps = pf->lan_nb_qp_max;
4818a9643ea8Slogwang 	}
4819a9643ea8Slogwang 	qp_count += pf->lan_nb_qps;
4820a9643ea8Slogwang 	vsi_count += 1;
4821a9643ea8Slogwang 
4822a9643ea8Slogwang 	/* VF queue/VSI allocation */
4823a9643ea8Slogwang 	pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
48242bfe3f2eSlogwang 	if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4825a9643ea8Slogwang 		pf->flags |= I40E_FLAG_SRIOV;
4826d30ea906Sjfb8856606 		pf->vf_nb_qps = pf->vf_nb_qp_max;
48272bfe3f2eSlogwang 		pf->vf_num = pci_dev->max_vfs;
48282bfe3f2eSlogwang 		PMD_DRV_LOG(DEBUG,
48292bfe3f2eSlogwang 			"%u VF VSIs, %u queues per VF VSI, in total %u queues",
48302bfe3f2eSlogwang 			pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4831a9643ea8Slogwang 	} else {
4832a9643ea8Slogwang 		pf->vf_nb_qps = 0;
4833a9643ea8Slogwang 		pf->vf_num = 0;
4834a9643ea8Slogwang 	}
4835a9643ea8Slogwang 	qp_count += pf->vf_nb_qps * pf->vf_num;
4836a9643ea8Slogwang 	vsi_count += pf->vf_num;
4837a9643ea8Slogwang 
4838a9643ea8Slogwang 	/* VMDq queue/VSI allocation */
4839a9643ea8Slogwang 	pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4840a9643ea8Slogwang 	pf->vmdq_nb_qps = 0;
4841a9643ea8Slogwang 	pf->max_nb_vmdq_vsi = 0;
4842a9643ea8Slogwang 	if (hw->func_caps.vmdq) {
4843a9643ea8Slogwang 		if (qp_count < hw->func_caps.num_tx_qp &&
4844a9643ea8Slogwang 			vsi_count < hw->func_caps.num_vsis) {
4845a9643ea8Slogwang 			pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4846a9643ea8Slogwang 				qp_count) / pf->vmdq_nb_qp_max;
4847a9643ea8Slogwang 
4848a9643ea8Slogwang 			/* Limit the maximum number of VMDq vsi to the maximum
4849a9643ea8Slogwang 			 * ethdev can support
4850a9643ea8Slogwang 			 */
4851a9643ea8Slogwang 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4852a9643ea8Slogwang 				hw->func_caps.num_vsis - vsi_count);
4853a9643ea8Slogwang 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4854a9643ea8Slogwang 				ETH_64_POOLS);
4855a9643ea8Slogwang 			if (pf->max_nb_vmdq_vsi) {
4856a9643ea8Slogwang 				pf->flags |= I40E_FLAG_VMDQ;
4857a9643ea8Slogwang 				pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
48582bfe3f2eSlogwang 				PMD_DRV_LOG(DEBUG,
48592bfe3f2eSlogwang 					"%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
48602bfe3f2eSlogwang 					pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
48612bfe3f2eSlogwang 					pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4862a9643ea8Slogwang 			} else {
48632bfe3f2eSlogwang 				PMD_DRV_LOG(INFO,
48642bfe3f2eSlogwang 					"No enough queues left for VMDq");
4865a9643ea8Slogwang 			}
4866a9643ea8Slogwang 		} else {
4867a9643ea8Slogwang 			PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4868a9643ea8Slogwang 		}
4869a9643ea8Slogwang 	}
4870a9643ea8Slogwang 	qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4871a9643ea8Slogwang 	vsi_count += pf->max_nb_vmdq_vsi;
4872a9643ea8Slogwang 
4873a9643ea8Slogwang 	if (hw->func_caps.dcb)
4874a9643ea8Slogwang 		pf->flags |= I40E_FLAG_DCB;
4875a9643ea8Slogwang 
4876a9643ea8Slogwang 	if (qp_count > hw->func_caps.num_tx_qp) {
48772bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
48782bfe3f2eSlogwang 			"Failed to allocate %u queues, which exceeds the hardware maximum %u",
48792bfe3f2eSlogwang 			qp_count, hw->func_caps.num_tx_qp);
4880a9643ea8Slogwang 		return -EINVAL;
4881a9643ea8Slogwang 	}
4882a9643ea8Slogwang 	if (vsi_count > hw->func_caps.num_vsis) {
48832bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
48842bfe3f2eSlogwang 			"Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
48852bfe3f2eSlogwang 			vsi_count, hw->func_caps.num_vsis);
4886a9643ea8Slogwang 		return -EINVAL;
4887a9643ea8Slogwang 	}
4888a9643ea8Slogwang 
4889a9643ea8Slogwang 	return 0;
4890a9643ea8Slogwang }
4891a9643ea8Slogwang 
4892a9643ea8Slogwang static int
i40e_pf_get_switch_config(struct i40e_pf * pf)4893a9643ea8Slogwang i40e_pf_get_switch_config(struct i40e_pf *pf)
4894a9643ea8Slogwang {
4895a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4896a9643ea8Slogwang 	struct i40e_aqc_get_switch_config_resp *switch_config;
4897a9643ea8Slogwang 	struct i40e_aqc_switch_config_element_resp *element;
4898a9643ea8Slogwang 	uint16_t start_seid = 0, num_reported;
4899a9643ea8Slogwang 	int ret;
4900a9643ea8Slogwang 
4901a9643ea8Slogwang 	switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4902a9643ea8Slogwang 			rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4903a9643ea8Slogwang 	if (!switch_config) {
4904a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to allocated memory");
4905a9643ea8Slogwang 		return -ENOMEM;
4906a9643ea8Slogwang 	}
4907a9643ea8Slogwang 
4908a9643ea8Slogwang 	/* Get the switch configurations */
4909a9643ea8Slogwang 	ret = i40e_aq_get_switch_config(hw, switch_config,
4910a9643ea8Slogwang 		I40E_AQ_LARGE_BUF, &start_seid, NULL);
4911a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
4912a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4913a9643ea8Slogwang 		goto fail;
4914a9643ea8Slogwang 	}
4915a9643ea8Slogwang 	num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4916a9643ea8Slogwang 	if (num_reported != 1) { /* The number should be 1 */
4917a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4918a9643ea8Slogwang 		goto fail;
4919a9643ea8Slogwang 	}
4920a9643ea8Slogwang 
4921a9643ea8Slogwang 	/* Parse the switch configuration elements */
4922a9643ea8Slogwang 	element = &(switch_config->element[0]);
4923a9643ea8Slogwang 	if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4924a9643ea8Slogwang 		pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4925a9643ea8Slogwang 		pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4926a9643ea8Slogwang 	} else
4927a9643ea8Slogwang 		PMD_DRV_LOG(INFO, "Unknown element type");
4928a9643ea8Slogwang 
4929a9643ea8Slogwang fail:
4930a9643ea8Slogwang 	rte_free(switch_config);
4931a9643ea8Slogwang 
4932a9643ea8Slogwang 	return ret;
4933a9643ea8Slogwang }
4934a9643ea8Slogwang 
4935a9643ea8Slogwang static int
i40e_res_pool_init(struct i40e_res_pool_info * pool,uint32_t base,uint32_t num)4936a9643ea8Slogwang i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
4937a9643ea8Slogwang 			uint32_t num)
4938a9643ea8Slogwang {
4939a9643ea8Slogwang 	struct pool_entry *entry;
4940a9643ea8Slogwang 
4941a9643ea8Slogwang 	if (pool == NULL || num == 0)
4942a9643ea8Slogwang 		return -EINVAL;
4943a9643ea8Slogwang 
4944a9643ea8Slogwang 	entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4945a9643ea8Slogwang 	if (entry == NULL) {
4946a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4947a9643ea8Slogwang 		return -ENOMEM;
4948a9643ea8Slogwang 	}
4949a9643ea8Slogwang 
4950a9643ea8Slogwang 	/* queue heap initialize */
4951a9643ea8Slogwang 	pool->num_free = num;
4952a9643ea8Slogwang 	pool->num_alloc = 0;
4953a9643ea8Slogwang 	pool->base = base;
4954a9643ea8Slogwang 	LIST_INIT(&pool->alloc_list);
4955a9643ea8Slogwang 	LIST_INIT(&pool->free_list);
4956a9643ea8Slogwang 
4957a9643ea8Slogwang 	/* Initialize element  */
4958a9643ea8Slogwang 	entry->base = 0;
4959a9643ea8Slogwang 	entry->len = num;
4960a9643ea8Slogwang 
4961a9643ea8Slogwang 	LIST_INSERT_HEAD(&pool->free_list, entry, next);
4962a9643ea8Slogwang 	return 0;
4963a9643ea8Slogwang }
4964a9643ea8Slogwang 
4965a9643ea8Slogwang static void
i40e_res_pool_destroy(struct i40e_res_pool_info * pool)4966a9643ea8Slogwang i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4967a9643ea8Slogwang {
4968a9643ea8Slogwang 	struct pool_entry *entry, *next_entry;
4969a9643ea8Slogwang 
4970a9643ea8Slogwang 	if (pool == NULL)
4971a9643ea8Slogwang 		return;
4972a9643ea8Slogwang 
4973a9643ea8Slogwang 	for (entry = LIST_FIRST(&pool->alloc_list);
4974a9643ea8Slogwang 			entry && (next_entry = LIST_NEXT(entry, next), 1);
4975a9643ea8Slogwang 			entry = next_entry) {
4976a9643ea8Slogwang 		LIST_REMOVE(entry, next);
4977a9643ea8Slogwang 		rte_free(entry);
4978a9643ea8Slogwang 	}
4979a9643ea8Slogwang 
4980a9643ea8Slogwang 	for (entry = LIST_FIRST(&pool->free_list);
4981a9643ea8Slogwang 			entry && (next_entry = LIST_NEXT(entry, next), 1);
4982a9643ea8Slogwang 			entry = next_entry) {
4983a9643ea8Slogwang 		LIST_REMOVE(entry, next);
4984a9643ea8Slogwang 		rte_free(entry);
4985a9643ea8Slogwang 	}
4986a9643ea8Slogwang 
4987a9643ea8Slogwang 	pool->num_free = 0;
4988a9643ea8Slogwang 	pool->num_alloc = 0;
4989a9643ea8Slogwang 	pool->base = 0;
4990a9643ea8Slogwang 	LIST_INIT(&pool->alloc_list);
4991a9643ea8Slogwang 	LIST_INIT(&pool->free_list);
4992a9643ea8Slogwang }
4993a9643ea8Slogwang 
4994a9643ea8Slogwang static int
i40e_res_pool_free(struct i40e_res_pool_info * pool,uint32_t base)4995a9643ea8Slogwang i40e_res_pool_free(struct i40e_res_pool_info *pool,
4996a9643ea8Slogwang 		       uint32_t base)
4997a9643ea8Slogwang {
4998a9643ea8Slogwang 	struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4999a9643ea8Slogwang 	uint32_t pool_offset;
50000c6bd470Sfengbojiang 	uint16_t len;
5001a9643ea8Slogwang 	int insert;
5002a9643ea8Slogwang 
5003a9643ea8Slogwang 	if (pool == NULL) {
5004a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Invalid parameter");
5005a9643ea8Slogwang 		return -EINVAL;
5006a9643ea8Slogwang 	}
5007a9643ea8Slogwang 
5008a9643ea8Slogwang 	pool_offset = base - pool->base;
5009a9643ea8Slogwang 	/* Lookup in alloc list */
5010a9643ea8Slogwang 	LIST_FOREACH(entry, &pool->alloc_list, next) {
5011a9643ea8Slogwang 		if (entry->base == pool_offset) {
5012a9643ea8Slogwang 			valid_entry = entry;
5013a9643ea8Slogwang 			LIST_REMOVE(entry, next);
5014a9643ea8Slogwang 			break;
5015a9643ea8Slogwang 		}
5016a9643ea8Slogwang 	}
5017a9643ea8Slogwang 
5018a9643ea8Slogwang 	/* Not find, return */
5019a9643ea8Slogwang 	if (valid_entry == NULL) {
5020a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to find entry");
5021a9643ea8Slogwang 		return -EINVAL;
5022a9643ea8Slogwang 	}
5023a9643ea8Slogwang 
5024a9643ea8Slogwang 	/**
5025a9643ea8Slogwang 	 * Found it, move it to free list  and try to merge.
5026a9643ea8Slogwang 	 * In order to make merge easier, always sort it by qbase.
5027a9643ea8Slogwang 	 * Find adjacent prev and last entries.
5028a9643ea8Slogwang 	 */
5029a9643ea8Slogwang 	prev = next = NULL;
5030a9643ea8Slogwang 	LIST_FOREACH(entry, &pool->free_list, next) {
5031a9643ea8Slogwang 		if (entry->base > valid_entry->base) {
5032a9643ea8Slogwang 			next = entry;
5033a9643ea8Slogwang 			break;
5034a9643ea8Slogwang 		}
5035a9643ea8Slogwang 		prev = entry;
5036a9643ea8Slogwang 	}
5037a9643ea8Slogwang 
5038a9643ea8Slogwang 	insert = 0;
50390c6bd470Sfengbojiang 	len = valid_entry->len;
5040a9643ea8Slogwang 	/* Try to merge with next one*/
5041a9643ea8Slogwang 	if (next != NULL) {
5042a9643ea8Slogwang 		/* Merge with next one */
50430c6bd470Sfengbojiang 		if (valid_entry->base + len == next->base) {
5044a9643ea8Slogwang 			next->base = valid_entry->base;
50450c6bd470Sfengbojiang 			next->len += len;
5046a9643ea8Slogwang 			rte_free(valid_entry);
5047a9643ea8Slogwang 			valid_entry = next;
5048a9643ea8Slogwang 			insert = 1;
5049a9643ea8Slogwang 		}
5050a9643ea8Slogwang 	}
5051a9643ea8Slogwang 
5052a9643ea8Slogwang 	if (prev != NULL) {
5053a9643ea8Slogwang 		/* Merge with previous one */
5054a9643ea8Slogwang 		if (prev->base + prev->len == valid_entry->base) {
50550c6bd470Sfengbojiang 			prev->len += len;
5056a9643ea8Slogwang 			/* If it merge with next one, remove next node */
5057a9643ea8Slogwang 			if (insert == 1) {
5058a9643ea8Slogwang 				LIST_REMOVE(valid_entry, next);
5059a9643ea8Slogwang 				rte_free(valid_entry);
50600c6bd470Sfengbojiang 				valid_entry = NULL;
5061a9643ea8Slogwang 			} else {
5062a9643ea8Slogwang 				rte_free(valid_entry);
50630c6bd470Sfengbojiang 				valid_entry = NULL;
5064a9643ea8Slogwang 				insert = 1;
5065a9643ea8Slogwang 			}
5066a9643ea8Slogwang 		}
5067a9643ea8Slogwang 	}
5068a9643ea8Slogwang 
5069a9643ea8Slogwang 	/* Not find any entry to merge, insert */
5070a9643ea8Slogwang 	if (insert == 0) {
5071a9643ea8Slogwang 		if (prev != NULL)
5072a9643ea8Slogwang 			LIST_INSERT_AFTER(prev, valid_entry, next);
5073a9643ea8Slogwang 		else if (next != NULL)
5074a9643ea8Slogwang 			LIST_INSERT_BEFORE(next, valid_entry, next);
5075a9643ea8Slogwang 		else /* It's empty list, insert to head */
5076a9643ea8Slogwang 			LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
5077a9643ea8Slogwang 	}
5078a9643ea8Slogwang 
50790c6bd470Sfengbojiang 	pool->num_free += len;
50800c6bd470Sfengbojiang 	pool->num_alloc -= len;
5081a9643ea8Slogwang 
5082a9643ea8Slogwang 	return 0;
5083a9643ea8Slogwang }
5084a9643ea8Slogwang 
5085a9643ea8Slogwang static int
i40e_res_pool_alloc(struct i40e_res_pool_info * pool,uint16_t num)5086a9643ea8Slogwang i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
5087a9643ea8Slogwang 		       uint16_t num)
5088a9643ea8Slogwang {
5089a9643ea8Slogwang 	struct pool_entry *entry, *valid_entry;
5090a9643ea8Slogwang 
5091a9643ea8Slogwang 	if (pool == NULL || num == 0) {
5092a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Invalid parameter");
5093a9643ea8Slogwang 		return -EINVAL;
5094a9643ea8Slogwang 	}
5095a9643ea8Slogwang 
5096a9643ea8Slogwang 	if (pool->num_free < num) {
5097a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
5098a9643ea8Slogwang 			    num, pool->num_free);
5099a9643ea8Slogwang 		return -ENOMEM;
5100a9643ea8Slogwang 	}
5101a9643ea8Slogwang 
5102a9643ea8Slogwang 	valid_entry = NULL;
5103a9643ea8Slogwang 	/* Lookup  in free list and find most fit one */
5104a9643ea8Slogwang 	LIST_FOREACH(entry, &pool->free_list, next) {
5105a9643ea8Slogwang 		if (entry->len >= num) {
5106a9643ea8Slogwang 			/* Find best one */
5107a9643ea8Slogwang 			if (entry->len == num) {
5108a9643ea8Slogwang 				valid_entry = entry;
5109a9643ea8Slogwang 				break;
5110a9643ea8Slogwang 			}
5111a9643ea8Slogwang 			if (valid_entry == NULL || valid_entry->len > entry->len)
5112a9643ea8Slogwang 				valid_entry = entry;
5113a9643ea8Slogwang 		}
5114a9643ea8Slogwang 	}
5115a9643ea8Slogwang 
5116a9643ea8Slogwang 	/* Not find one to satisfy the request, return */
5117a9643ea8Slogwang 	if (valid_entry == NULL) {
5118a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "No valid entry found");
5119a9643ea8Slogwang 		return -ENOMEM;
5120a9643ea8Slogwang 	}
5121a9643ea8Slogwang 	/**
5122a9643ea8Slogwang 	 * The entry have equal queue number as requested,
5123a9643ea8Slogwang 	 * remove it from alloc_list.
5124a9643ea8Slogwang 	 */
5125a9643ea8Slogwang 	if (valid_entry->len == num) {
5126a9643ea8Slogwang 		LIST_REMOVE(valid_entry, next);
5127a9643ea8Slogwang 	} else {
5128a9643ea8Slogwang 		/**
5129a9643ea8Slogwang 		 * The entry have more numbers than requested,
5130a9643ea8Slogwang 		 * create a new entry for alloc_list and minus its
5131a9643ea8Slogwang 		 * queue base and number in free_list.
5132a9643ea8Slogwang 		 */
5133a9643ea8Slogwang 		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
5134a9643ea8Slogwang 		if (entry == NULL) {
51352bfe3f2eSlogwang 			PMD_DRV_LOG(ERR,
51362bfe3f2eSlogwang 				"Failed to allocate memory for resource pool");
5137a9643ea8Slogwang 			return -ENOMEM;
5138a9643ea8Slogwang 		}
5139a9643ea8Slogwang 		entry->base = valid_entry->base;
5140a9643ea8Slogwang 		entry->len = num;
5141a9643ea8Slogwang 		valid_entry->base += num;
5142a9643ea8Slogwang 		valid_entry->len -= num;
5143a9643ea8Slogwang 		valid_entry = entry;
5144a9643ea8Slogwang 	}
5145a9643ea8Slogwang 
5146a9643ea8Slogwang 	/* Insert it into alloc list, not sorted */
5147a9643ea8Slogwang 	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
5148a9643ea8Slogwang 
5149a9643ea8Slogwang 	pool->num_free -= valid_entry->len;
5150a9643ea8Slogwang 	pool->num_alloc += valid_entry->len;
5151a9643ea8Slogwang 
5152a9643ea8Slogwang 	return valid_entry->base + pool->base;
5153a9643ea8Slogwang }
5154a9643ea8Slogwang 
5155a9643ea8Slogwang /**
5156a9643ea8Slogwang  * bitmap_is_subset - Check whether src2 is subset of src1
5157a9643ea8Slogwang  **/
5158a9643ea8Slogwang static inline int
bitmap_is_subset(uint8_t src1,uint8_t src2)5159a9643ea8Slogwang bitmap_is_subset(uint8_t src1, uint8_t src2)
5160a9643ea8Slogwang {
5161a9643ea8Slogwang 	return !((src1 ^ src2) & src2);
5162a9643ea8Slogwang }
5163a9643ea8Slogwang 
5164a9643ea8Slogwang static enum i40e_status_code
validate_tcmap_parameter(struct i40e_vsi * vsi,uint8_t enabled_tcmap)5165a9643ea8Slogwang validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5166a9643ea8Slogwang {
5167a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5168a9643ea8Slogwang 
5169a9643ea8Slogwang 	/* If DCB is not supported, only default TC is supported */
5170a9643ea8Slogwang 	if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
5171a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
5172a9643ea8Slogwang 		return I40E_NOT_SUPPORTED;
5173a9643ea8Slogwang 	}
5174a9643ea8Slogwang 
5175a9643ea8Slogwang 	if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
51762bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
51772bfe3f2eSlogwang 			"Enabled TC map 0x%x not applicable to HW support 0x%x",
51782bfe3f2eSlogwang 			hw->func_caps.enabled_tcmap, enabled_tcmap);
5179a9643ea8Slogwang 		return I40E_NOT_SUPPORTED;
5180a9643ea8Slogwang 	}
5181a9643ea8Slogwang 	return I40E_SUCCESS;
5182a9643ea8Slogwang }
5183a9643ea8Slogwang 
5184a9643ea8Slogwang int
i40e_vsi_vlan_pvid_set(struct i40e_vsi * vsi,struct i40e_vsi_vlan_pvid_info * info)5185a9643ea8Slogwang i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
5186a9643ea8Slogwang 				struct i40e_vsi_vlan_pvid_info *info)
5187a9643ea8Slogwang {
5188a9643ea8Slogwang 	struct i40e_hw *hw;
5189a9643ea8Slogwang 	struct i40e_vsi_context ctxt;
5190a9643ea8Slogwang 	uint8_t vlan_flags = 0;
5191a9643ea8Slogwang 	int ret;
5192a9643ea8Slogwang 
5193a9643ea8Slogwang 	if (vsi == NULL || info == NULL) {
5194a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "invalid parameters");
5195a9643ea8Slogwang 		return I40E_ERR_PARAM;
5196a9643ea8Slogwang 	}
5197a9643ea8Slogwang 
5198a9643ea8Slogwang 	if (info->on) {
5199a9643ea8Slogwang 		vsi->info.pvid = info->config.pvid;
5200a9643ea8Slogwang 		/**
5201a9643ea8Slogwang 		 * If insert pvid is enabled, only tagged pkts are
5202a9643ea8Slogwang 		 * allowed to be sent out.
5203a9643ea8Slogwang 		 */
5204a9643ea8Slogwang 		vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5205a9643ea8Slogwang 				I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5206a9643ea8Slogwang 	} else {
5207a9643ea8Slogwang 		vsi->info.pvid = 0;
5208a9643ea8Slogwang 		if (info->config.reject.tagged == 0)
5209a9643ea8Slogwang 			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5210a9643ea8Slogwang 
5211a9643ea8Slogwang 		if (info->config.reject.untagged == 0)
5212a9643ea8Slogwang 			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5213a9643ea8Slogwang 	}
5214a9643ea8Slogwang 	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5215a9643ea8Slogwang 					I40E_AQ_VSI_PVLAN_MODE_MASK);
5216a9643ea8Slogwang 	vsi->info.port_vlan_flags |= vlan_flags;
5217a9643ea8Slogwang 	vsi->info.valid_sections =
5218a9643ea8Slogwang 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5219a9643ea8Slogwang 	memset(&ctxt, 0, sizeof(ctxt));
52202bfe3f2eSlogwang 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5221a9643ea8Slogwang 	ctxt.seid = vsi->seid;
5222a9643ea8Slogwang 
5223a9643ea8Slogwang 	hw = I40E_VSI_TO_HW(vsi);
5224a9643ea8Slogwang 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5225a9643ea8Slogwang 	if (ret != I40E_SUCCESS)
5226a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to update VSI params");
5227a9643ea8Slogwang 
5228a9643ea8Slogwang 	return ret;
5229a9643ea8Slogwang }
5230a9643ea8Slogwang 
5231a9643ea8Slogwang static int
i40e_vsi_update_tc_bandwidth(struct i40e_vsi * vsi,uint8_t enabled_tcmap)5232a9643ea8Slogwang i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5233a9643ea8Slogwang {
5234a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5235a9643ea8Slogwang 	int i, ret;
5236a9643ea8Slogwang 	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5237a9643ea8Slogwang 
5238a9643ea8Slogwang 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5239a9643ea8Slogwang 	if (ret != I40E_SUCCESS)
5240a9643ea8Slogwang 		return ret;
5241a9643ea8Slogwang 
5242a9643ea8Slogwang 	if (!vsi->seid) {
5243a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "seid not valid");
5244a9643ea8Slogwang 		return -EINVAL;
5245a9643ea8Slogwang 	}
5246a9643ea8Slogwang 
5247a9643ea8Slogwang 	memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5248a9643ea8Slogwang 	tc_bw_data.tc_valid_bits = enabled_tcmap;
5249a9643ea8Slogwang 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5250a9643ea8Slogwang 		tc_bw_data.tc_bw_credits[i] =
5251a9643ea8Slogwang 			(enabled_tcmap & (1 << i)) ? 1 : 0;
5252a9643ea8Slogwang 
5253a9643ea8Slogwang 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5254a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
5255a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5256a9643ea8Slogwang 		return ret;
5257a9643ea8Slogwang 	}
5258a9643ea8Slogwang 
52592bfe3f2eSlogwang 	rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5260a9643ea8Slogwang 					sizeof(vsi->info.qs_handle));
5261a9643ea8Slogwang 	return I40E_SUCCESS;
5262a9643ea8Slogwang }
5263a9643ea8Slogwang 
5264a9643ea8Slogwang static enum i40e_status_code
i40e_vsi_config_tc_queue_mapping(struct i40e_vsi * vsi,struct i40e_aqc_vsi_properties_data * info,uint8_t enabled_tcmap)5265a9643ea8Slogwang i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
5266a9643ea8Slogwang 				 struct i40e_aqc_vsi_properties_data *info,
5267a9643ea8Slogwang 				 uint8_t enabled_tcmap)
5268a9643ea8Slogwang {
5269a9643ea8Slogwang 	enum i40e_status_code ret;
5270a9643ea8Slogwang 	int i, total_tc = 0;
5271a9643ea8Slogwang 	uint16_t qpnum_per_tc, bsf, qp_idx;
5272a9643ea8Slogwang 
5273a9643ea8Slogwang 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5274a9643ea8Slogwang 	if (ret != I40E_SUCCESS)
5275a9643ea8Slogwang 		return ret;
5276a9643ea8Slogwang 
5277a9643ea8Slogwang 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5278a9643ea8Slogwang 		if (enabled_tcmap & (1 << i))
5279a9643ea8Slogwang 			total_tc++;
52802bfe3f2eSlogwang 	if (total_tc == 0)
52812bfe3f2eSlogwang 		total_tc = 1;
5282a9643ea8Slogwang 	vsi->enabled_tc = enabled_tcmap;
5283a9643ea8Slogwang 
5284a9643ea8Slogwang 	/* Number of queues per enabled TC */
5285a9643ea8Slogwang 	qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
5286a9643ea8Slogwang 	qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
5287a9643ea8Slogwang 	bsf = rte_bsf32(qpnum_per_tc);
5288a9643ea8Slogwang 
5289a9643ea8Slogwang 	/* Adjust the queue number to actual queues that can be applied */
5290a9643ea8Slogwang 	if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
5291a9643ea8Slogwang 		vsi->nb_qps = qpnum_per_tc * total_tc;
5292a9643ea8Slogwang 
5293a9643ea8Slogwang 	/**
5294a9643ea8Slogwang 	 * Configure TC and queue mapping parameters, for enabled TC,
5295a9643ea8Slogwang 	 * allocate qpnum_per_tc queues to this traffic. For disabled TC,
5296a9643ea8Slogwang 	 * default queue will serve it.
5297a9643ea8Slogwang 	 */
5298a9643ea8Slogwang 	qp_idx = 0;
5299a9643ea8Slogwang 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5300a9643ea8Slogwang 		if (vsi->enabled_tc & (1 << i)) {
5301a9643ea8Slogwang 			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
5302a9643ea8Slogwang 					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5303a9643ea8Slogwang 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5304a9643ea8Slogwang 			qp_idx += qpnum_per_tc;
5305a9643ea8Slogwang 		} else
5306a9643ea8Slogwang 			info->tc_mapping[i] = 0;
5307a9643ea8Slogwang 	}
5308a9643ea8Slogwang 
5309a9643ea8Slogwang 	/* Associate queue number with VSI */
5310a9643ea8Slogwang 	if (vsi->type == I40E_VSI_SRIOV) {
5311a9643ea8Slogwang 		info->mapping_flags |=
5312a9643ea8Slogwang 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5313a9643ea8Slogwang 		for (i = 0; i < vsi->nb_qps; i++)
5314a9643ea8Slogwang 			info->queue_mapping[i] =
5315a9643ea8Slogwang 				rte_cpu_to_le_16(vsi->base_queue + i);
5316a9643ea8Slogwang 	} else {
5317a9643ea8Slogwang 		info->mapping_flags |=
5318a9643ea8Slogwang 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5319a9643ea8Slogwang 		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
5320a9643ea8Slogwang 	}
5321a9643ea8Slogwang 	info->valid_sections |=
5322a9643ea8Slogwang 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5323a9643ea8Slogwang 
5324a9643ea8Slogwang 	return I40E_SUCCESS;
5325a9643ea8Slogwang }
5326a9643ea8Slogwang 
5327a9643ea8Slogwang static int
i40e_veb_release(struct i40e_veb * veb)5328a9643ea8Slogwang i40e_veb_release(struct i40e_veb *veb)
5329a9643ea8Slogwang {
5330a9643ea8Slogwang 	struct i40e_vsi *vsi;
5331a9643ea8Slogwang 	struct i40e_hw *hw;
5332a9643ea8Slogwang 
5333a9643ea8Slogwang 	if (veb == NULL)
5334a9643ea8Slogwang 		return -EINVAL;
5335a9643ea8Slogwang 
5336a9643ea8Slogwang 	if (!TAILQ_EMPTY(&veb->head)) {
5337a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5338a9643ea8Slogwang 		return -EACCES;
5339a9643ea8Slogwang 	}
5340a9643ea8Slogwang 	/* associate_vsi field is NULL for floating VEB */
5341a9643ea8Slogwang 	if (veb->associate_vsi != NULL) {
5342a9643ea8Slogwang 		vsi = veb->associate_vsi;
5343a9643ea8Slogwang 		hw = I40E_VSI_TO_HW(vsi);
5344a9643ea8Slogwang 
5345a9643ea8Slogwang 		vsi->uplink_seid = veb->uplink_seid;
5346a9643ea8Slogwang 		vsi->veb = NULL;
5347a9643ea8Slogwang 	} else {
5348a9643ea8Slogwang 		veb->associate_pf->main_vsi->floating_veb = NULL;
5349a9643ea8Slogwang 		hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5350a9643ea8Slogwang 	}
5351a9643ea8Slogwang 
5352a9643ea8Slogwang 	i40e_aq_delete_element(hw, veb->seid, NULL);
5353a9643ea8Slogwang 	rte_free(veb);
5354a9643ea8Slogwang 	return I40E_SUCCESS;
5355a9643ea8Slogwang }
5356a9643ea8Slogwang 
5357a9643ea8Slogwang /* Setup a veb */
5358a9643ea8Slogwang static struct i40e_veb *
i40e_veb_setup(struct i40e_pf * pf,struct i40e_vsi * vsi)5359a9643ea8Slogwang i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5360a9643ea8Slogwang {
5361a9643ea8Slogwang 	struct i40e_veb *veb;
5362a9643ea8Slogwang 	int ret;
5363a9643ea8Slogwang 	struct i40e_hw *hw;
5364a9643ea8Slogwang 
5365a9643ea8Slogwang 	if (pf == NULL) {
5366a9643ea8Slogwang 		PMD_DRV_LOG(ERR,
5367a9643ea8Slogwang 			    "veb setup failed, associated PF shouldn't null");
5368a9643ea8Slogwang 		return NULL;
5369a9643ea8Slogwang 	}
5370a9643ea8Slogwang 	hw = I40E_PF_TO_HW(pf);
5371a9643ea8Slogwang 
5372a9643ea8Slogwang 	veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5373a9643ea8Slogwang 	if (!veb) {
5374a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5375a9643ea8Slogwang 		goto fail;
5376a9643ea8Slogwang 	}
5377a9643ea8Slogwang 
5378a9643ea8Slogwang 	veb->associate_vsi = vsi;
5379a9643ea8Slogwang 	veb->associate_pf = pf;
5380a9643ea8Slogwang 	TAILQ_INIT(&veb->head);
5381a9643ea8Slogwang 	veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5382a9643ea8Slogwang 
5383a9643ea8Slogwang 	/* create floating veb if vsi is NULL */
5384a9643ea8Slogwang 	if (vsi != NULL) {
5385a9643ea8Slogwang 		ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5386a9643ea8Slogwang 				      I40E_DEFAULT_TCMAP, false,
5387a9643ea8Slogwang 				      &veb->seid, false, NULL);
5388a9643ea8Slogwang 	} else {
5389a9643ea8Slogwang 		ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5390a9643ea8Slogwang 				      true, &veb->seid, false, NULL);
5391a9643ea8Slogwang 	}
5392a9643ea8Slogwang 
5393a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
5394a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5395a9643ea8Slogwang 			    hw->aq.asq_last_status);
5396a9643ea8Slogwang 		goto fail;
5397a9643ea8Slogwang 	}
53982bfe3f2eSlogwang 	veb->enabled_tc = I40E_DEFAULT_TCMAP;
5399a9643ea8Slogwang 
5400a9643ea8Slogwang 	/* get statistics index */
5401a9643ea8Slogwang 	ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5402a9643ea8Slogwang 				&veb->stats_idx, NULL, NULL, NULL);
5403a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
54042bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5405a9643ea8Slogwang 			    hw->aq.asq_last_status);
5406a9643ea8Slogwang 		goto fail;
5407a9643ea8Slogwang 	}
5408a9643ea8Slogwang 	/* Get VEB bandwidth, to be implemented */
5409a9643ea8Slogwang 	/* Now associated vsi binding to the VEB, set uplink to this VEB */
5410a9643ea8Slogwang 	if (vsi)
5411a9643ea8Slogwang 		vsi->uplink_seid = veb->seid;
5412a9643ea8Slogwang 
5413a9643ea8Slogwang 	return veb;
5414a9643ea8Slogwang fail:
5415a9643ea8Slogwang 	rte_free(veb);
5416a9643ea8Slogwang 	return NULL;
5417a9643ea8Slogwang }
5418a9643ea8Slogwang 
5419a9643ea8Slogwang int
i40e_vsi_release(struct i40e_vsi * vsi)5420a9643ea8Slogwang i40e_vsi_release(struct i40e_vsi *vsi)
5421a9643ea8Slogwang {
5422a9643ea8Slogwang 	struct i40e_pf *pf;
5423a9643ea8Slogwang 	struct i40e_hw *hw;
5424a9643ea8Slogwang 	struct i40e_vsi_list *vsi_list;
5425a9643ea8Slogwang 	void *temp;
5426a9643ea8Slogwang 	int ret;
5427a9643ea8Slogwang 	struct i40e_mac_filter *f;
5428a9643ea8Slogwang 	uint16_t user_param;
5429a9643ea8Slogwang 
5430a9643ea8Slogwang 	if (!vsi)
5431a9643ea8Slogwang 		return I40E_SUCCESS;
5432a9643ea8Slogwang 
54332bfe3f2eSlogwang 	if (!vsi->adapter)
54342bfe3f2eSlogwang 		return -EFAULT;
54352bfe3f2eSlogwang 
5436a9643ea8Slogwang 	user_param = vsi->user_param;
5437a9643ea8Slogwang 
5438a9643ea8Slogwang 	pf = I40E_VSI_TO_PF(vsi);
5439a9643ea8Slogwang 	hw = I40E_VSI_TO_HW(vsi);
5440a9643ea8Slogwang 
5441a9643ea8Slogwang 	/* VSI has child to attach, release child first */
5442a9643ea8Slogwang 	if (vsi->veb) {
5443a9643ea8Slogwang 		TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5444a9643ea8Slogwang 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5445a9643ea8Slogwang 				return -1;
5446a9643ea8Slogwang 		}
5447a9643ea8Slogwang 		i40e_veb_release(vsi->veb);
5448a9643ea8Slogwang 	}
5449a9643ea8Slogwang 
5450a9643ea8Slogwang 	if (vsi->floating_veb) {
5451a9643ea8Slogwang 		TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
5452a9643ea8Slogwang 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5453a9643ea8Slogwang 				return -1;
5454a9643ea8Slogwang 		}
5455a9643ea8Slogwang 	}
5456a9643ea8Slogwang 
5457a9643ea8Slogwang 	/* Remove all macvlan filters of the VSI */
5458a9643ea8Slogwang 	i40e_vsi_remove_all_macvlan_filter(vsi);
5459a9643ea8Slogwang 	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5460a9643ea8Slogwang 		rte_free(f);
5461a9643ea8Slogwang 
5462a9643ea8Slogwang 	if (vsi->type != I40E_VSI_MAIN &&
5463a9643ea8Slogwang 	    ((vsi->type != I40E_VSI_SRIOV) ||
5464a9643ea8Slogwang 	    !pf->floating_veb_list[user_param])) {
5465a9643ea8Slogwang 		/* Remove vsi from parent's sibling list */
5466a9643ea8Slogwang 		if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5467a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5468a9643ea8Slogwang 			return I40E_ERR_PARAM;
5469a9643ea8Slogwang 		}
5470a9643ea8Slogwang 		TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5471a9643ea8Slogwang 				&vsi->sib_vsi_list, list);
5472a9643ea8Slogwang 
5473a9643ea8Slogwang 		/* Remove all switch element of the VSI */
5474a9643ea8Slogwang 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5475a9643ea8Slogwang 		if (ret != I40E_SUCCESS)
5476a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Failed to delete element");
5477a9643ea8Slogwang 	}
5478a9643ea8Slogwang 
5479a9643ea8Slogwang 	if ((vsi->type == I40E_VSI_SRIOV) &&
5480a9643ea8Slogwang 	    pf->floating_veb_list[user_param]) {
5481a9643ea8Slogwang 		/* Remove vsi from parent's sibling list */
5482a9643ea8Slogwang 		if (vsi->parent_vsi == NULL ||
5483a9643ea8Slogwang 		    vsi->parent_vsi->floating_veb == NULL) {
5484a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5485a9643ea8Slogwang 			return I40E_ERR_PARAM;
5486a9643ea8Slogwang 		}
5487a9643ea8Slogwang 		TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5488a9643ea8Slogwang 			     &vsi->sib_vsi_list, list);
5489a9643ea8Slogwang 
5490a9643ea8Slogwang 		/* Remove all switch element of the VSI */
5491a9643ea8Slogwang 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5492a9643ea8Slogwang 		if (ret != I40E_SUCCESS)
5493a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Failed to delete element");
5494a9643ea8Slogwang 	}
5495a9643ea8Slogwang 
5496a9643ea8Slogwang 	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5497a9643ea8Slogwang 
5498a9643ea8Slogwang 	if (vsi->type != I40E_VSI_SRIOV)
5499a9643ea8Slogwang 		i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5500a9643ea8Slogwang 	rte_free(vsi);
5501a9643ea8Slogwang 
5502a9643ea8Slogwang 	return I40E_SUCCESS;
5503a9643ea8Slogwang }
5504a9643ea8Slogwang 
5505a9643ea8Slogwang static int
i40e_update_default_filter_setting(struct i40e_vsi * vsi)5506a9643ea8Slogwang i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5507a9643ea8Slogwang {
5508a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5509a9643ea8Slogwang 	struct i40e_aqc_remove_macvlan_element_data def_filter;
5510a9643ea8Slogwang 	struct i40e_mac_filter_info filter;
5511a9643ea8Slogwang 	int ret;
5512a9643ea8Slogwang 
5513a9643ea8Slogwang 	if (vsi->type != I40E_VSI_MAIN)
5514a9643ea8Slogwang 		return I40E_ERR_CONFIG;
5515a9643ea8Slogwang 	memset(&def_filter, 0, sizeof(def_filter));
55162bfe3f2eSlogwang 	rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5517a9643ea8Slogwang 					ETH_ADDR_LEN);
5518a9643ea8Slogwang 	def_filter.vlan_tag = 0;
5519a9643ea8Slogwang 	def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5520a9643ea8Slogwang 				I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5521a9643ea8Slogwang 	ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5522a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
5523a9643ea8Slogwang 		struct i40e_mac_filter *f;
55244418919fSjohnjiang 		struct rte_ether_addr *mac;
5525a9643ea8Slogwang 
55262bfe3f2eSlogwang 		PMD_DRV_LOG(DEBUG,
55272bfe3f2eSlogwang 			    "Cannot remove the default macvlan filter");
5528a9643ea8Slogwang 		/* It needs to add the permanent mac into mac list */
5529a9643ea8Slogwang 		f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5530a9643ea8Slogwang 		if (f == NULL) {
5531a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "failed to allocate memory");
5532a9643ea8Slogwang 			return I40E_ERR_NO_MEMORY;
5533a9643ea8Slogwang 		}
5534a9643ea8Slogwang 		mac = &f->mac_info.mac_addr;
55352bfe3f2eSlogwang 		rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5536a9643ea8Slogwang 				ETH_ADDR_LEN);
5537*2d9fd380Sjfb8856606 		f->mac_info.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5538a9643ea8Slogwang 		TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5539a9643ea8Slogwang 		vsi->mac_num++;
5540a9643ea8Slogwang 
5541a9643ea8Slogwang 		return ret;
5542a9643ea8Slogwang 	}
55432bfe3f2eSlogwang 	rte_memcpy(&filter.mac_addr,
55444418919fSjohnjiang 		(struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5545*2d9fd380Sjfb8856606 	filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5546a9643ea8Slogwang 	return i40e_vsi_add_mac(vsi, &filter);
5547a9643ea8Slogwang }
5548a9643ea8Slogwang 
5549a9643ea8Slogwang /*
5550a9643ea8Slogwang  * i40e_vsi_get_bw_config - Query VSI BW Information
5551a9643ea8Slogwang  * @vsi: the VSI to be queried
5552a9643ea8Slogwang  *
5553a9643ea8Slogwang  * Returns 0 on success, negative value on failure
5554a9643ea8Slogwang  */
5555a9643ea8Slogwang static enum i40e_status_code
i40e_vsi_get_bw_config(struct i40e_vsi * vsi)5556a9643ea8Slogwang i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5557a9643ea8Slogwang {
5558a9643ea8Slogwang 	struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5559a9643ea8Slogwang 	struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5560a9643ea8Slogwang 	struct i40e_hw *hw = &vsi->adapter->hw;
5561a9643ea8Slogwang 	i40e_status ret;
5562a9643ea8Slogwang 	int i;
5563a9643ea8Slogwang 	uint32_t bw_max;
5564a9643ea8Slogwang 
5565a9643ea8Slogwang 	memset(&bw_config, 0, sizeof(bw_config));
5566a9643ea8Slogwang 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5567a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
5568a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5569a9643ea8Slogwang 			    hw->aq.asq_last_status);
5570a9643ea8Slogwang 		return ret;
5571a9643ea8Slogwang 	}
5572a9643ea8Slogwang 
5573a9643ea8Slogwang 	memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5574a9643ea8Slogwang 	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5575a9643ea8Slogwang 					&ets_sla_config, NULL);
5576a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
55772bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
55782bfe3f2eSlogwang 			"VSI failed to get TC bandwdith configuration %u",
55792bfe3f2eSlogwang 			hw->aq.asq_last_status);
5580a9643ea8Slogwang 		return ret;
5581a9643ea8Slogwang 	}
5582a9643ea8Slogwang 
5583a9643ea8Slogwang 	/* store and print out BW info */
5584a9643ea8Slogwang 	vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5585a9643ea8Slogwang 	vsi->bw_info.bw_max = bw_config.max_bw;
5586a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5587a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5588a9643ea8Slogwang 	bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5589a9643ea8Slogwang 		    (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5590a9643ea8Slogwang 		     I40E_16_BIT_WIDTH);
5591a9643ea8Slogwang 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5592a9643ea8Slogwang 		vsi->bw_info.bw_ets_share_credits[i] =
5593a9643ea8Slogwang 				ets_sla_config.share_credits[i];
5594a9643ea8Slogwang 		vsi->bw_info.bw_ets_credits[i] =
5595a9643ea8Slogwang 				rte_le_to_cpu_16(ets_sla_config.credits[i]);
5596a9643ea8Slogwang 		/* 4 bits per TC, 4th bit is reserved */
5597a9643ea8Slogwang 		vsi->bw_info.bw_ets_max[i] =
5598a9643ea8Slogwang 			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5599a9643ea8Slogwang 				  RTE_LEN2MASK(3, uint8_t));
5600a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5601a9643ea8Slogwang 			    vsi->bw_info.bw_ets_share_credits[i]);
5602a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5603a9643ea8Slogwang 			    vsi->bw_info.bw_ets_credits[i]);
5604a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5605a9643ea8Slogwang 			    vsi->bw_info.bw_ets_max[i]);
5606a9643ea8Slogwang 	}
5607a9643ea8Slogwang 
5608a9643ea8Slogwang 	return I40E_SUCCESS;
5609a9643ea8Slogwang }
5610a9643ea8Slogwang 
5611a9643ea8Slogwang /* i40e_enable_pf_lb
5612a9643ea8Slogwang  * @pf: pointer to the pf structure
5613a9643ea8Slogwang  *
5614a9643ea8Slogwang  * allow loopback on pf
5615a9643ea8Slogwang  */
5616a9643ea8Slogwang static inline void
i40e_enable_pf_lb(struct i40e_pf * pf)5617a9643ea8Slogwang i40e_enable_pf_lb(struct i40e_pf *pf)
5618a9643ea8Slogwang {
5619a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5620a9643ea8Slogwang 	struct i40e_vsi_context ctxt;
5621a9643ea8Slogwang 	int ret;
5622a9643ea8Slogwang 
5623a9643ea8Slogwang 	/* Use the FW API if FW >= v5.0 */
5624d30ea906Sjfb8856606 	if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5625a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5626a9643ea8Slogwang 		return;
5627a9643ea8Slogwang 	}
5628a9643ea8Slogwang 
5629a9643ea8Slogwang 	memset(&ctxt, 0, sizeof(ctxt));
5630a9643ea8Slogwang 	ctxt.seid = pf->main_vsi_seid;
5631a9643ea8Slogwang 	ctxt.pf_num = hw->pf_id;
5632a9643ea8Slogwang 	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5633a9643ea8Slogwang 	if (ret) {
5634a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5635a9643ea8Slogwang 			    ret, hw->aq.asq_last_status);
5636a9643ea8Slogwang 		return;
5637a9643ea8Slogwang 	}
5638a9643ea8Slogwang 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5639a9643ea8Slogwang 	ctxt.info.valid_sections =
5640a9643ea8Slogwang 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5641a9643ea8Slogwang 	ctxt.info.switch_id |=
5642a9643ea8Slogwang 		rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5643a9643ea8Slogwang 
5644a9643ea8Slogwang 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5645a9643ea8Slogwang 	if (ret)
56462bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5647a9643ea8Slogwang 			    hw->aq.asq_last_status);
5648a9643ea8Slogwang }
5649a9643ea8Slogwang 
5650a9643ea8Slogwang /* Setup a VSI */
5651a9643ea8Slogwang struct i40e_vsi *
i40e_vsi_setup(struct i40e_pf * pf,enum i40e_vsi_type type,struct i40e_vsi * uplink_vsi,uint16_t user_param)5652a9643ea8Slogwang i40e_vsi_setup(struct i40e_pf *pf,
5653a9643ea8Slogwang 	       enum i40e_vsi_type type,
5654a9643ea8Slogwang 	       struct i40e_vsi *uplink_vsi,
5655a9643ea8Slogwang 	       uint16_t user_param)
5656a9643ea8Slogwang {
5657a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5658a9643ea8Slogwang 	struct i40e_vsi *vsi;
5659a9643ea8Slogwang 	struct i40e_mac_filter_info filter;
5660a9643ea8Slogwang 	int ret;
5661a9643ea8Slogwang 	struct i40e_vsi_context ctxt;
56624418919fSjohnjiang 	struct rte_ether_addr broadcast =
5663a9643ea8Slogwang 		{.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5664a9643ea8Slogwang 
5665a9643ea8Slogwang 	if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5666a9643ea8Slogwang 	    uplink_vsi == NULL) {
56672bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
56682bfe3f2eSlogwang 			"VSI setup failed, VSI link shouldn't be NULL");
5669a9643ea8Slogwang 		return NULL;
5670a9643ea8Slogwang 	}
5671a9643ea8Slogwang 
5672a9643ea8Slogwang 	if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
56732bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
56742bfe3f2eSlogwang 			"VSI setup failed, MAIN VSI uplink VSI should be NULL");
5675a9643ea8Slogwang 		return NULL;
5676a9643ea8Slogwang 	}
5677a9643ea8Slogwang 
5678a9643ea8Slogwang 	/* two situations
5679a9643ea8Slogwang 	 * 1.type is not MAIN and uplink vsi is not NULL
5680a9643ea8Slogwang 	 * If uplink vsi didn't setup VEB, create one first under veb field
5681a9643ea8Slogwang 	 * 2.type is SRIOV and the uplink is NULL
5682a9643ea8Slogwang 	 * If floating VEB is NULL, create one veb under floating veb field
5683a9643ea8Slogwang 	 */
5684a9643ea8Slogwang 
5685a9643ea8Slogwang 	if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5686a9643ea8Slogwang 	    uplink_vsi->veb == NULL) {
5687a9643ea8Slogwang 		uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5688a9643ea8Slogwang 
5689a9643ea8Slogwang 		if (uplink_vsi->veb == NULL) {
5690a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "VEB setup failed");
5691a9643ea8Slogwang 			return NULL;
5692a9643ea8Slogwang 		}
5693a9643ea8Slogwang 		/* set ALLOWLOOPBACk on pf, when veb is created */
5694a9643ea8Slogwang 		i40e_enable_pf_lb(pf);
5695a9643ea8Slogwang 	}
5696a9643ea8Slogwang 
5697a9643ea8Slogwang 	if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5698a9643ea8Slogwang 	    pf->main_vsi->floating_veb == NULL) {
5699a9643ea8Slogwang 		pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5700a9643ea8Slogwang 
5701a9643ea8Slogwang 		if (pf->main_vsi->floating_veb == NULL) {
5702a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "VEB setup failed");
5703a9643ea8Slogwang 			return NULL;
5704a9643ea8Slogwang 		}
5705a9643ea8Slogwang 	}
5706a9643ea8Slogwang 
5707a9643ea8Slogwang 	vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5708a9643ea8Slogwang 	if (!vsi) {
5709a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5710a9643ea8Slogwang 		return NULL;
5711a9643ea8Slogwang 	}
5712a9643ea8Slogwang 	TAILQ_INIT(&vsi->mac_list);
5713a9643ea8Slogwang 	vsi->type = type;
5714a9643ea8Slogwang 	vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5715a9643ea8Slogwang 	vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5716a9643ea8Slogwang 	vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5717a9643ea8Slogwang 	vsi->user_param = user_param;
57182bfe3f2eSlogwang 	vsi->vlan_anti_spoof_on = 0;
57192bfe3f2eSlogwang 	vsi->vlan_filter_on = 0;
5720a9643ea8Slogwang 	/* Allocate queues */
5721a9643ea8Slogwang 	switch (vsi->type) {
5722a9643ea8Slogwang 	case I40E_VSI_MAIN  :
5723a9643ea8Slogwang 		vsi->nb_qps = pf->lan_nb_qps;
5724a9643ea8Slogwang 		break;
5725a9643ea8Slogwang 	case I40E_VSI_SRIOV :
5726a9643ea8Slogwang 		vsi->nb_qps = pf->vf_nb_qps;
5727a9643ea8Slogwang 		break;
5728a9643ea8Slogwang 	case I40E_VSI_VMDQ2:
5729a9643ea8Slogwang 		vsi->nb_qps = pf->vmdq_nb_qps;
5730a9643ea8Slogwang 		break;
5731a9643ea8Slogwang 	case I40E_VSI_FDIR:
5732a9643ea8Slogwang 		vsi->nb_qps = pf->fdir_nb_qps;
5733a9643ea8Slogwang 		break;
5734a9643ea8Slogwang 	default:
5735a9643ea8Slogwang 		goto fail_mem;
5736a9643ea8Slogwang 	}
5737a9643ea8Slogwang 	/*
5738a9643ea8Slogwang 	 * The filter status descriptor is reported in rx queue 0,
5739a9643ea8Slogwang 	 * while the tx queue for fdir filter programming has no
5740a9643ea8Slogwang 	 * such constraints, can be non-zero queues.
5741a9643ea8Slogwang 	 * To simplify it, choose FDIR vsi use queue 0 pair.
5742a9643ea8Slogwang 	 * To make sure it will use queue 0 pair, queue allocation
5743a9643ea8Slogwang 	 * need be done before this function is called
5744a9643ea8Slogwang 	 */
5745a9643ea8Slogwang 	if (type != I40E_VSI_FDIR) {
5746a9643ea8Slogwang 		ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5747a9643ea8Slogwang 			if (ret < 0) {
5748a9643ea8Slogwang 				PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5749a9643ea8Slogwang 						vsi->seid, ret);
5750a9643ea8Slogwang 				goto fail_mem;
5751a9643ea8Slogwang 			}
5752a9643ea8Slogwang 			vsi->base_queue = ret;
5753a9643ea8Slogwang 	} else
5754a9643ea8Slogwang 		vsi->base_queue = I40E_FDIR_QUEUE_ID;
5755a9643ea8Slogwang 
5756a9643ea8Slogwang 	/* VF has MSIX interrupt in VF range, don't allocate here */
5757a9643ea8Slogwang 	if (type == I40E_VSI_MAIN) {
57582bfe3f2eSlogwang 		if (pf->support_multi_driver) {
57592bfe3f2eSlogwang 			/* If support multi-driver, need to use INT0 instead of
57602bfe3f2eSlogwang 			 * allocating from msix pool. The Msix pool is init from
57612bfe3f2eSlogwang 			 * INT1, so it's OK just set msix_intr to 0 and nb_msix
57622bfe3f2eSlogwang 			 * to 1 without calling i40e_res_pool_alloc.
57632bfe3f2eSlogwang 			 */
57642bfe3f2eSlogwang 			vsi->msix_intr = 0;
57652bfe3f2eSlogwang 			vsi->nb_msix = 1;
57662bfe3f2eSlogwang 		} else {
5767a9643ea8Slogwang 			ret = i40e_res_pool_alloc(&pf->msix_pool,
5768a9643ea8Slogwang 						  RTE_MIN(vsi->nb_qps,
5769a9643ea8Slogwang 						     RTE_MAX_RXTX_INTR_VEC_ID));
5770a9643ea8Slogwang 			if (ret < 0) {
57712bfe3f2eSlogwang 				PMD_DRV_LOG(ERR,
57722bfe3f2eSlogwang 					    "VSI MAIN %d get heap failed %d",
5773a9643ea8Slogwang 					    vsi->seid, ret);
5774a9643ea8Slogwang 				goto fail_queue_alloc;
5775a9643ea8Slogwang 			}
5776a9643ea8Slogwang 			vsi->msix_intr = ret;
57772bfe3f2eSlogwang 			vsi->nb_msix = RTE_MIN(vsi->nb_qps,
57782bfe3f2eSlogwang 					       RTE_MAX_RXTX_INTR_VEC_ID);
57792bfe3f2eSlogwang 		}
5780a9643ea8Slogwang 	} else if (type != I40E_VSI_SRIOV) {
5781a9643ea8Slogwang 		ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5782a9643ea8Slogwang 		if (ret < 0) {
5783a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
57840c6bd470Sfengbojiang 			if (type != I40E_VSI_FDIR)
5785a9643ea8Slogwang 				goto fail_queue_alloc;
57860c6bd470Sfengbojiang 			vsi->msix_intr = 0;
57870c6bd470Sfengbojiang 			vsi->nb_msix = 0;
57880c6bd470Sfengbojiang 		} else {
5789a9643ea8Slogwang 			vsi->msix_intr = ret;
5790a9643ea8Slogwang 			vsi->nb_msix = 1;
57910c6bd470Sfengbojiang 		}
5792a9643ea8Slogwang 	} else {
5793a9643ea8Slogwang 		vsi->msix_intr = 0;
5794a9643ea8Slogwang 		vsi->nb_msix = 0;
5795a9643ea8Slogwang 	}
5796a9643ea8Slogwang 
5797a9643ea8Slogwang 	/* Add VSI */
5798a9643ea8Slogwang 	if (type == I40E_VSI_MAIN) {
5799a9643ea8Slogwang 		/* For main VSI, no need to add since it's default one */
5800a9643ea8Slogwang 		vsi->uplink_seid = pf->mac_seid;
5801a9643ea8Slogwang 		vsi->seid = pf->main_vsi_seid;
5802a9643ea8Slogwang 		/* Bind queues with specific MSIX interrupt */
5803a9643ea8Slogwang 		/**
5804a9643ea8Slogwang 		 * Needs 2 interrupt at least, one for misc cause which will
5805a9643ea8Slogwang 		 * enabled from OS side, Another for queues binding the
5806a9643ea8Slogwang 		 * interrupt from device side only.
5807a9643ea8Slogwang 		 */
5808a9643ea8Slogwang 
5809a9643ea8Slogwang 		/* Get default VSI parameters from hardware */
5810a9643ea8Slogwang 		memset(&ctxt, 0, sizeof(ctxt));
5811a9643ea8Slogwang 		ctxt.seid = vsi->seid;
5812a9643ea8Slogwang 		ctxt.pf_num = hw->pf_id;
5813a9643ea8Slogwang 		ctxt.uplink_seid = vsi->uplink_seid;
5814a9643ea8Slogwang 		ctxt.vf_num = 0;
5815a9643ea8Slogwang 		ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5816a9643ea8Slogwang 		if (ret != I40E_SUCCESS) {
5817a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Failed to get VSI params");
5818a9643ea8Slogwang 			goto fail_msix_alloc;
5819a9643ea8Slogwang 		}
58202bfe3f2eSlogwang 		rte_memcpy(&vsi->info, &ctxt.info,
5821a9643ea8Slogwang 			sizeof(struct i40e_aqc_vsi_properties_data));
5822a9643ea8Slogwang 		vsi->vsi_id = ctxt.vsi_number;
5823a9643ea8Slogwang 		vsi->info.valid_sections = 0;
5824a9643ea8Slogwang 
5825a9643ea8Slogwang 		/* Configure tc, enabled TC0 only */
5826a9643ea8Slogwang 		if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5827a9643ea8Slogwang 			I40E_SUCCESS) {
5828a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5829a9643ea8Slogwang 			goto fail_msix_alloc;
5830a9643ea8Slogwang 		}
5831a9643ea8Slogwang 
5832a9643ea8Slogwang 		/* TC, queue mapping */
5833a9643ea8Slogwang 		memset(&ctxt, 0, sizeof(ctxt));
5834a9643ea8Slogwang 		vsi->info.valid_sections |=
5835a9643ea8Slogwang 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5836a9643ea8Slogwang 		vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5837a9643ea8Slogwang 					I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
58382bfe3f2eSlogwang 		rte_memcpy(&ctxt.info, &vsi->info,
5839a9643ea8Slogwang 			sizeof(struct i40e_aqc_vsi_properties_data));
5840a9643ea8Slogwang 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5841a9643ea8Slogwang 						I40E_DEFAULT_TCMAP);
5842a9643ea8Slogwang 		if (ret != I40E_SUCCESS) {
58432bfe3f2eSlogwang 			PMD_DRV_LOG(ERR,
58442bfe3f2eSlogwang 				"Failed to configure TC queue mapping");
5845a9643ea8Slogwang 			goto fail_msix_alloc;
5846a9643ea8Slogwang 		}
5847a9643ea8Slogwang 		ctxt.seid = vsi->seid;
5848a9643ea8Slogwang 		ctxt.pf_num = hw->pf_id;
5849a9643ea8Slogwang 		ctxt.uplink_seid = vsi->uplink_seid;
5850a9643ea8Slogwang 		ctxt.vf_num = 0;
5851a9643ea8Slogwang 
5852a9643ea8Slogwang 		/* Update VSI parameters */
5853a9643ea8Slogwang 		ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5854a9643ea8Slogwang 		if (ret != I40E_SUCCESS) {
5855a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Failed to update VSI params");
5856a9643ea8Slogwang 			goto fail_msix_alloc;
5857a9643ea8Slogwang 		}
5858a9643ea8Slogwang 
58592bfe3f2eSlogwang 		rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5860a9643ea8Slogwang 						sizeof(vsi->info.tc_mapping));
58612bfe3f2eSlogwang 		rte_memcpy(&vsi->info.queue_mapping,
5862a9643ea8Slogwang 				&ctxt.info.queue_mapping,
5863a9643ea8Slogwang 			sizeof(vsi->info.queue_mapping));
5864a9643ea8Slogwang 		vsi->info.mapping_flags = ctxt.info.mapping_flags;
5865a9643ea8Slogwang 		vsi->info.valid_sections = 0;
5866a9643ea8Slogwang 
58672bfe3f2eSlogwang 		rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5868a9643ea8Slogwang 				ETH_ADDR_LEN);
5869a9643ea8Slogwang 
5870a9643ea8Slogwang 		/**
5871a9643ea8Slogwang 		 * Updating default filter settings are necessary to prevent
5872a9643ea8Slogwang 		 * reception of tagged packets.
5873a9643ea8Slogwang 		 * Some old firmware configurations load a default macvlan
5874a9643ea8Slogwang 		 * filter which accepts both tagged and untagged packets.
5875a9643ea8Slogwang 		 * The updating is to use a normal filter instead if needed.
5876a9643ea8Slogwang 		 * For NVM 4.2.2 or after, the updating is not needed anymore.
5877a9643ea8Slogwang 		 * The firmware with correct configurations load the default
5878a9643ea8Slogwang 		 * macvlan filter which is expected and cannot be removed.
5879a9643ea8Slogwang 		 */
5880a9643ea8Slogwang 		i40e_update_default_filter_setting(vsi);
5881a9643ea8Slogwang 		i40e_config_qinq(hw, vsi);
5882a9643ea8Slogwang 	} else if (type == I40E_VSI_SRIOV) {
5883a9643ea8Slogwang 		memset(&ctxt, 0, sizeof(ctxt));
5884a9643ea8Slogwang 		/**
5885a9643ea8Slogwang 		 * For other VSI, the uplink_seid equals to uplink VSI's
5886a9643ea8Slogwang 		 * uplink_seid since they share same VEB
5887a9643ea8Slogwang 		 */
5888a9643ea8Slogwang 		if (uplink_vsi == NULL)
5889a9643ea8Slogwang 			vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5890a9643ea8Slogwang 		else
5891a9643ea8Slogwang 			vsi->uplink_seid = uplink_vsi->uplink_seid;
5892a9643ea8Slogwang 		ctxt.pf_num = hw->pf_id;
5893a9643ea8Slogwang 		ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5894a9643ea8Slogwang 		ctxt.uplink_seid = vsi->uplink_seid;
5895a9643ea8Slogwang 		ctxt.connection_type = 0x1;
5896a9643ea8Slogwang 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5897a9643ea8Slogwang 
5898a9643ea8Slogwang 		/* Use the VEB configuration if FW >= v5.0 */
5899d30ea906Sjfb8856606 		if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
5900a9643ea8Slogwang 			/* Configure switch ID */
5901a9643ea8Slogwang 			ctxt.info.valid_sections |=
5902a9643ea8Slogwang 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5903a9643ea8Slogwang 			ctxt.info.switch_id =
5904a9643ea8Slogwang 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5905a9643ea8Slogwang 		}
5906a9643ea8Slogwang 
5907a9643ea8Slogwang 		/* Configure port/vlan */
5908a9643ea8Slogwang 		ctxt.info.valid_sections |=
5909a9643ea8Slogwang 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5910a9643ea8Slogwang 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5911a9643ea8Slogwang 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
59122bfe3f2eSlogwang 						hw->func_caps.enabled_tcmap);
5913a9643ea8Slogwang 		if (ret != I40E_SUCCESS) {
59142bfe3f2eSlogwang 			PMD_DRV_LOG(ERR,
59152bfe3f2eSlogwang 				"Failed to configure TC queue mapping");
5916a9643ea8Slogwang 			goto fail_msix_alloc;
5917a9643ea8Slogwang 		}
59182bfe3f2eSlogwang 
59192bfe3f2eSlogwang 		ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5920a9643ea8Slogwang 		ctxt.info.valid_sections |=
5921a9643ea8Slogwang 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5922a9643ea8Slogwang 		/**
5923a9643ea8Slogwang 		 * Since VSI is not created yet, only configure parameter,
5924a9643ea8Slogwang 		 * will add vsi below.
5925a9643ea8Slogwang 		 */
5926a9643ea8Slogwang 
5927a9643ea8Slogwang 		i40e_config_qinq(hw, vsi);
5928a9643ea8Slogwang 	} else if (type == I40E_VSI_VMDQ2) {
5929a9643ea8Slogwang 		memset(&ctxt, 0, sizeof(ctxt));
5930a9643ea8Slogwang 		/*
5931a9643ea8Slogwang 		 * For other VSI, the uplink_seid equals to uplink VSI's
5932a9643ea8Slogwang 		 * uplink_seid since they share same VEB
5933a9643ea8Slogwang 		 */
5934a9643ea8Slogwang 		vsi->uplink_seid = uplink_vsi->uplink_seid;
5935a9643ea8Slogwang 		ctxt.pf_num = hw->pf_id;
5936a9643ea8Slogwang 		ctxt.vf_num = 0;
5937a9643ea8Slogwang 		ctxt.uplink_seid = vsi->uplink_seid;
5938a9643ea8Slogwang 		ctxt.connection_type = 0x1;
5939a9643ea8Slogwang 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5940a9643ea8Slogwang 
5941a9643ea8Slogwang 		ctxt.info.valid_sections |=
5942a9643ea8Slogwang 				rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5943a9643ea8Slogwang 		/* user_param carries flag to enable loop back */
5944a9643ea8Slogwang 		if (user_param) {
5945a9643ea8Slogwang 			ctxt.info.switch_id =
5946a9643ea8Slogwang 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5947a9643ea8Slogwang 			ctxt.info.switch_id |=
5948a9643ea8Slogwang 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5949a9643ea8Slogwang 		}
5950a9643ea8Slogwang 
5951a9643ea8Slogwang 		/* Configure port/vlan */
5952a9643ea8Slogwang 		ctxt.info.valid_sections |=
5953a9643ea8Slogwang 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5954a9643ea8Slogwang 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5955a9643ea8Slogwang 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5956a9643ea8Slogwang 						I40E_DEFAULT_TCMAP);
5957a9643ea8Slogwang 		if (ret != I40E_SUCCESS) {
59582bfe3f2eSlogwang 			PMD_DRV_LOG(ERR,
59592bfe3f2eSlogwang 				"Failed to configure TC queue mapping");
5960a9643ea8Slogwang 			goto fail_msix_alloc;
5961a9643ea8Slogwang 		}
5962a9643ea8Slogwang 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5963a9643ea8Slogwang 		ctxt.info.valid_sections |=
5964a9643ea8Slogwang 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5965a9643ea8Slogwang 	} else if (type == I40E_VSI_FDIR) {
5966a9643ea8Slogwang 		memset(&ctxt, 0, sizeof(ctxt));
5967a9643ea8Slogwang 		vsi->uplink_seid = uplink_vsi->uplink_seid;
5968a9643ea8Slogwang 		ctxt.pf_num = hw->pf_id;
5969a9643ea8Slogwang 		ctxt.vf_num = 0;
5970a9643ea8Slogwang 		ctxt.uplink_seid = vsi->uplink_seid;
5971a9643ea8Slogwang 		ctxt.connection_type = 0x1;     /* regular data port */
5972a9643ea8Slogwang 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5973a9643ea8Slogwang 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5974a9643ea8Slogwang 						I40E_DEFAULT_TCMAP);
5975a9643ea8Slogwang 		if (ret != I40E_SUCCESS) {
59762bfe3f2eSlogwang 			PMD_DRV_LOG(ERR,
59772bfe3f2eSlogwang 				"Failed to configure TC queue mapping.");
5978a9643ea8Slogwang 			goto fail_msix_alloc;
5979a9643ea8Slogwang 		}
5980a9643ea8Slogwang 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5981a9643ea8Slogwang 		ctxt.info.valid_sections |=
5982a9643ea8Slogwang 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5983a9643ea8Slogwang 	} else {
5984a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
5985a9643ea8Slogwang 		goto fail_msix_alloc;
5986a9643ea8Slogwang 	}
5987a9643ea8Slogwang 
5988a9643ea8Slogwang 	if (vsi->type != I40E_VSI_MAIN) {
5989a9643ea8Slogwang 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5990a9643ea8Slogwang 		if (ret != I40E_SUCCESS) {
5991a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5992a9643ea8Slogwang 				    hw->aq.asq_last_status);
5993a9643ea8Slogwang 			goto fail_msix_alloc;
5994a9643ea8Slogwang 		}
5995a9643ea8Slogwang 		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5996a9643ea8Slogwang 		vsi->info.valid_sections = 0;
5997a9643ea8Slogwang 		vsi->seid = ctxt.seid;
5998a9643ea8Slogwang 		vsi->vsi_id = ctxt.vsi_number;
5999a9643ea8Slogwang 		vsi->sib_vsi_list.vsi = vsi;
6000a9643ea8Slogwang 		if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
6001a9643ea8Slogwang 			TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
6002a9643ea8Slogwang 					  &vsi->sib_vsi_list, list);
6003a9643ea8Slogwang 		} else {
6004a9643ea8Slogwang 			TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
6005a9643ea8Slogwang 					  &vsi->sib_vsi_list, list);
6006a9643ea8Slogwang 		}
6007a9643ea8Slogwang 	}
6008a9643ea8Slogwang 
6009a9643ea8Slogwang 	/* MAC/VLAN configuration */
60104418919fSjohnjiang 	rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
6011*2d9fd380Sjfb8856606 	filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
6012a9643ea8Slogwang 
6013a9643ea8Slogwang 	ret = i40e_vsi_add_mac(vsi, &filter);
6014a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
6015a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
6016a9643ea8Slogwang 		goto fail_msix_alloc;
6017a9643ea8Slogwang 	}
6018a9643ea8Slogwang 
6019a9643ea8Slogwang 	/* Get VSI BW information */
6020a9643ea8Slogwang 	i40e_vsi_get_bw_config(vsi);
6021a9643ea8Slogwang 	return vsi;
6022a9643ea8Slogwang fail_msix_alloc:
6023a9643ea8Slogwang 	i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
6024a9643ea8Slogwang fail_queue_alloc:
6025a9643ea8Slogwang 	i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
6026a9643ea8Slogwang fail_mem:
6027a9643ea8Slogwang 	rte_free(vsi);
6028a9643ea8Slogwang 	return NULL;
6029a9643ea8Slogwang }
6030a9643ea8Slogwang 
6031a9643ea8Slogwang /* Configure vlan filter on or off */
6032a9643ea8Slogwang int
i40e_vsi_config_vlan_filter(struct i40e_vsi * vsi,bool on)6033a9643ea8Slogwang i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
6034a9643ea8Slogwang {
6035a9643ea8Slogwang 	int i, num;
6036a9643ea8Slogwang 	struct i40e_mac_filter *f;
6037a9643ea8Slogwang 	void *temp;
6038a9643ea8Slogwang 	struct i40e_mac_filter_info *mac_filter;
6039*2d9fd380Sjfb8856606 	enum i40e_mac_filter_type desired_filter;
6040a9643ea8Slogwang 	int ret = I40E_SUCCESS;
6041a9643ea8Slogwang 
6042a9643ea8Slogwang 	if (on) {
6043a9643ea8Slogwang 		/* Filter to match MAC and VLAN */
6044*2d9fd380Sjfb8856606 		desired_filter = I40E_MACVLAN_PERFECT_MATCH;
6045a9643ea8Slogwang 	} else {
6046a9643ea8Slogwang 		/* Filter to match only MAC */
6047*2d9fd380Sjfb8856606 		desired_filter = I40E_MAC_PERFECT_MATCH;
6048a9643ea8Slogwang 	}
6049a9643ea8Slogwang 
6050a9643ea8Slogwang 	num = vsi->mac_num;
6051a9643ea8Slogwang 
6052a9643ea8Slogwang 	mac_filter = rte_zmalloc("mac_filter_info_data",
6053a9643ea8Slogwang 				 num * sizeof(*mac_filter), 0);
6054a9643ea8Slogwang 	if (mac_filter == NULL) {
6055a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "failed to allocate memory");
6056a9643ea8Slogwang 		return I40E_ERR_NO_MEMORY;
6057a9643ea8Slogwang 	}
6058a9643ea8Slogwang 
6059a9643ea8Slogwang 	i = 0;
6060a9643ea8Slogwang 
6061a9643ea8Slogwang 	/* Remove all existing mac */
6062a9643ea8Slogwang 	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
6063a9643ea8Slogwang 		mac_filter[i] = f->mac_info;
6064a9643ea8Slogwang 		ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
6065a9643ea8Slogwang 		if (ret) {
6066a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6067a9643ea8Slogwang 				    on ? "enable" : "disable");
6068a9643ea8Slogwang 			goto DONE;
6069a9643ea8Slogwang 		}
6070a9643ea8Slogwang 		i++;
6071a9643ea8Slogwang 	}
6072a9643ea8Slogwang 
6073a9643ea8Slogwang 	/* Override with new filter */
6074a9643ea8Slogwang 	for (i = 0; i < num; i++) {
6075a9643ea8Slogwang 		mac_filter[i].filter_type = desired_filter;
6076a9643ea8Slogwang 		ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
6077a9643ea8Slogwang 		if (ret) {
6078a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6079a9643ea8Slogwang 				    on ? "enable" : "disable");
6080a9643ea8Slogwang 			goto DONE;
6081a9643ea8Slogwang 		}
6082a9643ea8Slogwang 	}
6083a9643ea8Slogwang 
6084a9643ea8Slogwang DONE:
6085a9643ea8Slogwang 	rte_free(mac_filter);
6086a9643ea8Slogwang 	return ret;
6087a9643ea8Slogwang }
6088a9643ea8Slogwang 
6089a9643ea8Slogwang /* Configure vlan stripping on or off */
6090a9643ea8Slogwang int
i40e_vsi_config_vlan_stripping(struct i40e_vsi * vsi,bool on)6091a9643ea8Slogwang i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
6092a9643ea8Slogwang {
6093a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6094a9643ea8Slogwang 	struct i40e_vsi_context ctxt;
6095a9643ea8Slogwang 	uint8_t vlan_flags;
6096a9643ea8Slogwang 	int ret = I40E_SUCCESS;
6097a9643ea8Slogwang 
6098a9643ea8Slogwang 	/* Check if it has been already on or off */
6099a9643ea8Slogwang 	if (vsi->info.valid_sections &
6100a9643ea8Slogwang 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
6101a9643ea8Slogwang 		if (on) {
6102a9643ea8Slogwang 			if ((vsi->info.port_vlan_flags &
6103a9643ea8Slogwang 				I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
6104a9643ea8Slogwang 				return 0; /* already on */
6105a9643ea8Slogwang 		} else {
6106a9643ea8Slogwang 			if ((vsi->info.port_vlan_flags &
6107a9643ea8Slogwang 				I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
6108a9643ea8Slogwang 				I40E_AQ_VSI_PVLAN_EMOD_MASK)
6109a9643ea8Slogwang 				return 0; /* already off */
6110a9643ea8Slogwang 		}
6111a9643ea8Slogwang 	}
6112a9643ea8Slogwang 
6113a9643ea8Slogwang 	if (on)
6114a9643ea8Slogwang 		vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6115a9643ea8Slogwang 	else
6116a9643ea8Slogwang 		vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
6117a9643ea8Slogwang 	vsi->info.valid_sections =
6118a9643ea8Slogwang 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6119a9643ea8Slogwang 	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
6120a9643ea8Slogwang 	vsi->info.port_vlan_flags |= vlan_flags;
6121a9643ea8Slogwang 	ctxt.seid = vsi->seid;
61222bfe3f2eSlogwang 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
6123a9643ea8Slogwang 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6124a9643ea8Slogwang 	if (ret)
6125a9643ea8Slogwang 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
6126a9643ea8Slogwang 			    on ? "enable" : "disable");
6127a9643ea8Slogwang 
6128a9643ea8Slogwang 	return ret;
6129a9643ea8Slogwang }
6130a9643ea8Slogwang 
6131a9643ea8Slogwang static int
i40e_dev_init_vlan(struct rte_eth_dev * dev)6132a9643ea8Slogwang i40e_dev_init_vlan(struct rte_eth_dev *dev)
6133a9643ea8Slogwang {
6134a9643ea8Slogwang 	struct rte_eth_dev_data *data = dev->data;
6135a9643ea8Slogwang 	int ret;
6136a9643ea8Slogwang 	int mask = 0;
6137a9643ea8Slogwang 
6138a9643ea8Slogwang 	/* Apply vlan offload setting */
61392bfe3f2eSlogwang 	mask = ETH_VLAN_STRIP_MASK |
61400c6bd470Sfengbojiang 	       ETH_QINQ_STRIP_MASK |
61412bfe3f2eSlogwang 	       ETH_VLAN_FILTER_MASK |
61422bfe3f2eSlogwang 	       ETH_VLAN_EXTEND_MASK;
61432bfe3f2eSlogwang 	ret = i40e_vlan_offload_set(dev, mask);
61442bfe3f2eSlogwang 	if (ret) {
61452bfe3f2eSlogwang 		PMD_DRV_LOG(INFO, "Failed to update vlan offload");
61462bfe3f2eSlogwang 		return ret;
61472bfe3f2eSlogwang 	}
6148a9643ea8Slogwang 
6149a9643ea8Slogwang 	/* Apply pvid setting */
6150a9643ea8Slogwang 	ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
6151a9643ea8Slogwang 				data->dev_conf.txmode.hw_vlan_insert_pvid);
6152a9643ea8Slogwang 	if (ret)
6153a9643ea8Slogwang 		PMD_DRV_LOG(INFO, "Failed to update VSI params");
6154a9643ea8Slogwang 
6155a9643ea8Slogwang 	return ret;
6156a9643ea8Slogwang }
6157a9643ea8Slogwang 
6158a9643ea8Slogwang static int
i40e_vsi_config_double_vlan(struct i40e_vsi * vsi,int on)6159a9643ea8Slogwang i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
6160a9643ea8Slogwang {
6161a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6162a9643ea8Slogwang 
6163a9643ea8Slogwang 	return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
6164a9643ea8Slogwang }
6165a9643ea8Slogwang 
6166a9643ea8Slogwang static int
i40e_update_flow_control(struct i40e_hw * hw)6167a9643ea8Slogwang i40e_update_flow_control(struct i40e_hw *hw)
6168a9643ea8Slogwang {
6169a9643ea8Slogwang #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
6170a9643ea8Slogwang 	struct i40e_link_status link_status;
6171a9643ea8Slogwang 	uint32_t rxfc = 0, txfc = 0, reg;
6172a9643ea8Slogwang 	uint8_t an_info;
6173a9643ea8Slogwang 	int ret;
6174a9643ea8Slogwang 
6175a9643ea8Slogwang 	memset(&link_status, 0, sizeof(link_status));
6176a9643ea8Slogwang 	ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
6177a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
6178a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to get link status information");
6179a9643ea8Slogwang 		goto write_reg; /* Disable flow control */
6180a9643ea8Slogwang 	}
6181a9643ea8Slogwang 
6182a9643ea8Slogwang 	an_info = hw->phy.link_info.an_info;
6183a9643ea8Slogwang 	if (!(an_info & I40E_AQ_AN_COMPLETED)) {
6184a9643ea8Slogwang 		PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
6185a9643ea8Slogwang 		ret = I40E_ERR_NOT_READY;
6186a9643ea8Slogwang 		goto write_reg; /* Disable flow control */
6187a9643ea8Slogwang 	}
6188a9643ea8Slogwang 	/**
6189a9643ea8Slogwang 	 * If link auto negotiation is enabled, flow control needs to
6190a9643ea8Slogwang 	 * be configured according to it
6191a9643ea8Slogwang 	 */
6192a9643ea8Slogwang 	switch (an_info & I40E_LINK_PAUSE_RXTX) {
6193a9643ea8Slogwang 	case I40E_LINK_PAUSE_RXTX:
6194a9643ea8Slogwang 		rxfc = 1;
6195a9643ea8Slogwang 		txfc = 1;
6196a9643ea8Slogwang 		hw->fc.current_mode = I40E_FC_FULL;
6197a9643ea8Slogwang 		break;
6198a9643ea8Slogwang 	case I40E_AQ_LINK_PAUSE_RX:
6199a9643ea8Slogwang 		rxfc = 1;
6200a9643ea8Slogwang 		hw->fc.current_mode = I40E_FC_RX_PAUSE;
6201a9643ea8Slogwang 		break;
6202a9643ea8Slogwang 	case I40E_AQ_LINK_PAUSE_TX:
6203a9643ea8Slogwang 		txfc = 1;
6204a9643ea8Slogwang 		hw->fc.current_mode = I40E_FC_TX_PAUSE;
6205a9643ea8Slogwang 		break;
6206a9643ea8Slogwang 	default:
6207a9643ea8Slogwang 		hw->fc.current_mode = I40E_FC_NONE;
6208a9643ea8Slogwang 		break;
6209a9643ea8Slogwang 	}
6210a9643ea8Slogwang 
6211a9643ea8Slogwang write_reg:
6212a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
6213a9643ea8Slogwang 		txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
6214a9643ea8Slogwang 	reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
6215a9643ea8Slogwang 	reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
6216a9643ea8Slogwang 	reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
6217a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
6218a9643ea8Slogwang 
6219a9643ea8Slogwang 	return ret;
6220a9643ea8Slogwang }
6221a9643ea8Slogwang 
6222a9643ea8Slogwang /* PF setup */
6223a9643ea8Slogwang static int
i40e_pf_setup(struct i40e_pf * pf)6224a9643ea8Slogwang i40e_pf_setup(struct i40e_pf *pf)
6225a9643ea8Slogwang {
6226a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6227a9643ea8Slogwang 	struct i40e_filter_control_settings settings;
6228a9643ea8Slogwang 	struct i40e_vsi *vsi;
6229a9643ea8Slogwang 	int ret;
6230a9643ea8Slogwang 
6231a9643ea8Slogwang 	/* Clear all stats counters */
6232a9643ea8Slogwang 	pf->offset_loaded = FALSE;
6233a9643ea8Slogwang 	memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
6234a9643ea8Slogwang 	memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
62352bfe3f2eSlogwang 	memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
62362bfe3f2eSlogwang 	memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
6237a9643ea8Slogwang 
6238a9643ea8Slogwang 	ret = i40e_pf_get_switch_config(pf);
6239a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
6240a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
6241a9643ea8Slogwang 		return ret;
6242a9643ea8Slogwang 	}
6243d30ea906Sjfb8856606 
6244d30ea906Sjfb8856606 	ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
6245d30ea906Sjfb8856606 	if (ret)
6246d30ea906Sjfb8856606 		PMD_INIT_LOG(WARNING,
6247d30ea906Sjfb8856606 			"failed to allocate switch domain for device %d", ret);
6248d30ea906Sjfb8856606 
6249a9643ea8Slogwang 	if (pf->flags & I40E_FLAG_FDIR) {
6250a9643ea8Slogwang 		/* make queue allocated first, let FDIR use queue pair 0*/
6251a9643ea8Slogwang 		ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
6252a9643ea8Slogwang 		if (ret != I40E_FDIR_QUEUE_ID) {
62532bfe3f2eSlogwang 			PMD_DRV_LOG(ERR,
62542bfe3f2eSlogwang 				"queue allocation fails for FDIR: ret =%d",
62552bfe3f2eSlogwang 				ret);
6256a9643ea8Slogwang 			pf->flags &= ~I40E_FLAG_FDIR;
6257a9643ea8Slogwang 		}
6258a9643ea8Slogwang 	}
6259a9643ea8Slogwang 	/*  main VSI setup */
6260a9643ea8Slogwang 	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
6261a9643ea8Slogwang 	if (!vsi) {
6262a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Setup of main vsi failed");
6263a9643ea8Slogwang 		return I40E_ERR_NOT_READY;
6264a9643ea8Slogwang 	}
6265a9643ea8Slogwang 	pf->main_vsi = vsi;
6266a9643ea8Slogwang 
6267a9643ea8Slogwang 	/* Configure filter control */
6268a9643ea8Slogwang 	memset(&settings, 0, sizeof(settings));
6269a9643ea8Slogwang 	if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
6270a9643ea8Slogwang 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
6271a9643ea8Slogwang 	else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
6272a9643ea8Slogwang 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
6273a9643ea8Slogwang 	else {
62742bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
6275a9643ea8Slogwang 			hw->func_caps.rss_table_size);
6276a9643ea8Slogwang 		return I40E_ERR_PARAM;
6277a9643ea8Slogwang 	}
62782bfe3f2eSlogwang 	PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
62792bfe3f2eSlogwang 		hw->func_caps.rss_table_size);
6280a9643ea8Slogwang 	pf->hash_lut_size = hw->func_caps.rss_table_size;
6281a9643ea8Slogwang 
6282a9643ea8Slogwang 	/* Enable ethtype and macvlan filters */
6283a9643ea8Slogwang 	settings.enable_ethtype = TRUE;
6284a9643ea8Slogwang 	settings.enable_macvlan = TRUE;
6285a9643ea8Slogwang 	ret = i40e_set_filter_control(hw, &settings);
6286a9643ea8Slogwang 	if (ret)
6287a9643ea8Slogwang 		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
6288a9643ea8Slogwang 								ret);
6289a9643ea8Slogwang 
6290a9643ea8Slogwang 	/* Update flow control according to the auto negotiation */
6291a9643ea8Slogwang 	i40e_update_flow_control(hw);
6292a9643ea8Slogwang 
6293a9643ea8Slogwang 	return I40E_SUCCESS;
6294a9643ea8Slogwang }
6295a9643ea8Slogwang 
6296a9643ea8Slogwang int
i40e_switch_tx_queue(struct i40e_hw * hw,uint16_t q_idx,bool on)6297a9643ea8Slogwang i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6298a9643ea8Slogwang {
6299a9643ea8Slogwang 	uint32_t reg;
6300a9643ea8Slogwang 	uint16_t j;
6301a9643ea8Slogwang 
6302a9643ea8Slogwang 	/**
6303a9643ea8Slogwang 	 * Set or clear TX Queue Disable flags,
6304a9643ea8Slogwang 	 * which is required by hardware.
6305a9643ea8Slogwang 	 */
6306a9643ea8Slogwang 	i40e_pre_tx_queue_cfg(hw, q_idx, on);
6307a9643ea8Slogwang 	rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6308a9643ea8Slogwang 
6309a9643ea8Slogwang 	/* Wait until the request is finished */
6310a9643ea8Slogwang 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6311a9643ea8Slogwang 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6312a9643ea8Slogwang 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6313a9643ea8Slogwang 		if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6314a9643ea8Slogwang 			((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
6315a9643ea8Slogwang 							& 0x1))) {
6316a9643ea8Slogwang 			break;
6317a9643ea8Slogwang 		}
6318a9643ea8Slogwang 	}
6319a9643ea8Slogwang 	if (on) {
6320a9643ea8Slogwang 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6321a9643ea8Slogwang 			return I40E_SUCCESS; /* already on, skip next steps */
6322a9643ea8Slogwang 
6323a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6324a9643ea8Slogwang 		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6325a9643ea8Slogwang 	} else {
6326a9643ea8Slogwang 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6327a9643ea8Slogwang 			return I40E_SUCCESS; /* already off, skip next steps */
6328a9643ea8Slogwang 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6329a9643ea8Slogwang 	}
6330a9643ea8Slogwang 	/* Write the register */
6331a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6332a9643ea8Slogwang 	/* Check the result */
6333a9643ea8Slogwang 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6334a9643ea8Slogwang 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6335a9643ea8Slogwang 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6336a9643ea8Slogwang 		if (on) {
6337a9643ea8Slogwang 			if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6338a9643ea8Slogwang 				(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6339a9643ea8Slogwang 				break;
6340a9643ea8Slogwang 		} else {
6341a9643ea8Slogwang 			if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6342a9643ea8Slogwang 				!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6343a9643ea8Slogwang 				break;
6344a9643ea8Slogwang 		}
6345a9643ea8Slogwang 	}
6346a9643ea8Slogwang 	/* Check if it is timeout */
6347a9643ea8Slogwang 	if (j >= I40E_CHK_Q_ENA_COUNT) {
6348a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6349a9643ea8Slogwang 			    (on ? "enable" : "disable"), q_idx);
6350a9643ea8Slogwang 		return I40E_ERR_TIMEOUT;
6351a9643ea8Slogwang 	}
6352a9643ea8Slogwang 
6353a9643ea8Slogwang 	return I40E_SUCCESS;
6354a9643ea8Slogwang }
6355a9643ea8Slogwang 
6356a9643ea8Slogwang int
i40e_switch_rx_queue(struct i40e_hw * hw,uint16_t q_idx,bool on)6357a9643ea8Slogwang i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6358a9643ea8Slogwang {
6359a9643ea8Slogwang 	uint32_t reg;
6360a9643ea8Slogwang 	uint16_t j;
6361a9643ea8Slogwang 
6362a9643ea8Slogwang 	/* Wait until the request is finished */
6363a9643ea8Slogwang 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6364a9643ea8Slogwang 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6365a9643ea8Slogwang 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6366a9643ea8Slogwang 		if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6367a9643ea8Slogwang 			((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
6368a9643ea8Slogwang 			break;
6369a9643ea8Slogwang 	}
6370a9643ea8Slogwang 
6371a9643ea8Slogwang 	if (on) {
6372a9643ea8Slogwang 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6373a9643ea8Slogwang 			return I40E_SUCCESS; /* Already on, skip next steps */
6374a9643ea8Slogwang 		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6375a9643ea8Slogwang 	} else {
6376a9643ea8Slogwang 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6377a9643ea8Slogwang 			return I40E_SUCCESS; /* Already off, skip next steps */
6378a9643ea8Slogwang 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6379a9643ea8Slogwang 	}
6380a9643ea8Slogwang 
6381a9643ea8Slogwang 	/* Write the register */
6382a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6383a9643ea8Slogwang 	/* Check the result */
6384a9643ea8Slogwang 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6385a9643ea8Slogwang 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6386a9643ea8Slogwang 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6387a9643ea8Slogwang 		if (on) {
6388a9643ea8Slogwang 			if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6389a9643ea8Slogwang 				(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6390a9643ea8Slogwang 				break;
6391a9643ea8Slogwang 		} else {
6392a9643ea8Slogwang 			if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6393a9643ea8Slogwang 				!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6394a9643ea8Slogwang 				break;
6395a9643ea8Slogwang 		}
6396a9643ea8Slogwang 	}
6397a9643ea8Slogwang 
6398a9643ea8Slogwang 	/* Check if it is timeout */
6399a9643ea8Slogwang 	if (j >= I40E_CHK_Q_ENA_COUNT) {
6400a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6401a9643ea8Slogwang 			    (on ? "enable" : "disable"), q_idx);
6402a9643ea8Slogwang 		return I40E_ERR_TIMEOUT;
6403a9643ea8Slogwang 	}
6404a9643ea8Slogwang 
6405a9643ea8Slogwang 	return I40E_SUCCESS;
6406a9643ea8Slogwang }
6407a9643ea8Slogwang 
6408a9643ea8Slogwang /* Initialize VSI for TX */
6409a9643ea8Slogwang static int
i40e_dev_tx_init(struct i40e_pf * pf)6410a9643ea8Slogwang i40e_dev_tx_init(struct i40e_pf *pf)
6411a9643ea8Slogwang {
6412a9643ea8Slogwang 	struct rte_eth_dev_data *data = pf->dev_data;
6413a9643ea8Slogwang 	uint16_t i;
6414a9643ea8Slogwang 	uint32_t ret = I40E_SUCCESS;
6415a9643ea8Slogwang 	struct i40e_tx_queue *txq;
6416a9643ea8Slogwang 
6417a9643ea8Slogwang 	for (i = 0; i < data->nb_tx_queues; i++) {
6418a9643ea8Slogwang 		txq = data->tx_queues[i];
6419a9643ea8Slogwang 		if (!txq || !txq->q_set)
6420a9643ea8Slogwang 			continue;
6421a9643ea8Slogwang 		ret = i40e_tx_queue_init(txq);
6422a9643ea8Slogwang 		if (ret != I40E_SUCCESS)
6423a9643ea8Slogwang 			break;
6424a9643ea8Slogwang 	}
6425a9643ea8Slogwang 	if (ret == I40E_SUCCESS)
6426a9643ea8Slogwang 		i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
6427a9643ea8Slogwang 				     ->eth_dev);
6428a9643ea8Slogwang 
6429a9643ea8Slogwang 	return ret;
6430a9643ea8Slogwang }
6431a9643ea8Slogwang 
6432a9643ea8Slogwang /* Initialize VSI for RX */
6433a9643ea8Slogwang static int
i40e_dev_rx_init(struct i40e_pf * pf)6434a9643ea8Slogwang i40e_dev_rx_init(struct i40e_pf *pf)
6435a9643ea8Slogwang {
6436a9643ea8Slogwang 	struct rte_eth_dev_data *data = pf->dev_data;
6437a9643ea8Slogwang 	int ret = I40E_SUCCESS;
6438a9643ea8Slogwang 	uint16_t i;
6439a9643ea8Slogwang 	struct i40e_rx_queue *rxq;
6440a9643ea8Slogwang 
6441*2d9fd380Sjfb8856606 	i40e_pf_config_rss(pf);
6442a9643ea8Slogwang 	for (i = 0; i < data->nb_rx_queues; i++) {
6443a9643ea8Slogwang 		rxq = data->rx_queues[i];
6444a9643ea8Slogwang 		if (!rxq || !rxq->q_set)
6445a9643ea8Slogwang 			continue;
6446a9643ea8Slogwang 
6447a9643ea8Slogwang 		ret = i40e_rx_queue_init(rxq);
6448a9643ea8Slogwang 		if (ret != I40E_SUCCESS) {
64492bfe3f2eSlogwang 			PMD_DRV_LOG(ERR,
64502bfe3f2eSlogwang 				"Failed to do RX queue initialization");
6451a9643ea8Slogwang 			break;
6452a9643ea8Slogwang 		}
6453a9643ea8Slogwang 	}
6454a9643ea8Slogwang 	if (ret == I40E_SUCCESS)
6455a9643ea8Slogwang 		i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
6456a9643ea8Slogwang 				     ->eth_dev);
6457a9643ea8Slogwang 
6458a9643ea8Slogwang 	return ret;
6459a9643ea8Slogwang }
6460a9643ea8Slogwang 
6461a9643ea8Slogwang static int
i40e_dev_rxtx_init(struct i40e_pf * pf)6462a9643ea8Slogwang i40e_dev_rxtx_init(struct i40e_pf *pf)
6463a9643ea8Slogwang {
6464a9643ea8Slogwang 	int err;
6465a9643ea8Slogwang 
6466a9643ea8Slogwang 	err = i40e_dev_tx_init(pf);
6467a9643ea8Slogwang 	if (err) {
6468a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6469a9643ea8Slogwang 		return err;
6470a9643ea8Slogwang 	}
6471a9643ea8Slogwang 	err = i40e_dev_rx_init(pf);
6472a9643ea8Slogwang 	if (err) {
6473a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6474a9643ea8Slogwang 		return err;
6475a9643ea8Slogwang 	}
6476a9643ea8Slogwang 
6477a9643ea8Slogwang 	return err;
6478a9643ea8Slogwang }
6479a9643ea8Slogwang 
6480a9643ea8Slogwang static int
i40e_vmdq_setup(struct rte_eth_dev * dev)6481a9643ea8Slogwang i40e_vmdq_setup(struct rte_eth_dev *dev)
6482a9643ea8Slogwang {
6483a9643ea8Slogwang 	struct rte_eth_conf *conf = &dev->data->dev_conf;
6484a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6485a9643ea8Slogwang 	int i, err, conf_vsis, j, loop;
6486a9643ea8Slogwang 	struct i40e_vsi *vsi;
6487a9643ea8Slogwang 	struct i40e_vmdq_info *vmdq_info;
6488a9643ea8Slogwang 	struct rte_eth_vmdq_rx_conf *vmdq_conf;
6489a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6490a9643ea8Slogwang 
6491a9643ea8Slogwang 	/*
6492a9643ea8Slogwang 	 * Disable interrupt to avoid message from VF. Furthermore, it will
6493a9643ea8Slogwang 	 * avoid race condition in VSI creation/destroy.
6494a9643ea8Slogwang 	 */
6495a9643ea8Slogwang 	i40e_pf_disable_irq0(hw);
6496a9643ea8Slogwang 
6497a9643ea8Slogwang 	if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6498a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6499a9643ea8Slogwang 		return -ENOTSUP;
6500a9643ea8Slogwang 	}
6501a9643ea8Slogwang 
6502a9643ea8Slogwang 	conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6503a9643ea8Slogwang 	if (conf_vsis > pf->max_nb_vmdq_vsi) {
6504a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
6505a9643ea8Slogwang 			conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6506a9643ea8Slogwang 			pf->max_nb_vmdq_vsi);
6507a9643ea8Slogwang 		return -ENOTSUP;
6508a9643ea8Slogwang 	}
6509a9643ea8Slogwang 
6510a9643ea8Slogwang 	if (pf->vmdq != NULL) {
6511a9643ea8Slogwang 		PMD_INIT_LOG(INFO, "VMDQ already configured");
6512a9643ea8Slogwang 		return 0;
6513a9643ea8Slogwang 	}
6514a9643ea8Slogwang 
6515a9643ea8Slogwang 	pf->vmdq = rte_zmalloc("vmdq_info_struct",
6516a9643ea8Slogwang 				sizeof(*vmdq_info) * conf_vsis, 0);
6517a9643ea8Slogwang 
6518a9643ea8Slogwang 	if (pf->vmdq == NULL) {
6519a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Failed to allocate memory");
6520a9643ea8Slogwang 		return -ENOMEM;
6521a9643ea8Slogwang 	}
6522a9643ea8Slogwang 
6523a9643ea8Slogwang 	vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6524a9643ea8Slogwang 
6525a9643ea8Slogwang 	/* Create VMDQ VSI */
6526a9643ea8Slogwang 	for (i = 0; i < conf_vsis; i++) {
6527a9643ea8Slogwang 		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6528a9643ea8Slogwang 				vmdq_conf->enable_loop_back);
6529a9643ea8Slogwang 		if (vsi == NULL) {
6530a9643ea8Slogwang 			PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6531a9643ea8Slogwang 			err = -1;
6532a9643ea8Slogwang 			goto err_vsi_setup;
6533a9643ea8Slogwang 		}
6534a9643ea8Slogwang 		vmdq_info = &pf->vmdq[i];
6535a9643ea8Slogwang 		vmdq_info->pf = pf;
6536a9643ea8Slogwang 		vmdq_info->vsi = vsi;
6537a9643ea8Slogwang 	}
6538a9643ea8Slogwang 	pf->nb_cfg_vmdq_vsi = conf_vsis;
6539a9643ea8Slogwang 
6540a9643ea8Slogwang 	/* Configure Vlan */
6541a9643ea8Slogwang 	loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6542a9643ea8Slogwang 	for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6543a9643ea8Slogwang 		for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6544a9643ea8Slogwang 			if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6545a9643ea8Slogwang 				PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6546a9643ea8Slogwang 					vmdq_conf->pool_map[i].vlan_id, j);
6547a9643ea8Slogwang 
6548a9643ea8Slogwang 				err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6549a9643ea8Slogwang 						vmdq_conf->pool_map[i].vlan_id);
6550a9643ea8Slogwang 				if (err) {
6551a9643ea8Slogwang 					PMD_INIT_LOG(ERR, "Failed to add vlan");
6552a9643ea8Slogwang 					err = -1;
6553a9643ea8Slogwang 					goto err_vsi_setup;
6554a9643ea8Slogwang 				}
6555a9643ea8Slogwang 			}
6556a9643ea8Slogwang 		}
6557a9643ea8Slogwang 	}
6558a9643ea8Slogwang 
6559a9643ea8Slogwang 	i40e_pf_enable_irq0(hw);
6560a9643ea8Slogwang 
6561a9643ea8Slogwang 	return 0;
6562a9643ea8Slogwang 
6563a9643ea8Slogwang err_vsi_setup:
6564a9643ea8Slogwang 	for (i = 0; i < conf_vsis; i++)
6565a9643ea8Slogwang 		if (pf->vmdq[i].vsi == NULL)
6566a9643ea8Slogwang 			break;
6567a9643ea8Slogwang 		else
6568a9643ea8Slogwang 			i40e_vsi_release(pf->vmdq[i].vsi);
6569a9643ea8Slogwang 
6570a9643ea8Slogwang 	rte_free(pf->vmdq);
6571a9643ea8Slogwang 	pf->vmdq = NULL;
6572a9643ea8Slogwang 	i40e_pf_enable_irq0(hw);
6573a9643ea8Slogwang 	return err;
6574a9643ea8Slogwang }
6575a9643ea8Slogwang 
6576a9643ea8Slogwang static void
i40e_stat_update_32(struct i40e_hw * hw,uint32_t reg,bool offset_loaded,uint64_t * offset,uint64_t * stat)6577a9643ea8Slogwang i40e_stat_update_32(struct i40e_hw *hw,
6578a9643ea8Slogwang 		   uint32_t reg,
6579a9643ea8Slogwang 		   bool offset_loaded,
6580a9643ea8Slogwang 		   uint64_t *offset,
6581a9643ea8Slogwang 		   uint64_t *stat)
6582a9643ea8Slogwang {
6583a9643ea8Slogwang 	uint64_t new_data;
6584a9643ea8Slogwang 
6585a9643ea8Slogwang 	new_data = (uint64_t)I40E_READ_REG(hw, reg);
6586a9643ea8Slogwang 	if (!offset_loaded)
6587a9643ea8Slogwang 		*offset = new_data;
6588a9643ea8Slogwang 
6589a9643ea8Slogwang 	if (new_data >= *offset)
6590a9643ea8Slogwang 		*stat = (uint64_t)(new_data - *offset);
6591a9643ea8Slogwang 	else
6592a9643ea8Slogwang 		*stat = (uint64_t)((new_data +
6593a9643ea8Slogwang 			((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6594a9643ea8Slogwang }
6595a9643ea8Slogwang 
6596a9643ea8Slogwang static void
i40e_stat_update_48(struct i40e_hw * hw,uint32_t hireg,uint32_t loreg,bool offset_loaded,uint64_t * offset,uint64_t * stat)6597a9643ea8Slogwang i40e_stat_update_48(struct i40e_hw *hw,
6598a9643ea8Slogwang 		   uint32_t hireg,
6599a9643ea8Slogwang 		   uint32_t loreg,
6600a9643ea8Slogwang 		   bool offset_loaded,
6601a9643ea8Slogwang 		   uint64_t *offset,
6602a9643ea8Slogwang 		   uint64_t *stat)
6603a9643ea8Slogwang {
6604a9643ea8Slogwang 	uint64_t new_data;
6605a9643ea8Slogwang 
6606a9643ea8Slogwang 	new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6607a9643ea8Slogwang 	new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6608a9643ea8Slogwang 			I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6609a9643ea8Slogwang 
6610a9643ea8Slogwang 	if (!offset_loaded)
6611a9643ea8Slogwang 		*offset = new_data;
6612a9643ea8Slogwang 
6613a9643ea8Slogwang 	if (new_data >= *offset)
6614a9643ea8Slogwang 		*stat = new_data - *offset;
6615a9643ea8Slogwang 	else
6616a9643ea8Slogwang 		*stat = (uint64_t)((new_data +
6617a9643ea8Slogwang 			((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6618a9643ea8Slogwang 
6619a9643ea8Slogwang 	*stat &= I40E_48_BIT_MASK;
6620a9643ea8Slogwang }
6621a9643ea8Slogwang 
6622a9643ea8Slogwang /* Disable IRQ0 */
6623a9643ea8Slogwang void
i40e_pf_disable_irq0(struct i40e_hw * hw)6624a9643ea8Slogwang i40e_pf_disable_irq0(struct i40e_hw *hw)
6625a9643ea8Slogwang {
6626a9643ea8Slogwang 	/* Disable all interrupt types */
66272bfe3f2eSlogwang 	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
66282bfe3f2eSlogwang 		       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6629a9643ea8Slogwang 	I40E_WRITE_FLUSH(hw);
6630a9643ea8Slogwang }
6631a9643ea8Slogwang 
6632a9643ea8Slogwang /* Enable IRQ0 */
6633a9643ea8Slogwang void
i40e_pf_enable_irq0(struct i40e_hw * hw)6634a9643ea8Slogwang i40e_pf_enable_irq0(struct i40e_hw *hw)
6635a9643ea8Slogwang {
6636a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6637a9643ea8Slogwang 		I40E_PFINT_DYN_CTL0_INTENA_MASK |
6638a9643ea8Slogwang 		I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6639a9643ea8Slogwang 		I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6640a9643ea8Slogwang 	I40E_WRITE_FLUSH(hw);
6641a9643ea8Slogwang }
6642a9643ea8Slogwang 
6643a9643ea8Slogwang static void
i40e_pf_config_irq0(struct i40e_hw * hw,bool no_queue)6644a9643ea8Slogwang i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6645a9643ea8Slogwang {
6646a9643ea8Slogwang 	/* read pending request and disable first */
6647a9643ea8Slogwang 	i40e_pf_disable_irq0(hw);
6648a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6649a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6650a9643ea8Slogwang 		I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6651a9643ea8Slogwang 
6652a9643ea8Slogwang 	if (no_queue)
6653a9643ea8Slogwang 		/* Link no queues with irq0 */
6654a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6655a9643ea8Slogwang 			       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6656a9643ea8Slogwang }
6657a9643ea8Slogwang 
6658a9643ea8Slogwang static void
i40e_dev_handle_vfr_event(struct rte_eth_dev * dev)6659a9643ea8Slogwang i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6660a9643ea8Slogwang {
6661a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6662a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6663a9643ea8Slogwang 	int i;
6664a9643ea8Slogwang 	uint16_t abs_vf_id;
6665a9643ea8Slogwang 	uint32_t index, offset, val;
6666a9643ea8Slogwang 
6667a9643ea8Slogwang 	if (!pf->vfs)
6668a9643ea8Slogwang 		return;
6669a9643ea8Slogwang 	/**
6670a9643ea8Slogwang 	 * Try to find which VF trigger a reset, use absolute VF id to access
6671a9643ea8Slogwang 	 * since the reg is global register.
6672a9643ea8Slogwang 	 */
6673a9643ea8Slogwang 	for (i = 0; i < pf->vf_num; i++) {
6674a9643ea8Slogwang 		abs_vf_id = hw->func_caps.vf_base_id + i;
6675a9643ea8Slogwang 		index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6676a9643ea8Slogwang 		offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6677a9643ea8Slogwang 		val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
66782bfe3f2eSlogwang 		/* VFR event occurred */
6679a9643ea8Slogwang 		if (val & (0x1 << offset)) {
6680a9643ea8Slogwang 			int ret;
6681a9643ea8Slogwang 
6682a9643ea8Slogwang 			/* Clear the event first */
6683a9643ea8Slogwang 			I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6684a9643ea8Slogwang 							(0x1 << offset));
66852bfe3f2eSlogwang 			PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6686a9643ea8Slogwang 			/**
66872bfe3f2eSlogwang 			 * Only notify a VF reset event occurred,
6688a9643ea8Slogwang 			 * don't trigger another SW reset
6689a9643ea8Slogwang 			 */
6690a9643ea8Slogwang 			ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6691a9643ea8Slogwang 			if (ret != I40E_SUCCESS)
6692a9643ea8Slogwang 				PMD_DRV_LOG(ERR, "Failed to do VF reset");
6693a9643ea8Slogwang 		}
6694a9643ea8Slogwang 	}
6695a9643ea8Slogwang }
6696a9643ea8Slogwang 
6697a9643ea8Slogwang static void
i40e_notify_all_vfs_link_status(struct rte_eth_dev * dev)6698a9643ea8Slogwang i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6699a9643ea8Slogwang {
6700a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6701a9643ea8Slogwang 	int i;
6702a9643ea8Slogwang 
6703a9643ea8Slogwang 	for (i = 0; i < pf->vf_num; i++)
67042bfe3f2eSlogwang 		i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6705a9643ea8Slogwang }
6706a9643ea8Slogwang 
6707a9643ea8Slogwang static void
i40e_dev_handle_aq_msg(struct rte_eth_dev * dev)6708a9643ea8Slogwang i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6709a9643ea8Slogwang {
6710a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6711a9643ea8Slogwang 	struct i40e_arq_event_info info;
6712a9643ea8Slogwang 	uint16_t pending, opcode;
6713a9643ea8Slogwang 	int ret;
6714a9643ea8Slogwang 
6715a9643ea8Slogwang 	info.buf_len = I40E_AQ_BUF_SZ;
6716a9643ea8Slogwang 	info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6717a9643ea8Slogwang 	if (!info.msg_buf) {
6718a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to allocate mem");
6719a9643ea8Slogwang 		return;
6720a9643ea8Slogwang 	}
6721a9643ea8Slogwang 
6722a9643ea8Slogwang 	pending = 1;
6723a9643ea8Slogwang 	while (pending) {
6724a9643ea8Slogwang 		ret = i40e_clean_arq_element(hw, &info, &pending);
6725a9643ea8Slogwang 
6726a9643ea8Slogwang 		if (ret != I40E_SUCCESS) {
67272bfe3f2eSlogwang 			PMD_DRV_LOG(INFO,
67282bfe3f2eSlogwang 				"Failed to read msg from AdminQ, aq_err: %u",
67292bfe3f2eSlogwang 				hw->aq.asq_last_status);
6730a9643ea8Slogwang 			break;
6731a9643ea8Slogwang 		}
6732a9643ea8Slogwang 		opcode = rte_le_to_cpu_16(info.desc.opcode);
6733a9643ea8Slogwang 
6734a9643ea8Slogwang 		switch (opcode) {
6735a9643ea8Slogwang 		case i40e_aqc_opc_send_msg_to_pf:
6736a9643ea8Slogwang 			/* Refer to i40e_aq_send_msg_to_pf() for argument layout*/
6737a9643ea8Slogwang 			i40e_pf_host_handle_vf_msg(dev,
6738a9643ea8Slogwang 					rte_le_to_cpu_16(info.desc.retval),
6739a9643ea8Slogwang 					rte_le_to_cpu_32(info.desc.cookie_high),
6740a9643ea8Slogwang 					rte_le_to_cpu_32(info.desc.cookie_low),
6741a9643ea8Slogwang 					info.msg_buf,
6742a9643ea8Slogwang 					info.msg_len);
6743a9643ea8Slogwang 			break;
6744a9643ea8Slogwang 		case i40e_aqc_opc_get_link_status:
6745a9643ea8Slogwang 			ret = i40e_dev_link_update(dev, 0);
67462bfe3f2eSlogwang 			if (!ret)
6747*2d9fd380Sjfb8856606 				rte_eth_dev_callback_process(dev,
6748d30ea906Sjfb8856606 					RTE_ETH_EVENT_INTR_LSC, NULL);
6749a9643ea8Slogwang 			break;
6750a9643ea8Slogwang 		default:
67512bfe3f2eSlogwang 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6752a9643ea8Slogwang 				    opcode);
6753a9643ea8Slogwang 			break;
6754a9643ea8Slogwang 		}
6755a9643ea8Slogwang 	}
6756a9643ea8Slogwang 	rte_free(info.msg_buf);
6757a9643ea8Slogwang }
6758a9643ea8Slogwang 
6759*2d9fd380Sjfb8856606 static void
i40e_handle_mdd_event(struct rte_eth_dev * dev)6760*2d9fd380Sjfb8856606 i40e_handle_mdd_event(struct rte_eth_dev *dev)
6761*2d9fd380Sjfb8856606 {
6762*2d9fd380Sjfb8856606 #define I40E_MDD_CLEAR32 0xFFFFFFFF
6763*2d9fd380Sjfb8856606 #define I40E_MDD_CLEAR16 0xFFFF
6764*2d9fd380Sjfb8856606 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6765*2d9fd380Sjfb8856606 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6766*2d9fd380Sjfb8856606 	bool mdd_detected = false;
6767*2d9fd380Sjfb8856606 	struct i40e_pf_vf *vf;
6768*2d9fd380Sjfb8856606 	uint32_t reg;
6769*2d9fd380Sjfb8856606 	int i;
6770*2d9fd380Sjfb8856606 
6771*2d9fd380Sjfb8856606 	/* find what triggered the MDD event */
6772*2d9fd380Sjfb8856606 	reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
6773*2d9fd380Sjfb8856606 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6774*2d9fd380Sjfb8856606 		uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6775*2d9fd380Sjfb8856606 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
6776*2d9fd380Sjfb8856606 		uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6777*2d9fd380Sjfb8856606 				I40E_GL_MDET_TX_VF_NUM_SHIFT;
6778*2d9fd380Sjfb8856606 		uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6779*2d9fd380Sjfb8856606 				I40E_GL_MDET_TX_EVENT_SHIFT;
6780*2d9fd380Sjfb8856606 		uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6781*2d9fd380Sjfb8856606 				I40E_GL_MDET_TX_QUEUE_SHIFT) -
6782*2d9fd380Sjfb8856606 					hw->func_caps.base_queue;
6783*2d9fd380Sjfb8856606 		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
6784*2d9fd380Sjfb8856606 			"queue %d PF number 0x%02x VF number 0x%02x device %s\n",
6785*2d9fd380Sjfb8856606 				event, queue, pf_num, vf_num, dev->data->name);
6786*2d9fd380Sjfb8856606 		I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
6787*2d9fd380Sjfb8856606 		mdd_detected = true;
6788*2d9fd380Sjfb8856606 	}
6789*2d9fd380Sjfb8856606 	reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
6790*2d9fd380Sjfb8856606 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6791*2d9fd380Sjfb8856606 		uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6792*2d9fd380Sjfb8856606 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
6793*2d9fd380Sjfb8856606 		uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6794*2d9fd380Sjfb8856606 				I40E_GL_MDET_RX_EVENT_SHIFT;
6795*2d9fd380Sjfb8856606 		uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6796*2d9fd380Sjfb8856606 				I40E_GL_MDET_RX_QUEUE_SHIFT) -
6797*2d9fd380Sjfb8856606 					hw->func_caps.base_queue;
6798*2d9fd380Sjfb8856606 
6799*2d9fd380Sjfb8856606 		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
6800*2d9fd380Sjfb8856606 				"queue %d of function 0x%02x device %s\n",
6801*2d9fd380Sjfb8856606 					event, queue, func, dev->data->name);
6802*2d9fd380Sjfb8856606 		I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
6803*2d9fd380Sjfb8856606 		mdd_detected = true;
6804*2d9fd380Sjfb8856606 	}
6805*2d9fd380Sjfb8856606 
6806*2d9fd380Sjfb8856606 	if (mdd_detected) {
6807*2d9fd380Sjfb8856606 		reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
6808*2d9fd380Sjfb8856606 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6809*2d9fd380Sjfb8856606 			I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
6810*2d9fd380Sjfb8856606 			PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n");
6811*2d9fd380Sjfb8856606 		}
6812*2d9fd380Sjfb8856606 		reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
6813*2d9fd380Sjfb8856606 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6814*2d9fd380Sjfb8856606 			I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
6815*2d9fd380Sjfb8856606 					I40E_MDD_CLEAR16);
6816*2d9fd380Sjfb8856606 			PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n");
6817*2d9fd380Sjfb8856606 		}
6818*2d9fd380Sjfb8856606 	}
6819*2d9fd380Sjfb8856606 
6820*2d9fd380Sjfb8856606 	/* see if one of the VFs needs its hand slapped */
6821*2d9fd380Sjfb8856606 	for (i = 0; i < pf->vf_num && mdd_detected; i++) {
6822*2d9fd380Sjfb8856606 		vf = &pf->vfs[i];
6823*2d9fd380Sjfb8856606 		reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
6824*2d9fd380Sjfb8856606 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6825*2d9fd380Sjfb8856606 			I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
6826*2d9fd380Sjfb8856606 					I40E_MDD_CLEAR16);
6827*2d9fd380Sjfb8856606 			vf->num_mdd_events++;
6828*2d9fd380Sjfb8856606 			PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %-"
6829*2d9fd380Sjfb8856606 					PRIu64 "times\n",
6830*2d9fd380Sjfb8856606 					i, vf->num_mdd_events);
6831*2d9fd380Sjfb8856606 		}
6832*2d9fd380Sjfb8856606 
6833*2d9fd380Sjfb8856606 		reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
6834*2d9fd380Sjfb8856606 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6835*2d9fd380Sjfb8856606 			I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
6836*2d9fd380Sjfb8856606 					I40E_MDD_CLEAR16);
6837*2d9fd380Sjfb8856606 			vf->num_mdd_events++;
6838*2d9fd380Sjfb8856606 			PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %-"
6839*2d9fd380Sjfb8856606 					PRIu64 "times\n",
6840*2d9fd380Sjfb8856606 					i, vf->num_mdd_events);
6841*2d9fd380Sjfb8856606 		}
6842*2d9fd380Sjfb8856606 	}
6843*2d9fd380Sjfb8856606 }
6844*2d9fd380Sjfb8856606 
6845a9643ea8Slogwang /**
6846a9643ea8Slogwang  * Interrupt handler triggered by NIC  for handling
6847a9643ea8Slogwang  * specific interrupt.
6848a9643ea8Slogwang  *
6849a9643ea8Slogwang  * @param handle
6850a9643ea8Slogwang  *  Pointer to interrupt handle.
6851a9643ea8Slogwang  * @param param
6852a9643ea8Slogwang  *  The address of parameter (struct rte_eth_dev *) regsitered before.
6853a9643ea8Slogwang  *
6854a9643ea8Slogwang  * @return
6855a9643ea8Slogwang  *  void
6856a9643ea8Slogwang  */
6857a9643ea8Slogwang static void
i40e_dev_interrupt_handler(void * param)68582bfe3f2eSlogwang i40e_dev_interrupt_handler(void *param)
6859a9643ea8Slogwang {
6860a9643ea8Slogwang 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6861a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6862a9643ea8Slogwang 	uint32_t icr0;
6863a9643ea8Slogwang 
6864a9643ea8Slogwang 	/* Disable interrupt */
6865a9643ea8Slogwang 	i40e_pf_disable_irq0(hw);
6866a9643ea8Slogwang 
6867a9643ea8Slogwang 	/* read out interrupt causes */
6868a9643ea8Slogwang 	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6869a9643ea8Slogwang 
6870a9643ea8Slogwang 	/* No interrupt event indicated */
6871a9643ea8Slogwang 	if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6872a9643ea8Slogwang 		PMD_DRV_LOG(INFO, "No interrupt event");
6873a9643ea8Slogwang 		goto done;
6874a9643ea8Slogwang 	}
6875a9643ea8Slogwang 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6876a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6877*2d9fd380Sjfb8856606 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6878a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6879*2d9fd380Sjfb8856606 		i40e_handle_mdd_event(dev);
6880*2d9fd380Sjfb8856606 	}
6881a9643ea8Slogwang 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6882a9643ea8Slogwang 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6883a9643ea8Slogwang 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6884a9643ea8Slogwang 		PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6885a9643ea8Slogwang 	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6886a9643ea8Slogwang 		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6887a9643ea8Slogwang 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6888a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "ICR0: HMC error");
6889a9643ea8Slogwang 	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6890a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6891a9643ea8Slogwang 
6892a9643ea8Slogwang 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6893a9643ea8Slogwang 		PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6894a9643ea8Slogwang 		i40e_dev_handle_vfr_event(dev);
6895a9643ea8Slogwang 	}
6896a9643ea8Slogwang 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6897a9643ea8Slogwang 		PMD_DRV_LOG(INFO, "ICR0: adminq event");
6898a9643ea8Slogwang 		i40e_dev_handle_aq_msg(dev);
6899a9643ea8Slogwang 	}
69002bfe3f2eSlogwang 
6901a9643ea8Slogwang done:
6902a9643ea8Slogwang 	/* Enable interrupt */
6903a9643ea8Slogwang 	i40e_pf_enable_irq0(hw);
6904d30ea906Sjfb8856606 }
6905d30ea906Sjfb8856606 
6906d30ea906Sjfb8856606 static void
i40e_dev_alarm_handler(void * param)6907d30ea906Sjfb8856606 i40e_dev_alarm_handler(void *param)
6908d30ea906Sjfb8856606 {
6909d30ea906Sjfb8856606 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6910d30ea906Sjfb8856606 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6911d30ea906Sjfb8856606 	uint32_t icr0;
6912d30ea906Sjfb8856606 
6913d30ea906Sjfb8856606 	/* Disable interrupt */
6914d30ea906Sjfb8856606 	i40e_pf_disable_irq0(hw);
6915d30ea906Sjfb8856606 
6916d30ea906Sjfb8856606 	/* read out interrupt causes */
6917d30ea906Sjfb8856606 	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6918d30ea906Sjfb8856606 
6919d30ea906Sjfb8856606 	/* No interrupt event indicated */
6920d30ea906Sjfb8856606 	if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
6921d30ea906Sjfb8856606 		goto done;
6922d30ea906Sjfb8856606 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6923d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6924*2d9fd380Sjfb8856606 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6925d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6926*2d9fd380Sjfb8856606 		i40e_handle_mdd_event(dev);
6927*2d9fd380Sjfb8856606 	}
6928d30ea906Sjfb8856606 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6929d30ea906Sjfb8856606 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6930d30ea906Sjfb8856606 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6931d30ea906Sjfb8856606 		PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6932d30ea906Sjfb8856606 	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6933d30ea906Sjfb8856606 		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6934d30ea906Sjfb8856606 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6935d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR, "ICR0: HMC error");
6936d30ea906Sjfb8856606 	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6937d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6938d30ea906Sjfb8856606 
6939d30ea906Sjfb8856606 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6940d30ea906Sjfb8856606 		PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6941d30ea906Sjfb8856606 		i40e_dev_handle_vfr_event(dev);
6942d30ea906Sjfb8856606 	}
6943d30ea906Sjfb8856606 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6944d30ea906Sjfb8856606 		PMD_DRV_LOG(INFO, "ICR0: adminq event");
6945d30ea906Sjfb8856606 		i40e_dev_handle_aq_msg(dev);
6946d30ea906Sjfb8856606 	}
6947d30ea906Sjfb8856606 
6948d30ea906Sjfb8856606 done:
6949d30ea906Sjfb8856606 	/* Enable interrupt */
6950d30ea906Sjfb8856606 	i40e_pf_enable_irq0(hw);
6951d30ea906Sjfb8856606 	rte_eal_alarm_set(I40E_ALARM_INTERVAL,
6952d30ea906Sjfb8856606 			  i40e_dev_alarm_handler, dev);
6953a9643ea8Slogwang }
6954a9643ea8Slogwang 
69552bfe3f2eSlogwang int
i40e_add_macvlan_filters(struct i40e_vsi * vsi,struct i40e_macvlan_filter * filter,int total)6956a9643ea8Slogwang i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6957a9643ea8Slogwang 			 struct i40e_macvlan_filter *filter,
6958a9643ea8Slogwang 			 int total)
6959a9643ea8Slogwang {
6960a9643ea8Slogwang 	int ele_num, ele_buff_size;
6961a9643ea8Slogwang 	int num, actual_num, i;
6962a9643ea8Slogwang 	uint16_t flags;
6963a9643ea8Slogwang 	int ret = I40E_SUCCESS;
6964a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6965a9643ea8Slogwang 	struct i40e_aqc_add_macvlan_element_data *req_list;
6966a9643ea8Slogwang 
6967a9643ea8Slogwang 	if (filter == NULL  || total == 0)
6968a9643ea8Slogwang 		return I40E_ERR_PARAM;
6969a9643ea8Slogwang 	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6970a9643ea8Slogwang 	ele_buff_size = hw->aq.asq_buf_size;
6971a9643ea8Slogwang 
6972a9643ea8Slogwang 	req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6973a9643ea8Slogwang 	if (req_list == NULL) {
6974a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Fail to allocate memory");
6975a9643ea8Slogwang 		return I40E_ERR_NO_MEMORY;
6976a9643ea8Slogwang 	}
6977a9643ea8Slogwang 
6978a9643ea8Slogwang 	num = 0;
6979a9643ea8Slogwang 	do {
6980a9643ea8Slogwang 		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6981a9643ea8Slogwang 		memset(req_list, 0, ele_buff_size);
6982a9643ea8Slogwang 
6983a9643ea8Slogwang 		for (i = 0; i < actual_num; i++) {
69842bfe3f2eSlogwang 			rte_memcpy(req_list[i].mac_addr,
6985a9643ea8Slogwang 				&filter[num + i].macaddr, ETH_ADDR_LEN);
6986a9643ea8Slogwang 			req_list[i].vlan_tag =
6987a9643ea8Slogwang 				rte_cpu_to_le_16(filter[num + i].vlan_id);
6988a9643ea8Slogwang 
6989a9643ea8Slogwang 			switch (filter[num + i].filter_type) {
6990*2d9fd380Sjfb8856606 			case I40E_MAC_PERFECT_MATCH:
6991a9643ea8Slogwang 				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6992a9643ea8Slogwang 					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6993a9643ea8Slogwang 				break;
6994*2d9fd380Sjfb8856606 			case I40E_MACVLAN_PERFECT_MATCH:
6995a9643ea8Slogwang 				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6996a9643ea8Slogwang 				break;
6997*2d9fd380Sjfb8856606 			case I40E_MAC_HASH_MATCH:
6998a9643ea8Slogwang 				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6999a9643ea8Slogwang 					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
7000a9643ea8Slogwang 				break;
7001*2d9fd380Sjfb8856606 			case I40E_MACVLAN_HASH_MATCH:
7002a9643ea8Slogwang 				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
7003a9643ea8Slogwang 				break;
7004a9643ea8Slogwang 			default:
70052bfe3f2eSlogwang 				PMD_DRV_LOG(ERR, "Invalid MAC match type");
7006a9643ea8Slogwang 				ret = I40E_ERR_PARAM;
7007a9643ea8Slogwang 				goto DONE;
7008a9643ea8Slogwang 			}
7009a9643ea8Slogwang 
7010a9643ea8Slogwang 			req_list[i].queue_number = 0;
7011a9643ea8Slogwang 
7012a9643ea8Slogwang 			req_list[i].flags = rte_cpu_to_le_16(flags);
7013a9643ea8Slogwang 		}
7014a9643ea8Slogwang 
7015a9643ea8Slogwang 		ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
7016a9643ea8Slogwang 						actual_num, NULL);
7017a9643ea8Slogwang 		if (ret != I40E_SUCCESS) {
7018a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
7019a9643ea8Slogwang 			goto DONE;
7020a9643ea8Slogwang 		}
7021a9643ea8Slogwang 		num += actual_num;
7022a9643ea8Slogwang 	} while (num < total);
7023a9643ea8Slogwang 
7024a9643ea8Slogwang DONE:
7025a9643ea8Slogwang 	rte_free(req_list);
7026a9643ea8Slogwang 	return ret;
7027a9643ea8Slogwang }
7028a9643ea8Slogwang 
70292bfe3f2eSlogwang int
i40e_remove_macvlan_filters(struct i40e_vsi * vsi,struct i40e_macvlan_filter * filter,int total)7030a9643ea8Slogwang i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
7031a9643ea8Slogwang 			    struct i40e_macvlan_filter *filter,
7032a9643ea8Slogwang 			    int total)
7033a9643ea8Slogwang {
7034a9643ea8Slogwang 	int ele_num, ele_buff_size;
7035a9643ea8Slogwang 	int num, actual_num, i;
7036a9643ea8Slogwang 	uint16_t flags;
7037a9643ea8Slogwang 	int ret = I40E_SUCCESS;
7038a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7039a9643ea8Slogwang 	struct i40e_aqc_remove_macvlan_element_data *req_list;
7040a9643ea8Slogwang 
7041a9643ea8Slogwang 	if (filter == NULL  || total == 0)
7042a9643ea8Slogwang 		return I40E_ERR_PARAM;
7043a9643ea8Slogwang 
7044a9643ea8Slogwang 	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7045a9643ea8Slogwang 	ele_buff_size = hw->aq.asq_buf_size;
7046a9643ea8Slogwang 
7047a9643ea8Slogwang 	req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
7048a9643ea8Slogwang 	if (req_list == NULL) {
7049a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Fail to allocate memory");
7050a9643ea8Slogwang 		return I40E_ERR_NO_MEMORY;
7051a9643ea8Slogwang 	}
7052a9643ea8Slogwang 
7053a9643ea8Slogwang 	num = 0;
7054a9643ea8Slogwang 	do {
7055a9643ea8Slogwang 		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7056a9643ea8Slogwang 		memset(req_list, 0, ele_buff_size);
7057a9643ea8Slogwang 
7058a9643ea8Slogwang 		for (i = 0; i < actual_num; i++) {
70592bfe3f2eSlogwang 			rte_memcpy(req_list[i].mac_addr,
7060a9643ea8Slogwang 				&filter[num + i].macaddr, ETH_ADDR_LEN);
7061a9643ea8Slogwang 			req_list[i].vlan_tag =
7062a9643ea8Slogwang 				rte_cpu_to_le_16(filter[num + i].vlan_id);
7063a9643ea8Slogwang 
7064a9643ea8Slogwang 			switch (filter[num + i].filter_type) {
7065*2d9fd380Sjfb8856606 			case I40E_MAC_PERFECT_MATCH:
7066a9643ea8Slogwang 				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
7067a9643ea8Slogwang 					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7068a9643ea8Slogwang 				break;
7069*2d9fd380Sjfb8856606 			case I40E_MACVLAN_PERFECT_MATCH:
7070a9643ea8Slogwang 				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7071a9643ea8Slogwang 				break;
7072*2d9fd380Sjfb8856606 			case I40E_MAC_HASH_MATCH:
7073a9643ea8Slogwang 				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
7074a9643ea8Slogwang 					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7075a9643ea8Slogwang 				break;
7076*2d9fd380Sjfb8856606 			case I40E_MACVLAN_HASH_MATCH:
7077a9643ea8Slogwang 				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
7078a9643ea8Slogwang 				break;
7079a9643ea8Slogwang 			default:
70802bfe3f2eSlogwang 				PMD_DRV_LOG(ERR, "Invalid MAC filter type");
7081a9643ea8Slogwang 				ret = I40E_ERR_PARAM;
7082a9643ea8Slogwang 				goto DONE;
7083a9643ea8Slogwang 			}
7084a9643ea8Slogwang 			req_list[i].flags = rte_cpu_to_le_16(flags);
7085a9643ea8Slogwang 		}
7086a9643ea8Slogwang 
7087a9643ea8Slogwang 		ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
7088a9643ea8Slogwang 						actual_num, NULL);
7089a9643ea8Slogwang 		if (ret != I40E_SUCCESS) {
7090a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
7091a9643ea8Slogwang 			goto DONE;
7092a9643ea8Slogwang 		}
7093a9643ea8Slogwang 		num += actual_num;
7094a9643ea8Slogwang 	} while (num < total);
7095a9643ea8Slogwang 
7096a9643ea8Slogwang DONE:
7097a9643ea8Slogwang 	rte_free(req_list);
7098a9643ea8Slogwang 	return ret;
7099a9643ea8Slogwang }
7100a9643ea8Slogwang 
7101a9643ea8Slogwang /* Find out specific MAC filter */
7102a9643ea8Slogwang static struct i40e_mac_filter *
i40e_find_mac_filter(struct i40e_vsi * vsi,struct rte_ether_addr * macaddr)7103a9643ea8Slogwang i40e_find_mac_filter(struct i40e_vsi *vsi,
71044418919fSjohnjiang 			 struct rte_ether_addr *macaddr)
7105a9643ea8Slogwang {
7106a9643ea8Slogwang 	struct i40e_mac_filter *f;
7107a9643ea8Slogwang 
7108a9643ea8Slogwang 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
71094418919fSjohnjiang 		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
7110a9643ea8Slogwang 			return f;
7111a9643ea8Slogwang 	}
7112a9643ea8Slogwang 
7113a9643ea8Slogwang 	return NULL;
7114a9643ea8Slogwang }
7115a9643ea8Slogwang 
7116a9643ea8Slogwang static bool
i40e_find_vlan_filter(struct i40e_vsi * vsi,uint16_t vlan_id)7117a9643ea8Slogwang i40e_find_vlan_filter(struct i40e_vsi *vsi,
7118a9643ea8Slogwang 			 uint16_t vlan_id)
7119a9643ea8Slogwang {
7120a9643ea8Slogwang 	uint32_t vid_idx, vid_bit;
7121a9643ea8Slogwang 
7122a9643ea8Slogwang 	if (vlan_id > ETH_VLAN_ID_MAX)
7123a9643ea8Slogwang 		return 0;
7124a9643ea8Slogwang 
7125a9643ea8Slogwang 	vid_idx = I40E_VFTA_IDX(vlan_id);
7126a9643ea8Slogwang 	vid_bit = I40E_VFTA_BIT(vlan_id);
7127a9643ea8Slogwang 
7128a9643ea8Slogwang 	if (vsi->vfta[vid_idx] & vid_bit)
7129a9643ea8Slogwang 		return 1;
7130a9643ea8Slogwang 	else
7131a9643ea8Slogwang 		return 0;
7132a9643ea8Slogwang }
7133a9643ea8Slogwang 
7134a9643ea8Slogwang static void
i40e_store_vlan_filter(struct i40e_vsi * vsi,uint16_t vlan_id,bool on)71352bfe3f2eSlogwang i40e_store_vlan_filter(struct i40e_vsi *vsi,
7136a9643ea8Slogwang 		       uint16_t vlan_id, bool on)
7137a9643ea8Slogwang {
7138a9643ea8Slogwang 	uint32_t vid_idx, vid_bit;
7139a9643ea8Slogwang 
7140a9643ea8Slogwang 	vid_idx = I40E_VFTA_IDX(vlan_id);
7141a9643ea8Slogwang 	vid_bit = I40E_VFTA_BIT(vlan_id);
7142a9643ea8Slogwang 
7143a9643ea8Slogwang 	if (on)
7144a9643ea8Slogwang 		vsi->vfta[vid_idx] |= vid_bit;
7145a9643ea8Slogwang 	else
7146a9643ea8Slogwang 		vsi->vfta[vid_idx] &= ~vid_bit;
7147a9643ea8Slogwang }
7148a9643ea8Slogwang 
71492bfe3f2eSlogwang void
i40e_set_vlan_filter(struct i40e_vsi * vsi,uint16_t vlan_id,bool on)71502bfe3f2eSlogwang i40e_set_vlan_filter(struct i40e_vsi *vsi,
71512bfe3f2eSlogwang 		     uint16_t vlan_id, bool on)
71522bfe3f2eSlogwang {
71532bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
71542bfe3f2eSlogwang 	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
71552bfe3f2eSlogwang 	int ret;
71562bfe3f2eSlogwang 
71572bfe3f2eSlogwang 	if (vlan_id > ETH_VLAN_ID_MAX)
71582bfe3f2eSlogwang 		return;
71592bfe3f2eSlogwang 
71602bfe3f2eSlogwang 	i40e_store_vlan_filter(vsi, vlan_id, on);
71612bfe3f2eSlogwang 
71622bfe3f2eSlogwang 	if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
71632bfe3f2eSlogwang 		return;
71642bfe3f2eSlogwang 
71652bfe3f2eSlogwang 	vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
71662bfe3f2eSlogwang 
71672bfe3f2eSlogwang 	if (on) {
71682bfe3f2eSlogwang 		ret = i40e_aq_add_vlan(hw, vsi->seid,
71692bfe3f2eSlogwang 				       &vlan_data, 1, NULL);
71702bfe3f2eSlogwang 		if (ret != I40E_SUCCESS)
71712bfe3f2eSlogwang 			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
71722bfe3f2eSlogwang 	} else {
71732bfe3f2eSlogwang 		ret = i40e_aq_remove_vlan(hw, vsi->seid,
71742bfe3f2eSlogwang 					  &vlan_data, 1, NULL);
71752bfe3f2eSlogwang 		if (ret != I40E_SUCCESS)
71762bfe3f2eSlogwang 			PMD_DRV_LOG(ERR,
71772bfe3f2eSlogwang 				    "Failed to remove vlan filter");
71782bfe3f2eSlogwang 	}
71792bfe3f2eSlogwang }
71802bfe3f2eSlogwang 
7181a9643ea8Slogwang /**
7182a9643ea8Slogwang  * Find all vlan options for specific mac addr,
7183a9643ea8Slogwang  * return with actual vlan found.
7184a9643ea8Slogwang  */
71852bfe3f2eSlogwang int
i40e_find_all_vlan_for_mac(struct i40e_vsi * vsi,struct i40e_macvlan_filter * mv_f,int num,struct rte_ether_addr * addr)7186a9643ea8Slogwang i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
7187a9643ea8Slogwang 			   struct i40e_macvlan_filter *mv_f,
71884418919fSjohnjiang 			   int num, struct rte_ether_addr *addr)
7189a9643ea8Slogwang {
7190a9643ea8Slogwang 	int i;
7191a9643ea8Slogwang 	uint32_t j, k;
7192a9643ea8Slogwang 
7193a9643ea8Slogwang 	/**
7194a9643ea8Slogwang 	 * Not to use i40e_find_vlan_filter to decrease the loop time,
7195a9643ea8Slogwang 	 * although the code looks complex.
7196a9643ea8Slogwang 	  */
7197a9643ea8Slogwang 	if (num < vsi->vlan_num)
7198a9643ea8Slogwang 		return I40E_ERR_PARAM;
7199a9643ea8Slogwang 
7200a9643ea8Slogwang 	i = 0;
7201a9643ea8Slogwang 	for (j = 0; j < I40E_VFTA_SIZE; j++) {
7202a9643ea8Slogwang 		if (vsi->vfta[j]) {
7203a9643ea8Slogwang 			for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
7204a9643ea8Slogwang 				if (vsi->vfta[j] & (1 << k)) {
7205a9643ea8Slogwang 					if (i > num - 1) {
72062bfe3f2eSlogwang 						PMD_DRV_LOG(ERR,
72072bfe3f2eSlogwang 							"vlan number doesn't match");
7208a9643ea8Slogwang 						return I40E_ERR_PARAM;
7209a9643ea8Slogwang 					}
72102bfe3f2eSlogwang 					rte_memcpy(&mv_f[i].macaddr,
7211a9643ea8Slogwang 							addr, ETH_ADDR_LEN);
7212a9643ea8Slogwang 					mv_f[i].vlan_id =
7213a9643ea8Slogwang 						j * I40E_UINT32_BIT_SIZE + k;
7214a9643ea8Slogwang 					i++;
7215a9643ea8Slogwang 				}
7216a9643ea8Slogwang 			}
7217a9643ea8Slogwang 		}
7218a9643ea8Slogwang 	}
7219a9643ea8Slogwang 	return I40E_SUCCESS;
7220a9643ea8Slogwang }
7221a9643ea8Slogwang 
7222a9643ea8Slogwang static inline int
i40e_find_all_mac_for_vlan(struct i40e_vsi * vsi,struct i40e_macvlan_filter * mv_f,int num,uint16_t vlan)7223a9643ea8Slogwang i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7224a9643ea8Slogwang 			   struct i40e_macvlan_filter *mv_f,
7225a9643ea8Slogwang 			   int num,
7226a9643ea8Slogwang 			   uint16_t vlan)
7227a9643ea8Slogwang {
7228a9643ea8Slogwang 	int i = 0;
7229a9643ea8Slogwang 	struct i40e_mac_filter *f;
7230a9643ea8Slogwang 
7231a9643ea8Slogwang 	if (num < vsi->mac_num)
7232a9643ea8Slogwang 		return I40E_ERR_PARAM;
7233a9643ea8Slogwang 
7234a9643ea8Slogwang 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
7235a9643ea8Slogwang 		if (i > num - 1) {
7236a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "buffer number not match");
7237a9643ea8Slogwang 			return I40E_ERR_PARAM;
7238a9643ea8Slogwang 		}
72392bfe3f2eSlogwang 		rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7240a9643ea8Slogwang 				ETH_ADDR_LEN);
7241a9643ea8Slogwang 		mv_f[i].vlan_id = vlan;
7242a9643ea8Slogwang 		mv_f[i].filter_type = f->mac_info.filter_type;
7243a9643ea8Slogwang 		i++;
7244a9643ea8Slogwang 	}
7245a9643ea8Slogwang 
7246a9643ea8Slogwang 	return I40E_SUCCESS;
7247a9643ea8Slogwang }
7248a9643ea8Slogwang 
7249a9643ea8Slogwang static int
i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi * vsi)7250a9643ea8Slogwang i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7251a9643ea8Slogwang {
72522bfe3f2eSlogwang 	int i, j, num;
7253a9643ea8Slogwang 	struct i40e_mac_filter *f;
7254a9643ea8Slogwang 	struct i40e_macvlan_filter *mv_f;
7255a9643ea8Slogwang 	int ret = I40E_SUCCESS;
7256a9643ea8Slogwang 
7257a9643ea8Slogwang 	if (vsi == NULL || vsi->mac_num == 0)
7258a9643ea8Slogwang 		return I40E_ERR_PARAM;
7259a9643ea8Slogwang 
7260a9643ea8Slogwang 	/* Case that no vlan is set */
7261a9643ea8Slogwang 	if (vsi->vlan_num == 0)
7262a9643ea8Slogwang 		num = vsi->mac_num;
7263a9643ea8Slogwang 	else
7264a9643ea8Slogwang 		num = vsi->mac_num * vsi->vlan_num;
7265a9643ea8Slogwang 
7266a9643ea8Slogwang 	mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7267a9643ea8Slogwang 	if (mv_f == NULL) {
7268a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7269a9643ea8Slogwang 		return I40E_ERR_NO_MEMORY;
7270a9643ea8Slogwang 	}
7271a9643ea8Slogwang 
7272a9643ea8Slogwang 	i = 0;
7273a9643ea8Slogwang 	if (vsi->vlan_num == 0) {
7274a9643ea8Slogwang 		TAILQ_FOREACH(f, &vsi->mac_list, next) {
72752bfe3f2eSlogwang 			rte_memcpy(&mv_f[i].macaddr,
7276a9643ea8Slogwang 				&f->mac_info.mac_addr, ETH_ADDR_LEN);
72772bfe3f2eSlogwang 			mv_f[i].filter_type = f->mac_info.filter_type;
7278a9643ea8Slogwang 			mv_f[i].vlan_id = 0;
7279a9643ea8Slogwang 			i++;
7280a9643ea8Slogwang 		}
7281a9643ea8Slogwang 	} else {
7282a9643ea8Slogwang 		TAILQ_FOREACH(f, &vsi->mac_list, next) {
7283a9643ea8Slogwang 			ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
7284a9643ea8Slogwang 					vsi->vlan_num, &f->mac_info.mac_addr);
7285a9643ea8Slogwang 			if (ret != I40E_SUCCESS)
7286a9643ea8Slogwang 				goto DONE;
72872bfe3f2eSlogwang 			for (j = i; j < i + vsi->vlan_num; j++)
72882bfe3f2eSlogwang 				mv_f[j].filter_type = f->mac_info.filter_type;
7289a9643ea8Slogwang 			i += vsi->vlan_num;
7290a9643ea8Slogwang 		}
7291a9643ea8Slogwang 	}
7292a9643ea8Slogwang 
7293a9643ea8Slogwang 	ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7294a9643ea8Slogwang DONE:
7295a9643ea8Slogwang 	rte_free(mv_f);
7296a9643ea8Slogwang 
7297a9643ea8Slogwang 	return ret;
7298a9643ea8Slogwang }
7299a9643ea8Slogwang 
7300a9643ea8Slogwang int
i40e_vsi_add_vlan(struct i40e_vsi * vsi,uint16_t vlan)7301a9643ea8Slogwang i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7302a9643ea8Slogwang {
7303a9643ea8Slogwang 	struct i40e_macvlan_filter *mv_f;
7304a9643ea8Slogwang 	int mac_num;
7305a9643ea8Slogwang 	int ret = I40E_SUCCESS;
7306a9643ea8Slogwang 
73074418919fSjohnjiang 	if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
7308a9643ea8Slogwang 		return I40E_ERR_PARAM;
7309a9643ea8Slogwang 
7310a9643ea8Slogwang 	/* If it's already set, just return */
7311a9643ea8Slogwang 	if (i40e_find_vlan_filter(vsi,vlan))
7312a9643ea8Slogwang 		return I40E_SUCCESS;
7313a9643ea8Slogwang 
7314a9643ea8Slogwang 	mac_num = vsi->mac_num;
7315a9643ea8Slogwang 
7316a9643ea8Slogwang 	if (mac_num == 0) {
7317a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7318a9643ea8Slogwang 		return I40E_ERR_PARAM;
7319a9643ea8Slogwang 	}
7320a9643ea8Slogwang 
7321a9643ea8Slogwang 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7322a9643ea8Slogwang 
7323a9643ea8Slogwang 	if (mv_f == NULL) {
7324a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7325a9643ea8Slogwang 		return I40E_ERR_NO_MEMORY;
7326a9643ea8Slogwang 	}
7327a9643ea8Slogwang 
7328a9643ea8Slogwang 	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7329a9643ea8Slogwang 
7330a9643ea8Slogwang 	if (ret != I40E_SUCCESS)
7331a9643ea8Slogwang 		goto DONE;
7332a9643ea8Slogwang 
7333a9643ea8Slogwang 	ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7334a9643ea8Slogwang 
7335a9643ea8Slogwang 	if (ret != I40E_SUCCESS)
7336a9643ea8Slogwang 		goto DONE;
7337a9643ea8Slogwang 
7338a9643ea8Slogwang 	i40e_set_vlan_filter(vsi, vlan, 1);
7339a9643ea8Slogwang 
7340a9643ea8Slogwang 	vsi->vlan_num++;
7341a9643ea8Slogwang 	ret = I40E_SUCCESS;
7342a9643ea8Slogwang DONE:
7343a9643ea8Slogwang 	rte_free(mv_f);
7344a9643ea8Slogwang 	return ret;
7345a9643ea8Slogwang }
7346a9643ea8Slogwang 
7347a9643ea8Slogwang int
i40e_vsi_delete_vlan(struct i40e_vsi * vsi,uint16_t vlan)7348a9643ea8Slogwang i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7349a9643ea8Slogwang {
7350a9643ea8Slogwang 	struct i40e_macvlan_filter *mv_f;
7351a9643ea8Slogwang 	int mac_num;
7352a9643ea8Slogwang 	int ret = I40E_SUCCESS;
7353a9643ea8Slogwang 
7354a9643ea8Slogwang 	/**
7355a9643ea8Slogwang 	 * Vlan 0 is the generic filter for untagged packets
7356a9643ea8Slogwang 	 * and can't be removed.
7357a9643ea8Slogwang 	 */
73584418919fSjohnjiang 	if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
7359a9643ea8Slogwang 		return I40E_ERR_PARAM;
7360a9643ea8Slogwang 
7361a9643ea8Slogwang 	/* If can't find it, just return */
7362a9643ea8Slogwang 	if (!i40e_find_vlan_filter(vsi, vlan))
7363a9643ea8Slogwang 		return I40E_ERR_PARAM;
7364a9643ea8Slogwang 
7365a9643ea8Slogwang 	mac_num = vsi->mac_num;
7366a9643ea8Slogwang 
7367a9643ea8Slogwang 	if (mac_num == 0) {
7368a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7369a9643ea8Slogwang 		return I40E_ERR_PARAM;
7370a9643ea8Slogwang 	}
7371a9643ea8Slogwang 
7372a9643ea8Slogwang 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7373a9643ea8Slogwang 
7374a9643ea8Slogwang 	if (mv_f == NULL) {
7375a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7376a9643ea8Slogwang 		return I40E_ERR_NO_MEMORY;
7377a9643ea8Slogwang 	}
7378a9643ea8Slogwang 
7379a9643ea8Slogwang 	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7380a9643ea8Slogwang 
7381a9643ea8Slogwang 	if (ret != I40E_SUCCESS)
7382a9643ea8Slogwang 		goto DONE;
7383a9643ea8Slogwang 
7384a9643ea8Slogwang 	ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
7385a9643ea8Slogwang 
7386a9643ea8Slogwang 	if (ret != I40E_SUCCESS)
7387a9643ea8Slogwang 		goto DONE;
7388a9643ea8Slogwang 
7389a9643ea8Slogwang 	/* This is last vlan to remove, replace all mac filter with vlan 0 */
7390a9643ea8Slogwang 	if (vsi->vlan_num == 1) {
7391a9643ea8Slogwang 		ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
7392a9643ea8Slogwang 		if (ret != I40E_SUCCESS)
7393a9643ea8Slogwang 			goto DONE;
7394a9643ea8Slogwang 
7395a9643ea8Slogwang 		ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7396a9643ea8Slogwang 		if (ret != I40E_SUCCESS)
7397a9643ea8Slogwang 			goto DONE;
7398a9643ea8Slogwang 	}
7399a9643ea8Slogwang 
7400a9643ea8Slogwang 	i40e_set_vlan_filter(vsi, vlan, 0);
7401a9643ea8Slogwang 
7402a9643ea8Slogwang 	vsi->vlan_num--;
7403a9643ea8Slogwang 	ret = I40E_SUCCESS;
7404a9643ea8Slogwang DONE:
7405a9643ea8Slogwang 	rte_free(mv_f);
7406a9643ea8Slogwang 	return ret;
7407a9643ea8Slogwang }
7408a9643ea8Slogwang 
7409a9643ea8Slogwang int
i40e_vsi_add_mac(struct i40e_vsi * vsi,struct i40e_mac_filter_info * mac_filter)7410a9643ea8Slogwang i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7411a9643ea8Slogwang {
7412a9643ea8Slogwang 	struct i40e_mac_filter *f;
7413a9643ea8Slogwang 	struct i40e_macvlan_filter *mv_f;
7414a9643ea8Slogwang 	int i, vlan_num = 0;
7415a9643ea8Slogwang 	int ret = I40E_SUCCESS;
7416a9643ea8Slogwang 
7417a9643ea8Slogwang 	/* If it's add and we've config it, return */
7418a9643ea8Slogwang 	f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7419a9643ea8Slogwang 	if (f != NULL)
7420a9643ea8Slogwang 		return I40E_SUCCESS;
7421*2d9fd380Sjfb8856606 	if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7422*2d9fd380Sjfb8856606 		mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7423a9643ea8Slogwang 
7424a9643ea8Slogwang 		/**
7425a9643ea8Slogwang 		 * If vlan_num is 0, that's the first time to add mac,
7426a9643ea8Slogwang 		 * set mask for vlan_id 0.
7427a9643ea8Slogwang 		 */
7428a9643ea8Slogwang 		if (vsi->vlan_num == 0) {
7429a9643ea8Slogwang 			i40e_set_vlan_filter(vsi, 0, 1);
7430a9643ea8Slogwang 			vsi->vlan_num = 1;
7431a9643ea8Slogwang 		}
7432a9643ea8Slogwang 		vlan_num = vsi->vlan_num;
7433*2d9fd380Sjfb8856606 	} else if (mac_filter->filter_type == I40E_MAC_PERFECT_MATCH ||
7434*2d9fd380Sjfb8856606 			mac_filter->filter_type == I40E_MAC_HASH_MATCH)
7435a9643ea8Slogwang 		vlan_num = 1;
7436a9643ea8Slogwang 
7437a9643ea8Slogwang 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7438a9643ea8Slogwang 	if (mv_f == NULL) {
7439a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7440a9643ea8Slogwang 		return I40E_ERR_NO_MEMORY;
7441a9643ea8Slogwang 	}
7442a9643ea8Slogwang 
7443a9643ea8Slogwang 	for (i = 0; i < vlan_num; i++) {
7444a9643ea8Slogwang 		mv_f[i].filter_type = mac_filter->filter_type;
74452bfe3f2eSlogwang 		rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7446a9643ea8Slogwang 				ETH_ADDR_LEN);
7447a9643ea8Slogwang 	}
7448a9643ea8Slogwang 
7449*2d9fd380Sjfb8856606 	if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7450*2d9fd380Sjfb8856606 		mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7451a9643ea8Slogwang 		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7452a9643ea8Slogwang 					&mac_filter->mac_addr);
7453a9643ea8Slogwang 		if (ret != I40E_SUCCESS)
7454a9643ea8Slogwang 			goto DONE;
7455a9643ea8Slogwang 	}
7456a9643ea8Slogwang 
7457a9643ea8Slogwang 	ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7458a9643ea8Slogwang 	if (ret != I40E_SUCCESS)
7459a9643ea8Slogwang 		goto DONE;
7460a9643ea8Slogwang 
7461a9643ea8Slogwang 	/* Add the mac addr into mac list */
7462a9643ea8Slogwang 	f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7463a9643ea8Slogwang 	if (f == NULL) {
7464a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7465a9643ea8Slogwang 		ret = I40E_ERR_NO_MEMORY;
7466a9643ea8Slogwang 		goto DONE;
7467a9643ea8Slogwang 	}
74682bfe3f2eSlogwang 	rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7469a9643ea8Slogwang 			ETH_ADDR_LEN);
7470a9643ea8Slogwang 	f->mac_info.filter_type = mac_filter->filter_type;
7471a9643ea8Slogwang 	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7472a9643ea8Slogwang 	vsi->mac_num++;
7473a9643ea8Slogwang 
7474a9643ea8Slogwang 	ret = I40E_SUCCESS;
7475a9643ea8Slogwang DONE:
7476a9643ea8Slogwang 	rte_free(mv_f);
7477a9643ea8Slogwang 
7478a9643ea8Slogwang 	return ret;
7479a9643ea8Slogwang }
7480a9643ea8Slogwang 
7481a9643ea8Slogwang int
i40e_vsi_delete_mac(struct i40e_vsi * vsi,struct rte_ether_addr * addr)74824418919fSjohnjiang i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
7483a9643ea8Slogwang {
7484a9643ea8Slogwang 	struct i40e_mac_filter *f;
7485a9643ea8Slogwang 	struct i40e_macvlan_filter *mv_f;
7486a9643ea8Slogwang 	int i, vlan_num;
7487*2d9fd380Sjfb8856606 	enum i40e_mac_filter_type filter_type;
7488a9643ea8Slogwang 	int ret = I40E_SUCCESS;
7489a9643ea8Slogwang 
7490a9643ea8Slogwang 	/* Can't find it, return an error */
7491a9643ea8Slogwang 	f = i40e_find_mac_filter(vsi, addr);
7492a9643ea8Slogwang 	if (f == NULL)
7493a9643ea8Slogwang 		return I40E_ERR_PARAM;
7494a9643ea8Slogwang 
7495a9643ea8Slogwang 	vlan_num = vsi->vlan_num;
7496a9643ea8Slogwang 	filter_type = f->mac_info.filter_type;
7497*2d9fd380Sjfb8856606 	if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7498*2d9fd380Sjfb8856606 		filter_type == I40E_MACVLAN_HASH_MATCH) {
7499a9643ea8Slogwang 		if (vlan_num == 0) {
75002bfe3f2eSlogwang 			PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7501a9643ea8Slogwang 			return I40E_ERR_PARAM;
7502a9643ea8Slogwang 		}
7503*2d9fd380Sjfb8856606 	} else if (filter_type == I40E_MAC_PERFECT_MATCH ||
7504*2d9fd380Sjfb8856606 			filter_type == I40E_MAC_HASH_MATCH)
7505a9643ea8Slogwang 		vlan_num = 1;
7506a9643ea8Slogwang 
7507a9643ea8Slogwang 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7508a9643ea8Slogwang 	if (mv_f == NULL) {
7509a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7510a9643ea8Slogwang 		return I40E_ERR_NO_MEMORY;
7511a9643ea8Slogwang 	}
7512a9643ea8Slogwang 
7513a9643ea8Slogwang 	for (i = 0; i < vlan_num; i++) {
7514a9643ea8Slogwang 		mv_f[i].filter_type = filter_type;
75152bfe3f2eSlogwang 		rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7516a9643ea8Slogwang 				ETH_ADDR_LEN);
7517a9643ea8Slogwang 	}
7518*2d9fd380Sjfb8856606 	if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7519*2d9fd380Sjfb8856606 			filter_type == I40E_MACVLAN_HASH_MATCH) {
7520a9643ea8Slogwang 		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7521a9643ea8Slogwang 		if (ret != I40E_SUCCESS)
7522a9643ea8Slogwang 			goto DONE;
7523a9643ea8Slogwang 	}
7524a9643ea8Slogwang 
7525a9643ea8Slogwang 	ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7526a9643ea8Slogwang 	if (ret != I40E_SUCCESS)
7527a9643ea8Slogwang 		goto DONE;
7528a9643ea8Slogwang 
7529a9643ea8Slogwang 	/* Remove the mac addr into mac list */
7530a9643ea8Slogwang 	TAILQ_REMOVE(&vsi->mac_list, f, next);
7531a9643ea8Slogwang 	rte_free(f);
7532a9643ea8Slogwang 	vsi->mac_num--;
7533a9643ea8Slogwang 
7534a9643ea8Slogwang 	ret = I40E_SUCCESS;
7535a9643ea8Slogwang DONE:
7536a9643ea8Slogwang 	rte_free(mv_f);
7537a9643ea8Slogwang 	return ret;
7538a9643ea8Slogwang }
7539a9643ea8Slogwang 
7540a9643ea8Slogwang /* Configure hash enable flags for RSS */
7541a9643ea8Slogwang uint64_t
i40e_config_hena(const struct i40e_adapter * adapter,uint64_t flags)75422bfe3f2eSlogwang i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7543a9643ea8Slogwang {
7544a9643ea8Slogwang 	uint64_t hena = 0;
75452bfe3f2eSlogwang 	int i;
7546a9643ea8Slogwang 
7547a9643ea8Slogwang 	if (!flags)
7548a9643ea8Slogwang 		return hena;
7549a9643ea8Slogwang 
75502bfe3f2eSlogwang 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
75512bfe3f2eSlogwang 		if (flags & (1ULL << i))
75522bfe3f2eSlogwang 			hena |= adapter->pctypes_tbl[i];
75532bfe3f2eSlogwang 	}
7554a9643ea8Slogwang 
7555a9643ea8Slogwang 	return hena;
7556a9643ea8Slogwang }
7557a9643ea8Slogwang 
7558a9643ea8Slogwang /* Parse the hash enable flags */
7559a9643ea8Slogwang uint64_t
i40e_parse_hena(const struct i40e_adapter * adapter,uint64_t flags)75602bfe3f2eSlogwang i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7561a9643ea8Slogwang {
7562a9643ea8Slogwang 	uint64_t rss_hf = 0;
7563a9643ea8Slogwang 
7564a9643ea8Slogwang 	if (!flags)
7565a9643ea8Slogwang 		return rss_hf;
75662bfe3f2eSlogwang 	int i;
7567a9643ea8Slogwang 
75682bfe3f2eSlogwang 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
75692bfe3f2eSlogwang 		if (flags & adapter->pctypes_tbl[i])
75702bfe3f2eSlogwang 			rss_hf |= (1ULL << i);
75712bfe3f2eSlogwang 	}
7572a9643ea8Slogwang 	return rss_hf;
7573a9643ea8Slogwang }
7574a9643ea8Slogwang 
7575a9643ea8Slogwang /* Disable RSS */
7576a9643ea8Slogwang static void
i40e_pf_disable_rss(struct i40e_pf * pf)7577a9643ea8Slogwang i40e_pf_disable_rss(struct i40e_pf *pf)
7578a9643ea8Slogwang {
7579a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7580a9643ea8Slogwang 
75812bfe3f2eSlogwang 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
75822bfe3f2eSlogwang 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7583a9643ea8Slogwang 	I40E_WRITE_FLUSH(hw);
7584a9643ea8Slogwang }
7585a9643ea8Slogwang 
7586d30ea906Sjfb8856606 int
i40e_set_rss_key(struct i40e_vsi * vsi,uint8_t * key,uint8_t key_len)7587a9643ea8Slogwang i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7588a9643ea8Slogwang {
7589a9643ea8Slogwang 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7590a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7591d30ea906Sjfb8856606 	uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7592d30ea906Sjfb8856606 			   I40E_VFQF_HKEY_MAX_INDEX :
7593d30ea906Sjfb8856606 			   I40E_PFQF_HKEY_MAX_INDEX;
7594a9643ea8Slogwang 	int ret = 0;
7595a9643ea8Slogwang 
7596a9643ea8Slogwang 	if (!key || key_len == 0) {
7597a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "No key to be configured");
7598a9643ea8Slogwang 		return 0;
7599d30ea906Sjfb8856606 	} else if (key_len != (key_idx + 1) *
7600a9643ea8Slogwang 		sizeof(uint32_t)) {
7601a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7602a9643ea8Slogwang 		return -EINVAL;
7603a9643ea8Slogwang 	}
7604a9643ea8Slogwang 
7605a9643ea8Slogwang 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7606a9643ea8Slogwang 		struct i40e_aqc_get_set_rss_key_data *key_dw =
7607a9643ea8Slogwang 			(struct i40e_aqc_get_set_rss_key_data *)key;
7608a9643ea8Slogwang 
7609a9643ea8Slogwang 		ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7610a9643ea8Slogwang 		if (ret)
76112bfe3f2eSlogwang 			PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
7612a9643ea8Slogwang 	} else {
7613a9643ea8Slogwang 		uint32_t *hash_key = (uint32_t *)key;
7614a9643ea8Slogwang 		uint16_t i;
7615a9643ea8Slogwang 
7616d30ea906Sjfb8856606 		if (vsi->type == I40E_VSI_SRIOV) {
7617d30ea906Sjfb8856606 			for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7618d30ea906Sjfb8856606 				I40E_WRITE_REG(
7619d30ea906Sjfb8856606 					hw,
7620d30ea906Sjfb8856606 					I40E_VFQF_HKEY1(i, vsi->user_param),
7621d30ea906Sjfb8856606 					hash_key[i]);
7622d30ea906Sjfb8856606 
7623d30ea906Sjfb8856606 		} else {
7624a9643ea8Slogwang 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7625d30ea906Sjfb8856606 				I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7626d30ea906Sjfb8856606 					       hash_key[i]);
7627d30ea906Sjfb8856606 		}
7628a9643ea8Slogwang 		I40E_WRITE_FLUSH(hw);
7629a9643ea8Slogwang 	}
7630a9643ea8Slogwang 
7631a9643ea8Slogwang 	return ret;
7632a9643ea8Slogwang }
7633a9643ea8Slogwang 
7634a9643ea8Slogwang static int
i40e_get_rss_key(struct i40e_vsi * vsi,uint8_t * key,uint8_t * key_len)7635a9643ea8Slogwang i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7636a9643ea8Slogwang {
7637a9643ea8Slogwang 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7638a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7639d30ea906Sjfb8856606 	uint32_t reg;
7640a9643ea8Slogwang 	int ret;
7641a9643ea8Slogwang 
7642a9643ea8Slogwang 	if (!key || !key_len)
76431646932aSjfb8856606 		return 0;
7644a9643ea8Slogwang 
7645a9643ea8Slogwang 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7646a9643ea8Slogwang 		ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7647a9643ea8Slogwang 			(struct i40e_aqc_get_set_rss_key_data *)key);
7648a9643ea8Slogwang 		if (ret) {
7649a9643ea8Slogwang 			PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7650a9643ea8Slogwang 			return ret;
7651a9643ea8Slogwang 		}
7652a9643ea8Slogwang 	} else {
7653a9643ea8Slogwang 		uint32_t *key_dw = (uint32_t *)key;
7654a9643ea8Slogwang 		uint16_t i;
7655a9643ea8Slogwang 
7656d30ea906Sjfb8856606 		if (vsi->type == I40E_VSI_SRIOV) {
7657d30ea906Sjfb8856606 			for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7658d30ea906Sjfb8856606 				reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7659d30ea906Sjfb8856606 				key_dw[i] = i40e_read_rx_ctl(hw, reg);
7660a9643ea8Slogwang 			}
7661d30ea906Sjfb8856606 			*key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7662d30ea906Sjfb8856606 				   sizeof(uint32_t);
7663d30ea906Sjfb8856606 		} else {
7664d30ea906Sjfb8856606 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7665d30ea906Sjfb8856606 				reg = I40E_PFQF_HKEY(i);
7666d30ea906Sjfb8856606 				key_dw[i] = i40e_read_rx_ctl(hw, reg);
7667d30ea906Sjfb8856606 			}
7668d30ea906Sjfb8856606 			*key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7669d30ea906Sjfb8856606 				   sizeof(uint32_t);
7670d30ea906Sjfb8856606 		}
7671d30ea906Sjfb8856606 	}
7672a9643ea8Slogwang 	return 0;
7673a9643ea8Slogwang }
7674a9643ea8Slogwang 
7675a9643ea8Slogwang static int
i40e_hw_rss_hash_set(struct i40e_pf * pf,struct rte_eth_rss_conf * rss_conf)7676a9643ea8Slogwang i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7677a9643ea8Slogwang {
7678a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7679a9643ea8Slogwang 	uint64_t hena;
7680a9643ea8Slogwang 	int ret;
7681a9643ea8Slogwang 
7682a9643ea8Slogwang 	ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7683a9643ea8Slogwang 			       rss_conf->rss_key_len);
7684a9643ea8Slogwang 	if (ret)
7685a9643ea8Slogwang 		return ret;
7686a9643ea8Slogwang 
76872bfe3f2eSlogwang 	hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7688a9643ea8Slogwang 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7689a9643ea8Slogwang 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7690a9643ea8Slogwang 	I40E_WRITE_FLUSH(hw);
7691a9643ea8Slogwang 
7692a9643ea8Slogwang 	return 0;
7693a9643ea8Slogwang }
7694a9643ea8Slogwang 
7695a9643ea8Slogwang static int
i40e_dev_rss_hash_update(struct rte_eth_dev * dev,struct rte_eth_rss_conf * rss_conf)7696a9643ea8Slogwang i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7697a9643ea8Slogwang 			 struct rte_eth_rss_conf *rss_conf)
7698a9643ea8Slogwang {
7699a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7700a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
77012bfe3f2eSlogwang 	uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7702a9643ea8Slogwang 	uint64_t hena;
7703a9643ea8Slogwang 
7704a9643ea8Slogwang 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7705a9643ea8Slogwang 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
77062bfe3f2eSlogwang 
77072bfe3f2eSlogwang 	if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7708a9643ea8Slogwang 		if (rss_hf != 0) /* Enable RSS */
7709a9643ea8Slogwang 			return -EINVAL;
7710a9643ea8Slogwang 		return 0; /* Nothing to do */
7711a9643ea8Slogwang 	}
7712a9643ea8Slogwang 	/* RSS enabled */
7713a9643ea8Slogwang 	if (rss_hf == 0) /* Disable RSS */
7714a9643ea8Slogwang 		return -EINVAL;
7715a9643ea8Slogwang 
7716a9643ea8Slogwang 	return i40e_hw_rss_hash_set(pf, rss_conf);
7717a9643ea8Slogwang }
7718a9643ea8Slogwang 
7719a9643ea8Slogwang static int
i40e_dev_rss_hash_conf_get(struct rte_eth_dev * dev,struct rte_eth_rss_conf * rss_conf)7720a9643ea8Slogwang i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7721a9643ea8Slogwang 			   struct rte_eth_rss_conf *rss_conf)
7722a9643ea8Slogwang {
7723a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7724a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7725a9643ea8Slogwang 	uint64_t hena;
77261646932aSjfb8856606 	int ret;
7727a9643ea8Slogwang 
77281646932aSjfb8856606 	if (!rss_conf)
77291646932aSjfb8856606 		return -EINVAL;
77301646932aSjfb8856606 
77311646932aSjfb8856606 	ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7732a9643ea8Slogwang 			 &rss_conf->rss_key_len);
77331646932aSjfb8856606 	if (ret)
77341646932aSjfb8856606 		return ret;
7735a9643ea8Slogwang 
7736a9643ea8Slogwang 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7737a9643ea8Slogwang 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
77382bfe3f2eSlogwang 	rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7739a9643ea8Slogwang 
7740a9643ea8Slogwang 	return 0;
7741a9643ea8Slogwang }
7742a9643ea8Slogwang 
7743a9643ea8Slogwang static int
i40e_dev_get_filter_type(uint16_t filter_type,uint16_t * flag)7744a9643ea8Slogwang i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7745a9643ea8Slogwang {
7746a9643ea8Slogwang 	switch (filter_type) {
7747a9643ea8Slogwang 	case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7748a9643ea8Slogwang 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7749a9643ea8Slogwang 		break;
7750a9643ea8Slogwang 	case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7751a9643ea8Slogwang 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7752a9643ea8Slogwang 		break;
7753a9643ea8Slogwang 	case RTE_TUNNEL_FILTER_IMAC_TENID:
7754a9643ea8Slogwang 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7755a9643ea8Slogwang 		break;
7756a9643ea8Slogwang 	case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7757a9643ea8Slogwang 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7758a9643ea8Slogwang 		break;
7759a9643ea8Slogwang 	case ETH_TUNNEL_FILTER_IMAC:
7760a9643ea8Slogwang 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7761a9643ea8Slogwang 		break;
7762a9643ea8Slogwang 	case ETH_TUNNEL_FILTER_OIP:
7763a9643ea8Slogwang 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7764a9643ea8Slogwang 		break;
7765a9643ea8Slogwang 	case ETH_TUNNEL_FILTER_IIP:
7766a9643ea8Slogwang 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7767a9643ea8Slogwang 		break;
7768a9643ea8Slogwang 	default:
7769a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7770a9643ea8Slogwang 		return -EINVAL;
7771a9643ea8Slogwang 	}
7772a9643ea8Slogwang 
7773a9643ea8Slogwang 	return 0;
7774a9643ea8Slogwang }
7775a9643ea8Slogwang 
77762bfe3f2eSlogwang /* Convert tunnel filter structure */
7777a9643ea8Slogwang static int
i40e_tunnel_filter_convert(struct i40e_aqc_cloud_filters_element_bb * cld_filter,struct i40e_tunnel_filter * tunnel_filter)77782bfe3f2eSlogwang i40e_tunnel_filter_convert(
7779d30ea906Sjfb8856606 	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
77802bfe3f2eSlogwang 	struct i40e_tunnel_filter *tunnel_filter)
77812bfe3f2eSlogwang {
77824418919fSjohnjiang 	rte_ether_addr_copy((struct rte_ether_addr *)
77834418919fSjohnjiang 			&cld_filter->element.outer_mac,
77844418919fSjohnjiang 		(struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
77854418919fSjohnjiang 	rte_ether_addr_copy((struct rte_ether_addr *)
77864418919fSjohnjiang 			&cld_filter->element.inner_mac,
77874418919fSjohnjiang 		(struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
77882bfe3f2eSlogwang 	tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
77892bfe3f2eSlogwang 	if ((rte_le_to_cpu_16(cld_filter->element.flags) &
77902bfe3f2eSlogwang 	     I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
77912bfe3f2eSlogwang 	    I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
77922bfe3f2eSlogwang 		tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
77932bfe3f2eSlogwang 	else
77942bfe3f2eSlogwang 		tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
77952bfe3f2eSlogwang 	tunnel_filter->input.flags = cld_filter->element.flags;
77962bfe3f2eSlogwang 	tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
77972bfe3f2eSlogwang 	tunnel_filter->queue = cld_filter->element.queue_number;
77982bfe3f2eSlogwang 	rte_memcpy(tunnel_filter->input.general_fields,
77992bfe3f2eSlogwang 		   cld_filter->general_fields,
78002bfe3f2eSlogwang 		   sizeof(cld_filter->general_fields));
78012bfe3f2eSlogwang 
78022bfe3f2eSlogwang 	return 0;
78032bfe3f2eSlogwang }
78042bfe3f2eSlogwang 
78052bfe3f2eSlogwang /* Check if there exists the tunnel filter */
78062bfe3f2eSlogwang struct i40e_tunnel_filter *
i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule * tunnel_rule,const struct i40e_tunnel_filter_input * input)78072bfe3f2eSlogwang i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
78082bfe3f2eSlogwang 			     const struct i40e_tunnel_filter_input *input)
78092bfe3f2eSlogwang {
78102bfe3f2eSlogwang 	int ret;
78112bfe3f2eSlogwang 
78122bfe3f2eSlogwang 	ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
78132bfe3f2eSlogwang 	if (ret < 0)
78142bfe3f2eSlogwang 		return NULL;
78152bfe3f2eSlogwang 
78162bfe3f2eSlogwang 	return tunnel_rule->hash_map[ret];
78172bfe3f2eSlogwang }
78182bfe3f2eSlogwang 
78192bfe3f2eSlogwang /* Add a tunnel filter into the SW list */
78202bfe3f2eSlogwang static int
i40e_sw_tunnel_filter_insert(struct i40e_pf * pf,struct i40e_tunnel_filter * tunnel_filter)78212bfe3f2eSlogwang i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
78222bfe3f2eSlogwang 			     struct i40e_tunnel_filter *tunnel_filter)
78232bfe3f2eSlogwang {
78242bfe3f2eSlogwang 	struct i40e_tunnel_rule *rule = &pf->tunnel;
78252bfe3f2eSlogwang 	int ret;
78262bfe3f2eSlogwang 
78272bfe3f2eSlogwang 	ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
78282bfe3f2eSlogwang 	if (ret < 0) {
78292bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
78302bfe3f2eSlogwang 			    "Failed to insert tunnel filter to hash table %d!",
78312bfe3f2eSlogwang 			    ret);
78322bfe3f2eSlogwang 		return ret;
78332bfe3f2eSlogwang 	}
78342bfe3f2eSlogwang 	rule->hash_map[ret] = tunnel_filter;
78352bfe3f2eSlogwang 
78362bfe3f2eSlogwang 	TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
78372bfe3f2eSlogwang 
78382bfe3f2eSlogwang 	return 0;
78392bfe3f2eSlogwang }
78402bfe3f2eSlogwang 
78412bfe3f2eSlogwang /* Delete a tunnel filter from the SW list */
78422bfe3f2eSlogwang int
i40e_sw_tunnel_filter_del(struct i40e_pf * pf,struct i40e_tunnel_filter_input * input)78432bfe3f2eSlogwang i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
78442bfe3f2eSlogwang 			  struct i40e_tunnel_filter_input *input)
78452bfe3f2eSlogwang {
78462bfe3f2eSlogwang 	struct i40e_tunnel_rule *rule = &pf->tunnel;
78472bfe3f2eSlogwang 	struct i40e_tunnel_filter *tunnel_filter;
78482bfe3f2eSlogwang 	int ret;
78492bfe3f2eSlogwang 
78502bfe3f2eSlogwang 	ret = rte_hash_del_key(rule->hash_table, input);
78512bfe3f2eSlogwang 	if (ret < 0) {
78522bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
78532bfe3f2eSlogwang 			    "Failed to delete tunnel filter to hash table %d!",
78542bfe3f2eSlogwang 			    ret);
78552bfe3f2eSlogwang 		return ret;
78562bfe3f2eSlogwang 	}
78572bfe3f2eSlogwang 	tunnel_filter = rule->hash_map[ret];
78582bfe3f2eSlogwang 	rule->hash_map[ret] = NULL;
78592bfe3f2eSlogwang 
78602bfe3f2eSlogwang 	TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
78612bfe3f2eSlogwang 	rte_free(tunnel_filter);
78622bfe3f2eSlogwang 
78632bfe3f2eSlogwang 	return 0;
78642bfe3f2eSlogwang }
78652bfe3f2eSlogwang 
78662bfe3f2eSlogwang #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
78672bfe3f2eSlogwang #define I40E_TR_VXLAN_GRE_KEY_MASK		0x4
78682bfe3f2eSlogwang #define I40E_TR_GENEVE_KEY_MASK			0x8
78692bfe3f2eSlogwang #define I40E_TR_GENERIC_UDP_TUNNEL_MASK		0x40
78702bfe3f2eSlogwang #define I40E_TR_GRE_KEY_MASK			0x400
78712bfe3f2eSlogwang #define I40E_TR_GRE_KEY_WITH_XSUM_MASK		0x800
78722bfe3f2eSlogwang #define I40E_TR_GRE_NO_KEY_MASK			0x8000
7873*2d9fd380Sjfb8856606 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
7874*2d9fd380Sjfb8856606 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
7875*2d9fd380Sjfb8856606 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
7876*2d9fd380Sjfb8856606 #define I40E_DIRECTION_INGRESS_KEY		0x8000
7877*2d9fd380Sjfb8856606 #define I40E_TR_L4_TYPE_TCP			0x2
7878*2d9fd380Sjfb8856606 #define I40E_TR_L4_TYPE_UDP			0x4
7879*2d9fd380Sjfb8856606 #define I40E_TR_L4_TYPE_SCTP			0x8
78802bfe3f2eSlogwang 
78812bfe3f2eSlogwang static enum
i40e_replace_mpls_l1_filter(struct i40e_pf * pf)78822bfe3f2eSlogwang i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
78832bfe3f2eSlogwang {
78842bfe3f2eSlogwang 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
78852bfe3f2eSlogwang 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
78862bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7887d30ea906Sjfb8856606 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
78882bfe3f2eSlogwang 	enum i40e_status_code status = I40E_SUCCESS;
78892bfe3f2eSlogwang 
78902bfe3f2eSlogwang 	if (pf->support_multi_driver) {
78912bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
78922bfe3f2eSlogwang 		return I40E_NOT_SUPPORTED;
78932bfe3f2eSlogwang 	}
78942bfe3f2eSlogwang 
78952bfe3f2eSlogwang 	memset(&filter_replace, 0,
78962bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
78972bfe3f2eSlogwang 	memset(&filter_replace_buf, 0,
78982bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
78992bfe3f2eSlogwang 
79002bfe3f2eSlogwang 	/* create L1 filter */
79012bfe3f2eSlogwang 	filter_replace.old_filter_type =
79022bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
79032bfe3f2eSlogwang 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
79042bfe3f2eSlogwang 	filter_replace.tr_bit = 0;
79052bfe3f2eSlogwang 
79062bfe3f2eSlogwang 	/* Prepare the buffer, 3 entries */
79072bfe3f2eSlogwang 	filter_replace_buf.data[0] =
79082bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
79092bfe3f2eSlogwang 	filter_replace_buf.data[0] |=
79102bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
79112bfe3f2eSlogwang 	filter_replace_buf.data[2] = 0xFF;
79122bfe3f2eSlogwang 	filter_replace_buf.data[3] = 0xFF;
79132bfe3f2eSlogwang 	filter_replace_buf.data[4] =
79142bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
79152bfe3f2eSlogwang 	filter_replace_buf.data[4] |=
79162bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
79172bfe3f2eSlogwang 	filter_replace_buf.data[7] = 0xF0;
79182bfe3f2eSlogwang 	filter_replace_buf.data[8]
79192bfe3f2eSlogwang 		= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
79202bfe3f2eSlogwang 	filter_replace_buf.data[8] |=
79212bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
79222bfe3f2eSlogwang 	filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
79232bfe3f2eSlogwang 		I40E_TR_GENEVE_KEY_MASK |
79242bfe3f2eSlogwang 		I40E_TR_GENERIC_UDP_TUNNEL_MASK;
79252bfe3f2eSlogwang 	filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
79262bfe3f2eSlogwang 		I40E_TR_GRE_KEY_WITH_XSUM_MASK |
79272bfe3f2eSlogwang 		I40E_TR_GRE_NO_KEY_MASK) >> 8;
79282bfe3f2eSlogwang 
79292bfe3f2eSlogwang 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
79302bfe3f2eSlogwang 					       &filter_replace_buf);
7931d30ea906Sjfb8856606 	if (!status && (filter_replace.old_filter_type !=
7932d30ea906Sjfb8856606 			filter_replace.new_filter_type))
7933d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
7934d30ea906Sjfb8856606 			    " original: 0x%x, new: 0x%x",
7935d30ea906Sjfb8856606 			    dev->device->name,
7936d30ea906Sjfb8856606 			    filter_replace.old_filter_type,
7937d30ea906Sjfb8856606 			    filter_replace.new_filter_type);
7938d30ea906Sjfb8856606 
79392bfe3f2eSlogwang 	return status;
79402bfe3f2eSlogwang }
79412bfe3f2eSlogwang 
79422bfe3f2eSlogwang static enum
i40e_replace_mpls_cloud_filter(struct i40e_pf * pf)79432bfe3f2eSlogwang i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
79442bfe3f2eSlogwang {
79452bfe3f2eSlogwang 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
79462bfe3f2eSlogwang 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
79472bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7948d30ea906Sjfb8856606 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
79492bfe3f2eSlogwang 	enum i40e_status_code status = I40E_SUCCESS;
79502bfe3f2eSlogwang 
79512bfe3f2eSlogwang 	if (pf->support_multi_driver) {
79522bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
79532bfe3f2eSlogwang 		return I40E_NOT_SUPPORTED;
79542bfe3f2eSlogwang 	}
79552bfe3f2eSlogwang 
79562bfe3f2eSlogwang 	/* For MPLSoUDP */
79572bfe3f2eSlogwang 	memset(&filter_replace, 0,
79582bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
79592bfe3f2eSlogwang 	memset(&filter_replace_buf, 0,
79602bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
79612bfe3f2eSlogwang 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
79622bfe3f2eSlogwang 		I40E_AQC_MIRROR_CLOUD_FILTER;
79632bfe3f2eSlogwang 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
79642bfe3f2eSlogwang 	filter_replace.new_filter_type =
79652bfe3f2eSlogwang 		I40E_AQC_ADD_CLOUD_FILTER_0X11;
79662bfe3f2eSlogwang 	/* Prepare the buffer, 2 entries */
79672bfe3f2eSlogwang 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
79682bfe3f2eSlogwang 	filter_replace_buf.data[0] |=
79692bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
79702bfe3f2eSlogwang 	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
79712bfe3f2eSlogwang 	filter_replace_buf.data[4] |=
79722bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
79732bfe3f2eSlogwang 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
79742bfe3f2eSlogwang 					       &filter_replace_buf);
79752bfe3f2eSlogwang 	if (status < 0)
79762bfe3f2eSlogwang 		return status;
7977d30ea906Sjfb8856606 	if (filter_replace.old_filter_type !=
7978d30ea906Sjfb8856606 	    filter_replace.new_filter_type)
7979d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7980d30ea906Sjfb8856606 			    " original: 0x%x, new: 0x%x",
7981d30ea906Sjfb8856606 			    dev->device->name,
7982d30ea906Sjfb8856606 			    filter_replace.old_filter_type,
7983d30ea906Sjfb8856606 			    filter_replace.new_filter_type);
79842bfe3f2eSlogwang 
79852bfe3f2eSlogwang 	/* For MPLSoGRE */
79862bfe3f2eSlogwang 	memset(&filter_replace, 0,
79872bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
79882bfe3f2eSlogwang 	memset(&filter_replace_buf, 0,
79892bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
79902bfe3f2eSlogwang 
79912bfe3f2eSlogwang 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
79922bfe3f2eSlogwang 		I40E_AQC_MIRROR_CLOUD_FILTER;
79932bfe3f2eSlogwang 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
79942bfe3f2eSlogwang 	filter_replace.new_filter_type =
79952bfe3f2eSlogwang 		I40E_AQC_ADD_CLOUD_FILTER_0X12;
79962bfe3f2eSlogwang 	/* Prepare the buffer, 2 entries */
79972bfe3f2eSlogwang 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
79982bfe3f2eSlogwang 	filter_replace_buf.data[0] |=
79992bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
80002bfe3f2eSlogwang 	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
80012bfe3f2eSlogwang 	filter_replace_buf.data[4] |=
80022bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
80032bfe3f2eSlogwang 
80042bfe3f2eSlogwang 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
80052bfe3f2eSlogwang 					       &filter_replace_buf);
8006d30ea906Sjfb8856606 	if (!status && (filter_replace.old_filter_type !=
8007d30ea906Sjfb8856606 			filter_replace.new_filter_type))
8008d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8009d30ea906Sjfb8856606 			    " original: 0x%x, new: 0x%x",
8010d30ea906Sjfb8856606 			    dev->device->name,
8011d30ea906Sjfb8856606 			    filter_replace.old_filter_type,
8012d30ea906Sjfb8856606 			    filter_replace.new_filter_type);
8013d30ea906Sjfb8856606 
80142bfe3f2eSlogwang 	return status;
80152bfe3f2eSlogwang }
80162bfe3f2eSlogwang 
80172bfe3f2eSlogwang static enum i40e_status_code
i40e_replace_gtp_l1_filter(struct i40e_pf * pf)80182bfe3f2eSlogwang i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
80192bfe3f2eSlogwang {
80202bfe3f2eSlogwang 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
80212bfe3f2eSlogwang 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
80222bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8023d30ea906Sjfb8856606 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
80242bfe3f2eSlogwang 	enum i40e_status_code status = I40E_SUCCESS;
80252bfe3f2eSlogwang 
80262bfe3f2eSlogwang 	if (pf->support_multi_driver) {
80272bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
80282bfe3f2eSlogwang 		return I40E_NOT_SUPPORTED;
80292bfe3f2eSlogwang 	}
80302bfe3f2eSlogwang 
80312bfe3f2eSlogwang 	/* For GTP-C */
80322bfe3f2eSlogwang 	memset(&filter_replace, 0,
80332bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
80342bfe3f2eSlogwang 	memset(&filter_replace_buf, 0,
80352bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
80362bfe3f2eSlogwang 	/* create L1 filter */
80372bfe3f2eSlogwang 	filter_replace.old_filter_type =
80382bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
80392bfe3f2eSlogwang 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
80402bfe3f2eSlogwang 	filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
80412bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
80422bfe3f2eSlogwang 	/* Prepare the buffer, 2 entries */
80432bfe3f2eSlogwang 	filter_replace_buf.data[0] =
80442bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
80452bfe3f2eSlogwang 	filter_replace_buf.data[0] |=
80462bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
80472bfe3f2eSlogwang 	filter_replace_buf.data[2] = 0xFF;
80482bfe3f2eSlogwang 	filter_replace_buf.data[3] = 0xFF;
80492bfe3f2eSlogwang 	filter_replace_buf.data[4] =
80502bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
80512bfe3f2eSlogwang 	filter_replace_buf.data[4] |=
80522bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
80532bfe3f2eSlogwang 	filter_replace_buf.data[6] = 0xFF;
80542bfe3f2eSlogwang 	filter_replace_buf.data[7] = 0xFF;
80552bfe3f2eSlogwang 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
80562bfe3f2eSlogwang 					       &filter_replace_buf);
80572bfe3f2eSlogwang 	if (status < 0)
80582bfe3f2eSlogwang 		return status;
8059d30ea906Sjfb8856606 	if (filter_replace.old_filter_type !=
8060d30ea906Sjfb8856606 	    filter_replace.new_filter_type)
8061d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8062d30ea906Sjfb8856606 			    " original: 0x%x, new: 0x%x",
8063d30ea906Sjfb8856606 			    dev->device->name,
80642bfe3f2eSlogwang 			    filter_replace.old_filter_type,
80652bfe3f2eSlogwang 			    filter_replace.new_filter_type);
80662bfe3f2eSlogwang 
80672bfe3f2eSlogwang 	/* for GTP-U */
80682bfe3f2eSlogwang 	memset(&filter_replace, 0,
80692bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
80702bfe3f2eSlogwang 	memset(&filter_replace_buf, 0,
80712bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
80722bfe3f2eSlogwang 	/* create L1 filter */
80732bfe3f2eSlogwang 	filter_replace.old_filter_type =
80742bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
80752bfe3f2eSlogwang 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
80762bfe3f2eSlogwang 	filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
80772bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
80782bfe3f2eSlogwang 	/* Prepare the buffer, 2 entries */
80792bfe3f2eSlogwang 	filter_replace_buf.data[0] =
80802bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
80812bfe3f2eSlogwang 	filter_replace_buf.data[0] |=
80822bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
80832bfe3f2eSlogwang 	filter_replace_buf.data[2] = 0xFF;
80842bfe3f2eSlogwang 	filter_replace_buf.data[3] = 0xFF;
80852bfe3f2eSlogwang 	filter_replace_buf.data[4] =
80862bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
80872bfe3f2eSlogwang 	filter_replace_buf.data[4] |=
80882bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
80892bfe3f2eSlogwang 	filter_replace_buf.data[6] = 0xFF;
80902bfe3f2eSlogwang 	filter_replace_buf.data[7] = 0xFF;
80912bfe3f2eSlogwang 
80922bfe3f2eSlogwang 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
80932bfe3f2eSlogwang 					       &filter_replace_buf);
8094d30ea906Sjfb8856606 	if (!status && (filter_replace.old_filter_type !=
8095d30ea906Sjfb8856606 			filter_replace.new_filter_type))
8096d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8097d30ea906Sjfb8856606 			    " original: 0x%x, new: 0x%x",
8098d30ea906Sjfb8856606 			    dev->device->name,
80992bfe3f2eSlogwang 			    filter_replace.old_filter_type,
81002bfe3f2eSlogwang 			    filter_replace.new_filter_type);
8101d30ea906Sjfb8856606 
81022bfe3f2eSlogwang 	return status;
81032bfe3f2eSlogwang }
81042bfe3f2eSlogwang 
81052bfe3f2eSlogwang static enum
i40e_replace_gtp_cloud_filter(struct i40e_pf * pf)81062bfe3f2eSlogwang i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
81072bfe3f2eSlogwang {
81082bfe3f2eSlogwang 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
81092bfe3f2eSlogwang 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
81102bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8111d30ea906Sjfb8856606 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
81122bfe3f2eSlogwang 	enum i40e_status_code status = I40E_SUCCESS;
81132bfe3f2eSlogwang 
81142bfe3f2eSlogwang 	if (pf->support_multi_driver) {
81152bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
81162bfe3f2eSlogwang 		return I40E_NOT_SUPPORTED;
81172bfe3f2eSlogwang 	}
81182bfe3f2eSlogwang 
81192bfe3f2eSlogwang 	/* for GTP-C */
81202bfe3f2eSlogwang 	memset(&filter_replace, 0,
81212bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
81222bfe3f2eSlogwang 	memset(&filter_replace_buf, 0,
81232bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
81242bfe3f2eSlogwang 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
81252bfe3f2eSlogwang 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
81262bfe3f2eSlogwang 	filter_replace.new_filter_type =
81272bfe3f2eSlogwang 		I40E_AQC_ADD_CLOUD_FILTER_0X11;
81282bfe3f2eSlogwang 	/* Prepare the buffer, 2 entries */
81292bfe3f2eSlogwang 	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
81302bfe3f2eSlogwang 	filter_replace_buf.data[0] |=
81312bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
81322bfe3f2eSlogwang 	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
81332bfe3f2eSlogwang 	filter_replace_buf.data[4] |=
81342bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
81352bfe3f2eSlogwang 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
81362bfe3f2eSlogwang 					       &filter_replace_buf);
81372bfe3f2eSlogwang 	if (status < 0)
81382bfe3f2eSlogwang 		return status;
8139d30ea906Sjfb8856606 	if (filter_replace.old_filter_type !=
8140d30ea906Sjfb8856606 	    filter_replace.new_filter_type)
8141d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8142d30ea906Sjfb8856606 			    " original: 0x%x, new: 0x%x",
8143d30ea906Sjfb8856606 			    dev->device->name,
81442bfe3f2eSlogwang 			    filter_replace.old_filter_type,
81452bfe3f2eSlogwang 			    filter_replace.new_filter_type);
81462bfe3f2eSlogwang 
81472bfe3f2eSlogwang 	/* for GTP-U */
81482bfe3f2eSlogwang 	memset(&filter_replace, 0,
81492bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
81502bfe3f2eSlogwang 	memset(&filter_replace_buf, 0,
81512bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
81522bfe3f2eSlogwang 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
81532bfe3f2eSlogwang 	filter_replace.old_filter_type =
81542bfe3f2eSlogwang 		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
81552bfe3f2eSlogwang 	filter_replace.new_filter_type =
81562bfe3f2eSlogwang 		I40E_AQC_ADD_CLOUD_FILTER_0X12;
81572bfe3f2eSlogwang 	/* Prepare the buffer, 2 entries */
81582bfe3f2eSlogwang 	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
81592bfe3f2eSlogwang 	filter_replace_buf.data[0] |=
81602bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
81612bfe3f2eSlogwang 	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
81622bfe3f2eSlogwang 	filter_replace_buf.data[4] |=
81632bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
81642bfe3f2eSlogwang 
81652bfe3f2eSlogwang 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
81662bfe3f2eSlogwang 					       &filter_replace_buf);
8167d30ea906Sjfb8856606 	if (!status && (filter_replace.old_filter_type !=
8168d30ea906Sjfb8856606 			filter_replace.new_filter_type))
8169d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8170d30ea906Sjfb8856606 			    " original: 0x%x, new: 0x%x",
8171d30ea906Sjfb8856606 			    dev->device->name,
81722bfe3f2eSlogwang 			    filter_replace.old_filter_type,
81732bfe3f2eSlogwang 			    filter_replace.new_filter_type);
8174d30ea906Sjfb8856606 
81752bfe3f2eSlogwang 	return status;
81762bfe3f2eSlogwang }
81772bfe3f2eSlogwang 
8178*2d9fd380Sjfb8856606 static enum i40e_status_code
i40e_replace_port_l1_filter(struct i40e_pf * pf,enum i40e_l4_port_type l4_port_type)8179*2d9fd380Sjfb8856606 i40e_replace_port_l1_filter(struct i40e_pf *pf,
8180*2d9fd380Sjfb8856606 			    enum i40e_l4_port_type l4_port_type)
8181*2d9fd380Sjfb8856606 {
8182*2d9fd380Sjfb8856606 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8183*2d9fd380Sjfb8856606 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8184*2d9fd380Sjfb8856606 	enum i40e_status_code status = I40E_SUCCESS;
8185*2d9fd380Sjfb8856606 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8186*2d9fd380Sjfb8856606 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8187*2d9fd380Sjfb8856606 
8188*2d9fd380Sjfb8856606 	if (pf->support_multi_driver) {
8189*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8190*2d9fd380Sjfb8856606 		return I40E_NOT_SUPPORTED;
8191*2d9fd380Sjfb8856606 	}
8192*2d9fd380Sjfb8856606 
8193*2d9fd380Sjfb8856606 	memset(&filter_replace, 0,
8194*2d9fd380Sjfb8856606 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8195*2d9fd380Sjfb8856606 	memset(&filter_replace_buf, 0,
8196*2d9fd380Sjfb8856606 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8197*2d9fd380Sjfb8856606 
8198*2d9fd380Sjfb8856606 	/* create L1 filter */
8199*2d9fd380Sjfb8856606 	if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8200*2d9fd380Sjfb8856606 		filter_replace.old_filter_type =
8201*2d9fd380Sjfb8856606 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8202*2d9fd380Sjfb8856606 		filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8203*2d9fd380Sjfb8856606 		filter_replace_buf.data[8] =
8204*2d9fd380Sjfb8856606 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
8205*2d9fd380Sjfb8856606 	} else {
8206*2d9fd380Sjfb8856606 		filter_replace.old_filter_type =
8207*2d9fd380Sjfb8856606 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
8208*2d9fd380Sjfb8856606 		filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
8209*2d9fd380Sjfb8856606 		filter_replace_buf.data[8] =
8210*2d9fd380Sjfb8856606 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
8211*2d9fd380Sjfb8856606 	}
8212*2d9fd380Sjfb8856606 
8213*2d9fd380Sjfb8856606 	filter_replace.tr_bit = 0;
8214*2d9fd380Sjfb8856606 	/* Prepare the buffer, 3 entries */
8215*2d9fd380Sjfb8856606 	filter_replace_buf.data[0] =
8216*2d9fd380Sjfb8856606 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
8217*2d9fd380Sjfb8856606 	filter_replace_buf.data[0] |=
8218*2d9fd380Sjfb8856606 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8219*2d9fd380Sjfb8856606 	filter_replace_buf.data[2] = 0x00;
8220*2d9fd380Sjfb8856606 	filter_replace_buf.data[3] =
8221*2d9fd380Sjfb8856606 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
8222*2d9fd380Sjfb8856606 	filter_replace_buf.data[4] =
8223*2d9fd380Sjfb8856606 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
8224*2d9fd380Sjfb8856606 	filter_replace_buf.data[4] |=
8225*2d9fd380Sjfb8856606 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8226*2d9fd380Sjfb8856606 	filter_replace_buf.data[5] = 0x00;
8227*2d9fd380Sjfb8856606 	filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
8228*2d9fd380Sjfb8856606 		I40E_TR_L4_TYPE_TCP |
8229*2d9fd380Sjfb8856606 		I40E_TR_L4_TYPE_SCTP;
8230*2d9fd380Sjfb8856606 	filter_replace_buf.data[7] = 0x00;
8231*2d9fd380Sjfb8856606 	filter_replace_buf.data[8] |=
8232*2d9fd380Sjfb8856606 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8233*2d9fd380Sjfb8856606 	filter_replace_buf.data[9] = 0x00;
8234*2d9fd380Sjfb8856606 	filter_replace_buf.data[10] = 0xFF;
8235*2d9fd380Sjfb8856606 	filter_replace_buf.data[11] = 0xFF;
8236*2d9fd380Sjfb8856606 
8237*2d9fd380Sjfb8856606 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8238*2d9fd380Sjfb8856606 					       &filter_replace_buf);
8239*2d9fd380Sjfb8856606 	if (!status && filter_replace.old_filter_type !=
8240*2d9fd380Sjfb8856606 	    filter_replace.new_filter_type)
8241*2d9fd380Sjfb8856606 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8242*2d9fd380Sjfb8856606 			    " original: 0x%x, new: 0x%x",
8243*2d9fd380Sjfb8856606 			    dev->device->name,
8244*2d9fd380Sjfb8856606 			    filter_replace.old_filter_type,
8245*2d9fd380Sjfb8856606 			    filter_replace.new_filter_type);
8246*2d9fd380Sjfb8856606 
8247*2d9fd380Sjfb8856606 	return status;
8248*2d9fd380Sjfb8856606 }
8249*2d9fd380Sjfb8856606 
8250*2d9fd380Sjfb8856606 static enum i40e_status_code
i40e_replace_port_cloud_filter(struct i40e_pf * pf,enum i40e_l4_port_type l4_port_type)8251*2d9fd380Sjfb8856606 i40e_replace_port_cloud_filter(struct i40e_pf *pf,
8252*2d9fd380Sjfb8856606 			       enum i40e_l4_port_type l4_port_type)
8253*2d9fd380Sjfb8856606 {
8254*2d9fd380Sjfb8856606 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8255*2d9fd380Sjfb8856606 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8256*2d9fd380Sjfb8856606 	enum i40e_status_code status = I40E_SUCCESS;
8257*2d9fd380Sjfb8856606 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8258*2d9fd380Sjfb8856606 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8259*2d9fd380Sjfb8856606 
8260*2d9fd380Sjfb8856606 	if (pf->support_multi_driver) {
8261*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8262*2d9fd380Sjfb8856606 		return I40E_NOT_SUPPORTED;
8263*2d9fd380Sjfb8856606 	}
8264*2d9fd380Sjfb8856606 
8265*2d9fd380Sjfb8856606 	memset(&filter_replace, 0,
8266*2d9fd380Sjfb8856606 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8267*2d9fd380Sjfb8856606 	memset(&filter_replace_buf, 0,
8268*2d9fd380Sjfb8856606 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8269*2d9fd380Sjfb8856606 
8270*2d9fd380Sjfb8856606 	if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8271*2d9fd380Sjfb8856606 		filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8272*2d9fd380Sjfb8856606 		filter_replace.new_filter_type =
8273*2d9fd380Sjfb8856606 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
8274*2d9fd380Sjfb8856606 		filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
8275*2d9fd380Sjfb8856606 	} else {
8276*2d9fd380Sjfb8856606 		filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
8277*2d9fd380Sjfb8856606 		filter_replace.new_filter_type =
8278*2d9fd380Sjfb8856606 			I40E_AQC_ADD_CLOUD_FILTER_0X10;
8279*2d9fd380Sjfb8856606 		filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
8280*2d9fd380Sjfb8856606 	}
8281*2d9fd380Sjfb8856606 
8282*2d9fd380Sjfb8856606 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8283*2d9fd380Sjfb8856606 	filter_replace.tr_bit = 0;
8284*2d9fd380Sjfb8856606 	/* Prepare the buffer, 2 entries */
8285*2d9fd380Sjfb8856606 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8286*2d9fd380Sjfb8856606 	filter_replace_buf.data[0] |=
8287*2d9fd380Sjfb8856606 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8288*2d9fd380Sjfb8856606 	filter_replace_buf.data[4] |=
8289*2d9fd380Sjfb8856606 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8290*2d9fd380Sjfb8856606 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8291*2d9fd380Sjfb8856606 					       &filter_replace_buf);
8292*2d9fd380Sjfb8856606 
8293*2d9fd380Sjfb8856606 	if (!status && filter_replace.old_filter_type !=
8294*2d9fd380Sjfb8856606 	    filter_replace.new_filter_type)
8295*2d9fd380Sjfb8856606 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8296*2d9fd380Sjfb8856606 			    " original: 0x%x, new: 0x%x",
8297*2d9fd380Sjfb8856606 			    dev->device->name,
8298*2d9fd380Sjfb8856606 			    filter_replace.old_filter_type,
8299*2d9fd380Sjfb8856606 			    filter_replace.new_filter_type);
8300*2d9fd380Sjfb8856606 
8301*2d9fd380Sjfb8856606 	return status;
8302*2d9fd380Sjfb8856606 }
8303*2d9fd380Sjfb8856606 
83042bfe3f2eSlogwang int
i40e_dev_consistent_tunnel_filter_set(struct i40e_pf * pf,struct i40e_tunnel_filter_conf * tunnel_filter,uint8_t add)83052bfe3f2eSlogwang i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
83062bfe3f2eSlogwang 		      struct i40e_tunnel_filter_conf *tunnel_filter,
83072bfe3f2eSlogwang 		      uint8_t add)
83082bfe3f2eSlogwang {
83092bfe3f2eSlogwang 	uint16_t ip_type;
83102bfe3f2eSlogwang 	uint32_t ipv4_addr, ipv4_addr_le;
83112bfe3f2eSlogwang 	uint8_t i, tun_type = 0;
83122bfe3f2eSlogwang 	/* internal variable to convert ipv6 byte order */
83132bfe3f2eSlogwang 	uint32_t convert_ipv6[4];
83142bfe3f2eSlogwang 	int val, ret = 0;
83152bfe3f2eSlogwang 	struct i40e_pf_vf *vf = NULL;
83162bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
83172bfe3f2eSlogwang 	struct i40e_vsi *vsi;
8318d30ea906Sjfb8856606 	struct i40e_aqc_cloud_filters_element_bb *cld_filter;
8319d30ea906Sjfb8856606 	struct i40e_aqc_cloud_filters_element_bb *pfilter;
83202bfe3f2eSlogwang 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
83212bfe3f2eSlogwang 	struct i40e_tunnel_filter *tunnel, *node;
83222bfe3f2eSlogwang 	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
83232bfe3f2eSlogwang 	uint32_t teid_le;
83242bfe3f2eSlogwang 	bool big_buffer = 0;
83252bfe3f2eSlogwang 
83262bfe3f2eSlogwang 	cld_filter = rte_zmalloc("tunnel_filter",
83272bfe3f2eSlogwang 			 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
83282bfe3f2eSlogwang 			 0);
83292bfe3f2eSlogwang 
83302bfe3f2eSlogwang 	if (cld_filter == NULL) {
83312bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
83322bfe3f2eSlogwang 		return -ENOMEM;
83332bfe3f2eSlogwang 	}
83342bfe3f2eSlogwang 	pfilter = cld_filter;
83352bfe3f2eSlogwang 
83364418919fSjohnjiang 	rte_ether_addr_copy(&tunnel_filter->outer_mac,
83374418919fSjohnjiang 			(struct rte_ether_addr *)&pfilter->element.outer_mac);
83384418919fSjohnjiang 	rte_ether_addr_copy(&tunnel_filter->inner_mac,
83394418919fSjohnjiang 			(struct rte_ether_addr *)&pfilter->element.inner_mac);
83402bfe3f2eSlogwang 
83412bfe3f2eSlogwang 	pfilter->element.inner_vlan =
83422bfe3f2eSlogwang 		rte_cpu_to_le_16(tunnel_filter->inner_vlan);
83432bfe3f2eSlogwang 	if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
83442bfe3f2eSlogwang 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
83452bfe3f2eSlogwang 		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
83462bfe3f2eSlogwang 		ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
83472bfe3f2eSlogwang 		rte_memcpy(&pfilter->element.ipaddr.v4.data,
83482bfe3f2eSlogwang 				&ipv4_addr_le,
83492bfe3f2eSlogwang 				sizeof(pfilter->element.ipaddr.v4.data));
83502bfe3f2eSlogwang 	} else {
83512bfe3f2eSlogwang 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
83522bfe3f2eSlogwang 		for (i = 0; i < 4; i++) {
83532bfe3f2eSlogwang 			convert_ipv6[i] =
83542bfe3f2eSlogwang 			rte_cpu_to_le_32(rte_be_to_cpu_32(
83552bfe3f2eSlogwang 					 tunnel_filter->ip_addr.ipv6_addr[i]));
83562bfe3f2eSlogwang 		}
83572bfe3f2eSlogwang 		rte_memcpy(&pfilter->element.ipaddr.v6.data,
83582bfe3f2eSlogwang 			   &convert_ipv6,
83592bfe3f2eSlogwang 			   sizeof(pfilter->element.ipaddr.v6.data));
83602bfe3f2eSlogwang 	}
83612bfe3f2eSlogwang 
83622bfe3f2eSlogwang 	/* check tunneled type */
83632bfe3f2eSlogwang 	switch (tunnel_filter->tunnel_type) {
83642bfe3f2eSlogwang 	case I40E_TUNNEL_TYPE_VXLAN:
83652bfe3f2eSlogwang 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
83662bfe3f2eSlogwang 		break;
83672bfe3f2eSlogwang 	case I40E_TUNNEL_TYPE_NVGRE:
83682bfe3f2eSlogwang 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
83692bfe3f2eSlogwang 		break;
83702bfe3f2eSlogwang 	case I40E_TUNNEL_TYPE_IP_IN_GRE:
83712bfe3f2eSlogwang 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
83722bfe3f2eSlogwang 		break;
83732bfe3f2eSlogwang 	case I40E_TUNNEL_TYPE_MPLSoUDP:
83742bfe3f2eSlogwang 		if (!pf->mpls_replace_flag) {
83752bfe3f2eSlogwang 			i40e_replace_mpls_l1_filter(pf);
83762bfe3f2eSlogwang 			i40e_replace_mpls_cloud_filter(pf);
83772bfe3f2eSlogwang 			pf->mpls_replace_flag = 1;
83782bfe3f2eSlogwang 		}
83792bfe3f2eSlogwang 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
83802bfe3f2eSlogwang 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
83812bfe3f2eSlogwang 			teid_le >> 4;
83822bfe3f2eSlogwang 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
83832bfe3f2eSlogwang 			(teid_le & 0xF) << 12;
83842bfe3f2eSlogwang 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
83852bfe3f2eSlogwang 			0x40;
83862bfe3f2eSlogwang 		big_buffer = 1;
83872bfe3f2eSlogwang 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
83882bfe3f2eSlogwang 		break;
83892bfe3f2eSlogwang 	case I40E_TUNNEL_TYPE_MPLSoGRE:
83902bfe3f2eSlogwang 		if (!pf->mpls_replace_flag) {
83912bfe3f2eSlogwang 			i40e_replace_mpls_l1_filter(pf);
83922bfe3f2eSlogwang 			i40e_replace_mpls_cloud_filter(pf);
83932bfe3f2eSlogwang 			pf->mpls_replace_flag = 1;
83942bfe3f2eSlogwang 		}
83952bfe3f2eSlogwang 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
83962bfe3f2eSlogwang 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
83972bfe3f2eSlogwang 			teid_le >> 4;
83982bfe3f2eSlogwang 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
83992bfe3f2eSlogwang 			(teid_le & 0xF) << 12;
84002bfe3f2eSlogwang 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
84012bfe3f2eSlogwang 			0x0;
84022bfe3f2eSlogwang 		big_buffer = 1;
84032bfe3f2eSlogwang 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
84042bfe3f2eSlogwang 		break;
84052bfe3f2eSlogwang 	case I40E_TUNNEL_TYPE_GTPC:
84062bfe3f2eSlogwang 		if (!pf->gtp_replace_flag) {
84072bfe3f2eSlogwang 			i40e_replace_gtp_l1_filter(pf);
84082bfe3f2eSlogwang 			i40e_replace_gtp_cloud_filter(pf);
84092bfe3f2eSlogwang 			pf->gtp_replace_flag = 1;
84102bfe3f2eSlogwang 		}
84112bfe3f2eSlogwang 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
84122bfe3f2eSlogwang 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
84132bfe3f2eSlogwang 			(teid_le >> 16) & 0xFFFF;
84142bfe3f2eSlogwang 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
84152bfe3f2eSlogwang 			teid_le & 0xFFFF;
84162bfe3f2eSlogwang 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
84172bfe3f2eSlogwang 			0x0;
84182bfe3f2eSlogwang 		big_buffer = 1;
84192bfe3f2eSlogwang 		break;
84202bfe3f2eSlogwang 	case I40E_TUNNEL_TYPE_GTPU:
84212bfe3f2eSlogwang 		if (!pf->gtp_replace_flag) {
84222bfe3f2eSlogwang 			i40e_replace_gtp_l1_filter(pf);
84232bfe3f2eSlogwang 			i40e_replace_gtp_cloud_filter(pf);
84242bfe3f2eSlogwang 			pf->gtp_replace_flag = 1;
84252bfe3f2eSlogwang 		}
84262bfe3f2eSlogwang 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
84272bfe3f2eSlogwang 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
84282bfe3f2eSlogwang 			(teid_le >> 16) & 0xFFFF;
84292bfe3f2eSlogwang 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
84302bfe3f2eSlogwang 			teid_le & 0xFFFF;
84312bfe3f2eSlogwang 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
84322bfe3f2eSlogwang 			0x0;
84332bfe3f2eSlogwang 		big_buffer = 1;
84342bfe3f2eSlogwang 		break;
84352bfe3f2eSlogwang 	case I40E_TUNNEL_TYPE_QINQ:
84362bfe3f2eSlogwang 		if (!pf->qinq_replace_flag) {
84372bfe3f2eSlogwang 			ret = i40e_cloud_filter_qinq_create(pf);
84382bfe3f2eSlogwang 			if (ret < 0)
84392bfe3f2eSlogwang 				PMD_DRV_LOG(DEBUG,
84402bfe3f2eSlogwang 					    "QinQ tunnel filter already created.");
84412bfe3f2eSlogwang 			pf->qinq_replace_flag = 1;
84422bfe3f2eSlogwang 		}
84432bfe3f2eSlogwang 		/*	Add in the General fields the values of
84442bfe3f2eSlogwang 		 *	the Outer and Inner VLAN
84452bfe3f2eSlogwang 		 *	Big Buffer should be set, see changes in
84462bfe3f2eSlogwang 		 *	i40e_aq_add_cloud_filters
84472bfe3f2eSlogwang 		 */
84482bfe3f2eSlogwang 		pfilter->general_fields[0] = tunnel_filter->inner_vlan;
84492bfe3f2eSlogwang 		pfilter->general_fields[1] = tunnel_filter->outer_vlan;
84502bfe3f2eSlogwang 		big_buffer = 1;
84512bfe3f2eSlogwang 		break;
8452*2d9fd380Sjfb8856606 	case I40E_CLOUD_TYPE_UDP:
8453*2d9fd380Sjfb8856606 	case I40E_CLOUD_TYPE_TCP:
8454*2d9fd380Sjfb8856606 	case I40E_CLOUD_TYPE_SCTP:
8455*2d9fd380Sjfb8856606 		if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8456*2d9fd380Sjfb8856606 			if (!pf->sport_replace_flag) {
8457*2d9fd380Sjfb8856606 				i40e_replace_port_l1_filter(pf,
8458*2d9fd380Sjfb8856606 						tunnel_filter->l4_port_type);
8459*2d9fd380Sjfb8856606 				i40e_replace_port_cloud_filter(pf,
8460*2d9fd380Sjfb8856606 						tunnel_filter->l4_port_type);
8461*2d9fd380Sjfb8856606 				pf->sport_replace_flag = 1;
8462*2d9fd380Sjfb8856606 			}
8463*2d9fd380Sjfb8856606 			teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8464*2d9fd380Sjfb8856606 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8465*2d9fd380Sjfb8856606 				I40E_DIRECTION_INGRESS_KEY;
8466*2d9fd380Sjfb8856606 
8467*2d9fd380Sjfb8856606 			if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8468*2d9fd380Sjfb8856606 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8469*2d9fd380Sjfb8856606 					I40E_TR_L4_TYPE_UDP;
8470*2d9fd380Sjfb8856606 			else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8471*2d9fd380Sjfb8856606 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8472*2d9fd380Sjfb8856606 					I40E_TR_L4_TYPE_TCP;
8473*2d9fd380Sjfb8856606 			else
8474*2d9fd380Sjfb8856606 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8475*2d9fd380Sjfb8856606 					I40E_TR_L4_TYPE_SCTP;
8476*2d9fd380Sjfb8856606 
8477*2d9fd380Sjfb8856606 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8478*2d9fd380Sjfb8856606 				(teid_le >> 16) & 0xFFFF;
8479*2d9fd380Sjfb8856606 			big_buffer = 1;
8480*2d9fd380Sjfb8856606 		} else {
8481*2d9fd380Sjfb8856606 			if (!pf->dport_replace_flag) {
8482*2d9fd380Sjfb8856606 				i40e_replace_port_l1_filter(pf,
8483*2d9fd380Sjfb8856606 						tunnel_filter->l4_port_type);
8484*2d9fd380Sjfb8856606 				i40e_replace_port_cloud_filter(pf,
8485*2d9fd380Sjfb8856606 						tunnel_filter->l4_port_type);
8486*2d9fd380Sjfb8856606 				pf->dport_replace_flag = 1;
8487*2d9fd380Sjfb8856606 			}
8488*2d9fd380Sjfb8856606 			teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8489*2d9fd380Sjfb8856606 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
8490*2d9fd380Sjfb8856606 				I40E_DIRECTION_INGRESS_KEY;
8491*2d9fd380Sjfb8856606 
8492*2d9fd380Sjfb8856606 			if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8493*2d9fd380Sjfb8856606 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8494*2d9fd380Sjfb8856606 					I40E_TR_L4_TYPE_UDP;
8495*2d9fd380Sjfb8856606 			else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8496*2d9fd380Sjfb8856606 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8497*2d9fd380Sjfb8856606 					I40E_TR_L4_TYPE_TCP;
8498*2d9fd380Sjfb8856606 			else
8499*2d9fd380Sjfb8856606 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8500*2d9fd380Sjfb8856606 					I40E_TR_L4_TYPE_SCTP;
8501*2d9fd380Sjfb8856606 
8502*2d9fd380Sjfb8856606 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
8503*2d9fd380Sjfb8856606 				(teid_le >> 16) & 0xFFFF;
8504*2d9fd380Sjfb8856606 			big_buffer = 1;
8505*2d9fd380Sjfb8856606 		}
8506*2d9fd380Sjfb8856606 
8507*2d9fd380Sjfb8856606 		break;
85082bfe3f2eSlogwang 	default:
85092bfe3f2eSlogwang 		/* Other tunnel types is not supported. */
85102bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
85112bfe3f2eSlogwang 		rte_free(cld_filter);
85122bfe3f2eSlogwang 		return -EINVAL;
85132bfe3f2eSlogwang 	}
85142bfe3f2eSlogwang 
85152bfe3f2eSlogwang 	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
85162bfe3f2eSlogwang 		pfilter->element.flags =
85172bfe3f2eSlogwang 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
85182bfe3f2eSlogwang 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
85192bfe3f2eSlogwang 		pfilter->element.flags =
85202bfe3f2eSlogwang 			I40E_AQC_ADD_CLOUD_FILTER_0X12;
85212bfe3f2eSlogwang 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
85222bfe3f2eSlogwang 		pfilter->element.flags =
85232bfe3f2eSlogwang 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
85242bfe3f2eSlogwang 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
85252bfe3f2eSlogwang 		pfilter->element.flags =
85262bfe3f2eSlogwang 			I40E_AQC_ADD_CLOUD_FILTER_0X12;
85272bfe3f2eSlogwang 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
85282bfe3f2eSlogwang 		pfilter->element.flags |=
85292bfe3f2eSlogwang 			I40E_AQC_ADD_CLOUD_FILTER_0X10;
8530*2d9fd380Sjfb8856606 	else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
8531*2d9fd380Sjfb8856606 		 tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
8532*2d9fd380Sjfb8856606 		 tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
8533*2d9fd380Sjfb8856606 		if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
8534*2d9fd380Sjfb8856606 			pfilter->element.flags |=
8535*2d9fd380Sjfb8856606 				I40E_AQC_ADD_CLOUD_FILTER_0X11;
8536*2d9fd380Sjfb8856606 		else
8537*2d9fd380Sjfb8856606 			pfilter->element.flags |=
8538*2d9fd380Sjfb8856606 				I40E_AQC_ADD_CLOUD_FILTER_0X10;
8539*2d9fd380Sjfb8856606 	} else {
85402bfe3f2eSlogwang 		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
85412bfe3f2eSlogwang 						&pfilter->element.flags);
85422bfe3f2eSlogwang 		if (val < 0) {
85432bfe3f2eSlogwang 			rte_free(cld_filter);
85442bfe3f2eSlogwang 			return -EINVAL;
85452bfe3f2eSlogwang 		}
85462bfe3f2eSlogwang 	}
85472bfe3f2eSlogwang 
85482bfe3f2eSlogwang 	pfilter->element.flags |= rte_cpu_to_le_16(
85492bfe3f2eSlogwang 		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
85502bfe3f2eSlogwang 		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
85512bfe3f2eSlogwang 	pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
85522bfe3f2eSlogwang 	pfilter->element.queue_number =
85532bfe3f2eSlogwang 		rte_cpu_to_le_16(tunnel_filter->queue_id);
85542bfe3f2eSlogwang 
85552bfe3f2eSlogwang 	if (!tunnel_filter->is_to_vf)
85562bfe3f2eSlogwang 		vsi = pf->main_vsi;
85572bfe3f2eSlogwang 	else {
85582bfe3f2eSlogwang 		if (tunnel_filter->vf_id >= pf->vf_num) {
85592bfe3f2eSlogwang 			PMD_DRV_LOG(ERR, "Invalid argument.");
85602bfe3f2eSlogwang 			rte_free(cld_filter);
85612bfe3f2eSlogwang 			return -EINVAL;
85622bfe3f2eSlogwang 		}
85632bfe3f2eSlogwang 		vf = &pf->vfs[tunnel_filter->vf_id];
85642bfe3f2eSlogwang 		vsi = vf->vsi;
85652bfe3f2eSlogwang 	}
85662bfe3f2eSlogwang 
85672bfe3f2eSlogwang 	/* Check if there is the filter in SW list */
85682bfe3f2eSlogwang 	memset(&check_filter, 0, sizeof(check_filter));
85692bfe3f2eSlogwang 	i40e_tunnel_filter_convert(cld_filter, &check_filter);
85702bfe3f2eSlogwang 	check_filter.is_to_vf = tunnel_filter->is_to_vf;
85712bfe3f2eSlogwang 	check_filter.vf_id = tunnel_filter->vf_id;
85722bfe3f2eSlogwang 	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
85732bfe3f2eSlogwang 	if (add && node) {
85742bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
85752bfe3f2eSlogwang 		rte_free(cld_filter);
85762bfe3f2eSlogwang 		return -EINVAL;
85772bfe3f2eSlogwang 	}
85782bfe3f2eSlogwang 
85792bfe3f2eSlogwang 	if (!add && !node) {
85802bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
85812bfe3f2eSlogwang 		rte_free(cld_filter);
85822bfe3f2eSlogwang 		return -EINVAL;
85832bfe3f2eSlogwang 	}
85842bfe3f2eSlogwang 
85852bfe3f2eSlogwang 	if (add) {
85862bfe3f2eSlogwang 		if (big_buffer)
8587d30ea906Sjfb8856606 			ret = i40e_aq_add_cloud_filters_bb(hw,
85882bfe3f2eSlogwang 						   vsi->seid, cld_filter, 1);
85892bfe3f2eSlogwang 		else
85902bfe3f2eSlogwang 			ret = i40e_aq_add_cloud_filters(hw,
85912bfe3f2eSlogwang 					vsi->seid, &cld_filter->element, 1);
85922bfe3f2eSlogwang 		if (ret < 0) {
85932bfe3f2eSlogwang 			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
85942bfe3f2eSlogwang 			rte_free(cld_filter);
85952bfe3f2eSlogwang 			return -ENOTSUP;
85962bfe3f2eSlogwang 		}
85972bfe3f2eSlogwang 		tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
85982bfe3f2eSlogwang 		if (tunnel == NULL) {
85992bfe3f2eSlogwang 			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
86002bfe3f2eSlogwang 			rte_free(cld_filter);
86012bfe3f2eSlogwang 			return -ENOMEM;
86022bfe3f2eSlogwang 		}
86032bfe3f2eSlogwang 
86042bfe3f2eSlogwang 		rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
86052bfe3f2eSlogwang 		ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
86062bfe3f2eSlogwang 		if (ret < 0)
86072bfe3f2eSlogwang 			rte_free(tunnel);
86082bfe3f2eSlogwang 	} else {
86092bfe3f2eSlogwang 		if (big_buffer)
8610d30ea906Sjfb8856606 			ret = i40e_aq_rem_cloud_filters_bb(
86112bfe3f2eSlogwang 				hw, vsi->seid, cld_filter, 1);
8612a9643ea8Slogwang 		else
8613d30ea906Sjfb8856606 			ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
86142bfe3f2eSlogwang 						&cld_filter->element, 1);
86152bfe3f2eSlogwang 		if (ret < 0) {
86162bfe3f2eSlogwang 			PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
86172bfe3f2eSlogwang 			rte_free(cld_filter);
86182bfe3f2eSlogwang 			return -ENOTSUP;
86192bfe3f2eSlogwang 		}
86202bfe3f2eSlogwang 		ret = i40e_sw_tunnel_filter_del(pf, &node->input);
86212bfe3f2eSlogwang 	}
8622a9643ea8Slogwang 
8623a9643ea8Slogwang 	rte_free(cld_filter);
8624a9643ea8Slogwang 	return ret;
8625a9643ea8Slogwang }
8626a9643ea8Slogwang 
8627a9643ea8Slogwang static int
i40e_get_vxlan_port_idx(struct i40e_pf * pf,uint16_t port)8628a9643ea8Slogwang i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8629a9643ea8Slogwang {
8630a9643ea8Slogwang 	uint8_t i;
8631a9643ea8Slogwang 
8632a9643ea8Slogwang 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8633a9643ea8Slogwang 		if (pf->vxlan_ports[i] == port)
8634a9643ea8Slogwang 			return i;
8635a9643ea8Slogwang 	}
8636a9643ea8Slogwang 
8637a9643ea8Slogwang 	return -1;
8638a9643ea8Slogwang }
8639a9643ea8Slogwang 
8640a9643ea8Slogwang static int
i40e_add_vxlan_port(struct i40e_pf * pf,uint16_t port,int udp_type)86414418919fSjohnjiang i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
8642a9643ea8Slogwang {
8643a9643ea8Slogwang 	int  idx, ret;
86444418919fSjohnjiang 	uint8_t filter_idx = 0;
8645a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8646a9643ea8Slogwang 
8647a9643ea8Slogwang 	idx = i40e_get_vxlan_port_idx(pf, port);
8648a9643ea8Slogwang 
8649a9643ea8Slogwang 	/* Check if port already exists */
8650a9643ea8Slogwang 	if (idx >= 0) {
8651a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8652a9643ea8Slogwang 		return -EINVAL;
8653a9643ea8Slogwang 	}
8654a9643ea8Slogwang 
8655a9643ea8Slogwang 	/* Now check if there is space to add the new port */
8656a9643ea8Slogwang 	idx = i40e_get_vxlan_port_idx(pf, 0);
8657a9643ea8Slogwang 	if (idx < 0) {
86582bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
86592bfe3f2eSlogwang 			"Maximum number of UDP ports reached, not adding port %d",
86602bfe3f2eSlogwang 			port);
8661a9643ea8Slogwang 		return -ENOSPC;
8662a9643ea8Slogwang 	}
8663a9643ea8Slogwang 
86644418919fSjohnjiang 	ret =  i40e_aq_add_udp_tunnel(hw, port, udp_type,
8665a9643ea8Slogwang 					&filter_idx, NULL);
8666a9643ea8Slogwang 	if (ret < 0) {
8667a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8668a9643ea8Slogwang 		return -1;
8669a9643ea8Slogwang 	}
8670a9643ea8Slogwang 
8671a9643ea8Slogwang 	PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
8672a9643ea8Slogwang 			 port,  filter_idx);
8673a9643ea8Slogwang 
8674a9643ea8Slogwang 	/* New port: add it and mark its index in the bitmap */
8675a9643ea8Slogwang 	pf->vxlan_ports[idx] = port;
8676a9643ea8Slogwang 	pf->vxlan_bitmap |= (1 << idx);
8677a9643ea8Slogwang 
8678a9643ea8Slogwang 	if (!(pf->flags & I40E_FLAG_VXLAN))
8679a9643ea8Slogwang 		pf->flags |= I40E_FLAG_VXLAN;
8680a9643ea8Slogwang 
8681a9643ea8Slogwang 	return 0;
8682a9643ea8Slogwang }
8683a9643ea8Slogwang 
8684a9643ea8Slogwang static int
i40e_del_vxlan_port(struct i40e_pf * pf,uint16_t port)8685a9643ea8Slogwang i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8686a9643ea8Slogwang {
8687a9643ea8Slogwang 	int idx;
8688a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8689a9643ea8Slogwang 
8690a9643ea8Slogwang 	if (!(pf->flags & I40E_FLAG_VXLAN)) {
8691a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8692a9643ea8Slogwang 		return -EINVAL;
8693a9643ea8Slogwang 	}
8694a9643ea8Slogwang 
8695a9643ea8Slogwang 	idx = i40e_get_vxlan_port_idx(pf, port);
8696a9643ea8Slogwang 
8697a9643ea8Slogwang 	if (idx < 0) {
8698a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8699a9643ea8Slogwang 		return -EINVAL;
8700a9643ea8Slogwang 	}
8701a9643ea8Slogwang 
8702a9643ea8Slogwang 	if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8703a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8704a9643ea8Slogwang 		return -1;
8705a9643ea8Slogwang 	}
8706a9643ea8Slogwang 
8707a9643ea8Slogwang 	PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
8708a9643ea8Slogwang 			port, idx);
8709a9643ea8Slogwang 
8710a9643ea8Slogwang 	pf->vxlan_ports[idx] = 0;
8711a9643ea8Slogwang 	pf->vxlan_bitmap &= ~(1 << idx);
8712a9643ea8Slogwang 
8713a9643ea8Slogwang 	if (!pf->vxlan_bitmap)
8714a9643ea8Slogwang 		pf->flags &= ~I40E_FLAG_VXLAN;
8715a9643ea8Slogwang 
8716a9643ea8Slogwang 	return 0;
8717a9643ea8Slogwang }
8718a9643ea8Slogwang 
8719a9643ea8Slogwang /* Add UDP tunneling port */
8720a9643ea8Slogwang static int
i40e_dev_udp_tunnel_port_add(struct rte_eth_dev * dev,struct rte_eth_udp_tunnel * udp_tunnel)8721a9643ea8Slogwang i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8722a9643ea8Slogwang 			     struct rte_eth_udp_tunnel *udp_tunnel)
8723a9643ea8Slogwang {
8724a9643ea8Slogwang 	int ret = 0;
8725a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8726a9643ea8Slogwang 
8727a9643ea8Slogwang 	if (udp_tunnel == NULL)
8728a9643ea8Slogwang 		return -EINVAL;
8729a9643ea8Slogwang 
8730a9643ea8Slogwang 	switch (udp_tunnel->prot_type) {
8731a9643ea8Slogwang 	case RTE_TUNNEL_TYPE_VXLAN:
87324418919fSjohnjiang 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
87334418919fSjohnjiang 					  I40E_AQC_TUNNEL_TYPE_VXLAN);
8734a9643ea8Slogwang 		break;
87354418919fSjohnjiang 	case RTE_TUNNEL_TYPE_VXLAN_GPE:
87364418919fSjohnjiang 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
87374418919fSjohnjiang 					  I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
87384418919fSjohnjiang 		break;
8739a9643ea8Slogwang 	case RTE_TUNNEL_TYPE_GENEVE:
8740a9643ea8Slogwang 	case RTE_TUNNEL_TYPE_TEREDO:
8741a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8742a9643ea8Slogwang 		ret = -1;
8743a9643ea8Slogwang 		break;
8744a9643ea8Slogwang 
8745a9643ea8Slogwang 	default:
8746a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8747a9643ea8Slogwang 		ret = -1;
8748a9643ea8Slogwang 		break;
8749a9643ea8Slogwang 	}
8750a9643ea8Slogwang 
8751a9643ea8Slogwang 	return ret;
8752a9643ea8Slogwang }
8753a9643ea8Slogwang 
8754a9643ea8Slogwang /* Remove UDP tunneling port */
8755a9643ea8Slogwang static int
i40e_dev_udp_tunnel_port_del(struct rte_eth_dev * dev,struct rte_eth_udp_tunnel * udp_tunnel)8756a9643ea8Slogwang i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8757a9643ea8Slogwang 			     struct rte_eth_udp_tunnel *udp_tunnel)
8758a9643ea8Slogwang {
8759a9643ea8Slogwang 	int ret = 0;
8760a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8761a9643ea8Slogwang 
8762a9643ea8Slogwang 	if (udp_tunnel == NULL)
8763a9643ea8Slogwang 		return -EINVAL;
8764a9643ea8Slogwang 
8765a9643ea8Slogwang 	switch (udp_tunnel->prot_type) {
8766a9643ea8Slogwang 	case RTE_TUNNEL_TYPE_VXLAN:
87674418919fSjohnjiang 	case RTE_TUNNEL_TYPE_VXLAN_GPE:
8768a9643ea8Slogwang 		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8769a9643ea8Slogwang 		break;
8770a9643ea8Slogwang 	case RTE_TUNNEL_TYPE_GENEVE:
8771a9643ea8Slogwang 	case RTE_TUNNEL_TYPE_TEREDO:
8772a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8773a9643ea8Slogwang 		ret = -1;
8774a9643ea8Slogwang 		break;
8775a9643ea8Slogwang 	default:
8776a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8777a9643ea8Slogwang 		ret = -1;
8778a9643ea8Slogwang 		break;
8779a9643ea8Slogwang 	}
8780a9643ea8Slogwang 
8781a9643ea8Slogwang 	return ret;
8782a9643ea8Slogwang }
8783a9643ea8Slogwang 
8784a9643ea8Slogwang /* Calculate the maximum number of contiguous PF queues that are configured */
8785a9643ea8Slogwang static int
i40e_pf_calc_configured_queues_num(struct i40e_pf * pf)8786a9643ea8Slogwang i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8787a9643ea8Slogwang {
8788a9643ea8Slogwang 	struct rte_eth_dev_data *data = pf->dev_data;
8789a9643ea8Slogwang 	int i, num;
8790a9643ea8Slogwang 	struct i40e_rx_queue *rxq;
8791a9643ea8Slogwang 
8792a9643ea8Slogwang 	num = 0;
8793a9643ea8Slogwang 	for (i = 0; i < pf->lan_nb_qps; i++) {
8794a9643ea8Slogwang 		rxq = data->rx_queues[i];
8795a9643ea8Slogwang 		if (rxq && rxq->q_set)
8796a9643ea8Slogwang 			num++;
8797a9643ea8Slogwang 		else
8798a9643ea8Slogwang 			break;
8799a9643ea8Slogwang 	}
8800a9643ea8Slogwang 
8801a9643ea8Slogwang 	return num;
8802a9643ea8Slogwang }
8803a9643ea8Slogwang 
8804a9643ea8Slogwang /* Configure RSS */
8805a9643ea8Slogwang static int
i40e_pf_config_rss(struct i40e_pf * pf)8806a9643ea8Slogwang i40e_pf_config_rss(struct i40e_pf *pf)
8807a9643ea8Slogwang {
8808*2d9fd380Sjfb8856606 	enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8809a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8810a9643ea8Slogwang 	struct rte_eth_rss_conf rss_conf;
8811a9643ea8Slogwang 	uint32_t i, lut = 0;
8812a9643ea8Slogwang 	uint16_t j, num;
8813a9643ea8Slogwang 
8814a9643ea8Slogwang 	/*
8815a9643ea8Slogwang 	 * If both VMDQ and RSS enabled, not all of PF queues are configured.
88162bfe3f2eSlogwang 	 * It's necessary to calculate the actual PF queues that are configured.
8817a9643ea8Slogwang 	 */
8818a9643ea8Slogwang 	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8819a9643ea8Slogwang 		num = i40e_pf_calc_configured_queues_num(pf);
8820a9643ea8Slogwang 	else
8821a9643ea8Slogwang 		num = pf->dev_data->nb_rx_queues;
8822a9643ea8Slogwang 
8823a9643ea8Slogwang 	num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8824a9643ea8Slogwang 	PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
8825a9643ea8Slogwang 			num);
8826a9643ea8Slogwang 
8827a9643ea8Slogwang 	if (num == 0) {
8828*2d9fd380Sjfb8856606 		PMD_INIT_LOG(ERR,
8829*2d9fd380Sjfb8856606 			"No PF queues are configured to enable RSS for port %u",
8830*2d9fd380Sjfb8856606 			pf->dev_data->port_id);
8831a9643ea8Slogwang 		return -ENOTSUP;
8832a9643ea8Slogwang 	}
8833a9643ea8Slogwang 
88341646932aSjfb8856606 	if (pf->adapter->rss_reta_updated == 0) {
8835a9643ea8Slogwang 		for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
8836a9643ea8Slogwang 			if (j == num)
8837a9643ea8Slogwang 				j = 0;
8838a9643ea8Slogwang 			lut = (lut << 8) | (j & ((0x1 <<
8839a9643ea8Slogwang 				hw->func_caps.rss_table_entry_width) - 1));
8840a9643ea8Slogwang 			if ((i & 3) == 3)
88411646932aSjfb8856606 				I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2),
88421646932aSjfb8856606 					       rte_bswap32(lut));
88431646932aSjfb8856606 		}
8844a9643ea8Slogwang 	}
8845a9643ea8Slogwang 
8846a9643ea8Slogwang 	rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
8847*2d9fd380Sjfb8856606 	if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0 ||
8848*2d9fd380Sjfb8856606 	    !(mq_mode & ETH_MQ_RX_RSS_FLAG)) {
8849a9643ea8Slogwang 		i40e_pf_disable_rss(pf);
8850a9643ea8Slogwang 		return 0;
8851a9643ea8Slogwang 	}
8852a9643ea8Slogwang 	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
8853a9643ea8Slogwang 		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
8854a9643ea8Slogwang 		/* Random default keys */
8855a9643ea8Slogwang 		static uint32_t rss_key_default[] = {0x6b793944,
8856a9643ea8Slogwang 			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8857a9643ea8Slogwang 			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8858a9643ea8Slogwang 			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8859a9643ea8Slogwang 
8860a9643ea8Slogwang 		rss_conf.rss_key = (uint8_t *)rss_key_default;
8861a9643ea8Slogwang 		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8862a9643ea8Slogwang 							sizeof(uint32_t);
8863a9643ea8Slogwang 	}
8864a9643ea8Slogwang 
8865a9643ea8Slogwang 	return i40e_hw_rss_hash_set(pf, &rss_conf);
8866a9643ea8Slogwang }
8867a9643ea8Slogwang 
8868a9643ea8Slogwang #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8869a9643ea8Slogwang #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
8870*2d9fd380Sjfb8856606 int
i40e_dev_set_gre_key_len(struct i40e_hw * hw,uint8_t len)8871a9643ea8Slogwang i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8872a9643ea8Slogwang {
88732bfe3f2eSlogwang 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8874a9643ea8Slogwang 	uint32_t val, reg;
8875a9643ea8Slogwang 	int ret = -EINVAL;
8876a9643ea8Slogwang 
88772bfe3f2eSlogwang 	if (pf->support_multi_driver) {
88782bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
88792bfe3f2eSlogwang 		return -ENOTSUP;
88802bfe3f2eSlogwang 	}
88812bfe3f2eSlogwang 
8882a9643ea8Slogwang 	val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
88832bfe3f2eSlogwang 	PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8884a9643ea8Slogwang 
8885a9643ea8Slogwang 	if (len == 3) {
8886a9643ea8Slogwang 		reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8887a9643ea8Slogwang 	} else if (len == 4) {
8888a9643ea8Slogwang 		reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8889a9643ea8Slogwang 	} else {
8890a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8891a9643ea8Slogwang 		return ret;
8892a9643ea8Slogwang 	}
8893a9643ea8Slogwang 
8894a9643ea8Slogwang 	if (reg != val) {
8895d30ea906Sjfb8856606 		ret = i40e_aq_debug_write_global_register(hw,
8896d30ea906Sjfb8856606 						   I40E_GL_PRS_FVBM(2),
8897a9643ea8Slogwang 						   reg, NULL);
8898a9643ea8Slogwang 		if (ret != 0)
8899a9643ea8Slogwang 			return ret;
89002bfe3f2eSlogwang 		PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
89012bfe3f2eSlogwang 			    "with value 0x%08x",
89022bfe3f2eSlogwang 			    I40E_GL_PRS_FVBM(2), reg);
8903a9643ea8Slogwang 	} else {
8904a9643ea8Slogwang 		ret = 0;
8905a9643ea8Slogwang 	}
89062bfe3f2eSlogwang 	PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8907a9643ea8Slogwang 		    I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8908a9643ea8Slogwang 
8909a9643ea8Slogwang 	return ret;
8910a9643ea8Slogwang }
8911a9643ea8Slogwang 
8912a9643ea8Slogwang /* Set the symmetric hash enable configurations per port */
8913a9643ea8Slogwang static void
i40e_set_symmetric_hash_enable_per_port(struct i40e_hw * hw,uint8_t enable)8914a9643ea8Slogwang i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8915a9643ea8Slogwang {
8916a9643ea8Slogwang 	uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8917a9643ea8Slogwang 
8918a9643ea8Slogwang 	if (enable > 0) {
8919a9643ea8Slogwang 		if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
89202bfe3f2eSlogwang 			PMD_DRV_LOG(INFO,
89212bfe3f2eSlogwang 				"Symmetric hash has already been enabled");
8922a9643ea8Slogwang 			return;
8923a9643ea8Slogwang 		}
8924a9643ea8Slogwang 		reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8925a9643ea8Slogwang 	} else {
8926a9643ea8Slogwang 		if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
89272bfe3f2eSlogwang 			PMD_DRV_LOG(INFO,
89282bfe3f2eSlogwang 				"Symmetric hash has already been disabled");
8929a9643ea8Slogwang 			return;
8930a9643ea8Slogwang 		}
8931a9643ea8Slogwang 		reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8932a9643ea8Slogwang 	}
8933a9643ea8Slogwang 	i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
8934a9643ea8Slogwang 	I40E_WRITE_FLUSH(hw);
8935a9643ea8Slogwang }
8936a9643ea8Slogwang 
8937a9643ea8Slogwang /**
8938a9643ea8Slogwang  * Valid input sets for hash and flow director filters per PCTYPE
8939a9643ea8Slogwang  */
8940a9643ea8Slogwang static uint64_t
i40e_get_valid_input_set(enum i40e_filter_pctype pctype,enum rte_filter_type filter)8941a9643ea8Slogwang i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
8942a9643ea8Slogwang 		enum rte_filter_type filter)
8943a9643ea8Slogwang {
8944a9643ea8Slogwang 	uint64_t valid;
8945a9643ea8Slogwang 
8946a9643ea8Slogwang 	static const uint64_t valid_hash_inset_table[] = {
8947a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
8948a9643ea8Slogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
8949a9643ea8Slogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8950a9643ea8Slogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
8951a9643ea8Slogwang 			I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
8952a9643ea8Slogwang 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8953a9643ea8Slogwang 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8954a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD,
8955a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8956a9643ea8Slogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
8957a9643ea8Slogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8958a9643ea8Slogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8959a9643ea8Slogwang 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8960a9643ea8Slogwang 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8961a9643ea8Slogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8962a9643ea8Slogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8963a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD,
89642bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
89652bfe3f2eSlogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
89662bfe3f2eSlogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
89672bfe3f2eSlogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
89682bfe3f2eSlogwang 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
89692bfe3f2eSlogwang 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
89702bfe3f2eSlogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
89712bfe3f2eSlogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
89722bfe3f2eSlogwang 			I40E_INSET_FLEX_PAYLOAD,
89732bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
89742bfe3f2eSlogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
89752bfe3f2eSlogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
89762bfe3f2eSlogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
89772bfe3f2eSlogwang 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
89782bfe3f2eSlogwang 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
89792bfe3f2eSlogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
89802bfe3f2eSlogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
89812bfe3f2eSlogwang 			I40E_INSET_FLEX_PAYLOAD,
8982a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8983a9643ea8Slogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
8984a9643ea8Slogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8985a9643ea8Slogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8986a9643ea8Slogwang 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8987a9643ea8Slogwang 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8988a9643ea8Slogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8989a9643ea8Slogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8990a9643ea8Slogwang 			I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
89912bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
89922bfe3f2eSlogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
89932bfe3f2eSlogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
89942bfe3f2eSlogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
89952bfe3f2eSlogwang 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
89962bfe3f2eSlogwang 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
89972bfe3f2eSlogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
89982bfe3f2eSlogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
89992bfe3f2eSlogwang 			I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9000a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9001a9643ea8Slogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9002a9643ea8Slogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9003a9643ea8Slogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9004a9643ea8Slogwang 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9005a9643ea8Slogwang 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9006a9643ea8Slogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9007a9643ea8Slogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9008a9643ea8Slogwang 			I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9009a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9010a9643ea8Slogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9011a9643ea8Slogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9012a9643ea8Slogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9013a9643ea8Slogwang 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9014a9643ea8Slogwang 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9015a9643ea8Slogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9016a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD,
9017a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9018a9643ea8Slogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9019a9643ea8Slogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9020a9643ea8Slogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9021a9643ea8Slogwang 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9022a9643ea8Slogwang 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9023a9643ea8Slogwang 			I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9024a9643ea8Slogwang 			I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9025a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9026a9643ea8Slogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9027a9643ea8Slogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9028a9643ea8Slogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9029a9643ea8Slogwang 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9030a9643ea8Slogwang 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9031a9643ea8Slogwang 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9032a9643ea8Slogwang 			I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
90332bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
90342bfe3f2eSlogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
90352bfe3f2eSlogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
90362bfe3f2eSlogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
90372bfe3f2eSlogwang 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
90382bfe3f2eSlogwang 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
90392bfe3f2eSlogwang 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
90402bfe3f2eSlogwang 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
90412bfe3f2eSlogwang 			I40E_INSET_FLEX_PAYLOAD,
90422bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
90432bfe3f2eSlogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
90442bfe3f2eSlogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
90452bfe3f2eSlogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
90462bfe3f2eSlogwang 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
90472bfe3f2eSlogwang 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
90482bfe3f2eSlogwang 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
90492bfe3f2eSlogwang 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
90502bfe3f2eSlogwang 			I40E_INSET_FLEX_PAYLOAD,
9051a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9052a9643ea8Slogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9053a9643ea8Slogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9054a9643ea8Slogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9055a9643ea8Slogwang 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9056a9643ea8Slogwang 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9057a9643ea8Slogwang 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9058a9643ea8Slogwang 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9059a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD,
90602bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
90612bfe3f2eSlogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
90622bfe3f2eSlogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
90632bfe3f2eSlogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
90642bfe3f2eSlogwang 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
90652bfe3f2eSlogwang 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
90662bfe3f2eSlogwang 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
90672bfe3f2eSlogwang 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
90682bfe3f2eSlogwang 			I40E_INSET_FLEX_PAYLOAD,
9069a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9070a9643ea8Slogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9071a9643ea8Slogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9072a9643ea8Slogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9073a9643ea8Slogwang 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9074a9643ea8Slogwang 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9075a9643ea8Slogwang 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9076a9643ea8Slogwang 			I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9077a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD,
9078a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9079a9643ea8Slogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9080a9643ea8Slogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9081a9643ea8Slogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9082a9643ea8Slogwang 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9083a9643ea8Slogwang 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9084a9643ea8Slogwang 			I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9085a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD,
9086a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9087a9643ea8Slogwang 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9088a9643ea8Slogwang 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9089a9643ea8Slogwang 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9090a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD,
9091a9643ea8Slogwang 	};
9092a9643ea8Slogwang 
9093a9643ea8Slogwang 	/**
9094a9643ea8Slogwang 	 * Flow director supports only fields defined in
9095a9643ea8Slogwang 	 * union rte_eth_fdir_flow.
9096a9643ea8Slogwang 	 */
9097a9643ea8Slogwang 	static const uint64_t valid_fdir_inset_table[] = {
9098a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9099a9643ea8Slogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9100a9643ea8Slogwang 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9101a9643ea8Slogwang 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9102a9643ea8Slogwang 		I40E_INSET_IPV4_TTL,
9103a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9104*2d9fd380Sjfb8856606 		I40E_INSET_DMAC | I40E_INSET_SMAC |
9105a9643ea8Slogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9106a9643ea8Slogwang 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9107a9643ea8Slogwang 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9108a9643ea8Slogwang 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
91092bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
91102bfe3f2eSlogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
91112bfe3f2eSlogwang 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
91122bfe3f2eSlogwang 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
91132bfe3f2eSlogwang 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
91142bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
91152bfe3f2eSlogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
91162bfe3f2eSlogwang 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
91172bfe3f2eSlogwang 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
91182bfe3f2eSlogwang 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9119a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9120*2d9fd380Sjfb8856606 		I40E_INSET_DMAC | I40E_INSET_SMAC |
9121a9643ea8Slogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9122a9643ea8Slogwang 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9123a9643ea8Slogwang 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9124a9643ea8Slogwang 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
91252bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
91262bfe3f2eSlogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
91272bfe3f2eSlogwang 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
91282bfe3f2eSlogwang 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
91292bfe3f2eSlogwang 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9130a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9131a9643ea8Slogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9132a9643ea8Slogwang 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9133a9643ea8Slogwang 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9134a9643ea8Slogwang 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9135a9643ea8Slogwang 		I40E_INSET_SCTP_VT,
9136a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9137*2d9fd380Sjfb8856606 		I40E_INSET_DMAC | I40E_INSET_SMAC |
9138a9643ea8Slogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9139a9643ea8Slogwang 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9140a9643ea8Slogwang 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9141a9643ea8Slogwang 		I40E_INSET_IPV4_TTL,
9142a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9143a9643ea8Slogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9144a9643ea8Slogwang 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9145a9643ea8Slogwang 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9146a9643ea8Slogwang 		I40E_INSET_IPV6_HOP_LIMIT,
9147a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9148a9643ea8Slogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9149a9643ea8Slogwang 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9150a9643ea8Slogwang 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9151a9643ea8Slogwang 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
91522bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
91532bfe3f2eSlogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
91542bfe3f2eSlogwang 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
91552bfe3f2eSlogwang 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
91562bfe3f2eSlogwang 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
91572bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
91582bfe3f2eSlogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
91592bfe3f2eSlogwang 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
91602bfe3f2eSlogwang 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
91612bfe3f2eSlogwang 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9162a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9163a9643ea8Slogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9164a9643ea8Slogwang 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9165a9643ea8Slogwang 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9166a9643ea8Slogwang 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
91672bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
91682bfe3f2eSlogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
91692bfe3f2eSlogwang 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
91702bfe3f2eSlogwang 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
91712bfe3f2eSlogwang 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9172a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9173a9643ea8Slogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9174a9643ea8Slogwang 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9175a9643ea8Slogwang 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9176a9643ea8Slogwang 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9177a9643ea8Slogwang 		I40E_INSET_SCTP_VT,
9178a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9179a9643ea8Slogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9180a9643ea8Slogwang 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9181a9643ea8Slogwang 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9182a9643ea8Slogwang 		I40E_INSET_IPV6_HOP_LIMIT,
9183a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9184a9643ea8Slogwang 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9185a9643ea8Slogwang 		I40E_INSET_LAST_ETHER_TYPE,
9186a9643ea8Slogwang 	};
9187a9643ea8Slogwang 
9188a9643ea8Slogwang 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9189a9643ea8Slogwang 		return 0;
9190a9643ea8Slogwang 	if (filter == RTE_ETH_FILTER_HASH)
9191a9643ea8Slogwang 		valid = valid_hash_inset_table[pctype];
9192a9643ea8Slogwang 	else
9193a9643ea8Slogwang 		valid = valid_fdir_inset_table[pctype];
9194a9643ea8Slogwang 
9195a9643ea8Slogwang 	return valid;
9196a9643ea8Slogwang }
9197a9643ea8Slogwang 
9198a9643ea8Slogwang /**
9199a9643ea8Slogwang  * Validate if the input set is allowed for a specific PCTYPE
9200a9643ea8Slogwang  */
92012bfe3f2eSlogwang int
i40e_validate_input_set(enum i40e_filter_pctype pctype,enum rte_filter_type filter,uint64_t inset)9202a9643ea8Slogwang i40e_validate_input_set(enum i40e_filter_pctype pctype,
9203a9643ea8Slogwang 		enum rte_filter_type filter, uint64_t inset)
9204a9643ea8Slogwang {
9205a9643ea8Slogwang 	uint64_t valid;
9206a9643ea8Slogwang 
9207a9643ea8Slogwang 	valid = i40e_get_valid_input_set(pctype, filter);
9208a9643ea8Slogwang 	if (inset & (~valid))
9209a9643ea8Slogwang 		return -EINVAL;
9210a9643ea8Slogwang 
9211a9643ea8Slogwang 	return 0;
9212a9643ea8Slogwang }
9213a9643ea8Slogwang 
9214a9643ea8Slogwang /* default input set fields combination per pctype */
92152bfe3f2eSlogwang uint64_t
i40e_get_default_input_set(uint16_t pctype)9216a9643ea8Slogwang i40e_get_default_input_set(uint16_t pctype)
9217a9643ea8Slogwang {
9218a9643ea8Slogwang 	static const uint64_t default_inset_table[] = {
9219a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9220a9643ea8Slogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9221a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9222a9643ea8Slogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9223a9643ea8Slogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
92242bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
92252bfe3f2eSlogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
92262bfe3f2eSlogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
92272bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
92282bfe3f2eSlogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
92292bfe3f2eSlogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9230a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9231a9643ea8Slogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9232a9643ea8Slogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
92332bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
92342bfe3f2eSlogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
92352bfe3f2eSlogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9236a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9237a9643ea8Slogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9238a9643ea8Slogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9239a9643ea8Slogwang 			I40E_INSET_SCTP_VT,
9240a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9241a9643ea8Slogwang 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9242a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9243a9643ea8Slogwang 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9244a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9245a9643ea8Slogwang 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9246a9643ea8Slogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
92472bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
92482bfe3f2eSlogwang 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
92492bfe3f2eSlogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
92502bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
92512bfe3f2eSlogwang 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
92522bfe3f2eSlogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9253a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9254a9643ea8Slogwang 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9255a9643ea8Slogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
92562bfe3f2eSlogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
92572bfe3f2eSlogwang 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
92582bfe3f2eSlogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9259a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9260a9643ea8Slogwang 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9261a9643ea8Slogwang 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9262a9643ea8Slogwang 			I40E_INSET_SCTP_VT,
9263a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9264a9643ea8Slogwang 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9265a9643ea8Slogwang 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9266a9643ea8Slogwang 			I40E_INSET_LAST_ETHER_TYPE,
9267a9643ea8Slogwang 	};
9268a9643ea8Slogwang 
9269a9643ea8Slogwang 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9270a9643ea8Slogwang 		return 0;
9271a9643ea8Slogwang 
9272a9643ea8Slogwang 	return default_inset_table[pctype];
9273a9643ea8Slogwang }
9274a9643ea8Slogwang 
9275a9643ea8Slogwang /**
9276a9643ea8Slogwang  * Parse the input set from index to logical bit masks
9277a9643ea8Slogwang  */
9278a9643ea8Slogwang static int
i40e_parse_input_set(uint64_t * inset,enum i40e_filter_pctype pctype,enum rte_eth_input_set_field * field,uint16_t size)9279a9643ea8Slogwang i40e_parse_input_set(uint64_t *inset,
9280a9643ea8Slogwang 		     enum i40e_filter_pctype pctype,
9281a9643ea8Slogwang 		     enum rte_eth_input_set_field *field,
9282a9643ea8Slogwang 		     uint16_t size)
9283a9643ea8Slogwang {
9284a9643ea8Slogwang 	uint16_t i, j;
9285a9643ea8Slogwang 	int ret = -EINVAL;
9286a9643ea8Slogwang 
9287a9643ea8Slogwang 	static const struct {
9288a9643ea8Slogwang 		enum rte_eth_input_set_field field;
9289a9643ea8Slogwang 		uint64_t inset;
9290a9643ea8Slogwang 	} inset_convert_table[] = {
9291a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
9292a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
9293a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
9294a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
9295a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
9296a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
9297a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
9298a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
9299a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
9300a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
9301a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
9302a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
9303a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
9304a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
9305a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
9306a9643ea8Slogwang 			I40E_INSET_IPV6_NEXT_HDR},
9307a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
9308a9643ea8Slogwang 			I40E_INSET_IPV6_HOP_LIMIT},
9309a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
9310a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
9311a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
9312a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
9313a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
9314a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
9315a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
9316a9643ea8Slogwang 			I40E_INSET_SCTP_VT},
9317a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
9318a9643ea8Slogwang 			I40E_INSET_TUNNEL_DMAC},
9319a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
9320a9643ea8Slogwang 			I40E_INSET_VLAN_TUNNEL},
9321a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
9322a9643ea8Slogwang 			I40E_INSET_TUNNEL_ID},
9323a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
9324a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
9325a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD_W1},
9326a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
9327a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD_W2},
9328a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
9329a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD_W3},
9330a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
9331a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD_W4},
9332a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
9333a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD_W5},
9334a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
9335a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD_W6},
9336a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
9337a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD_W7},
9338a9643ea8Slogwang 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
9339a9643ea8Slogwang 			I40E_INSET_FLEX_PAYLOAD_W8},
9340a9643ea8Slogwang 	};
9341a9643ea8Slogwang 
9342a9643ea8Slogwang 	if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
9343a9643ea8Slogwang 		return ret;
9344a9643ea8Slogwang 
9345a9643ea8Slogwang 	/* Only one item allowed for default or all */
9346a9643ea8Slogwang 	if (size == 1) {
9347a9643ea8Slogwang 		if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
9348a9643ea8Slogwang 			*inset = i40e_get_default_input_set(pctype);
9349a9643ea8Slogwang 			return 0;
9350a9643ea8Slogwang 		} else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
9351a9643ea8Slogwang 			*inset = I40E_INSET_NONE;
9352a9643ea8Slogwang 			return 0;
9353a9643ea8Slogwang 		}
9354a9643ea8Slogwang 	}
9355a9643ea8Slogwang 
9356a9643ea8Slogwang 	for (i = 0, *inset = 0; i < size; i++) {
9357a9643ea8Slogwang 		for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
9358a9643ea8Slogwang 			if (field[i] == inset_convert_table[j].field) {
9359a9643ea8Slogwang 				*inset |= inset_convert_table[j].inset;
9360a9643ea8Slogwang 				break;
9361a9643ea8Slogwang 			}
9362a9643ea8Slogwang 		}
9363a9643ea8Slogwang 
9364a9643ea8Slogwang 		/* It contains unsupported input set, return immediately */
9365a9643ea8Slogwang 		if (j == RTE_DIM(inset_convert_table))
9366a9643ea8Slogwang 			return ret;
9367a9643ea8Slogwang 	}
9368a9643ea8Slogwang 
9369a9643ea8Slogwang 	return 0;
9370a9643ea8Slogwang }
9371a9643ea8Slogwang 
9372a9643ea8Slogwang /**
9373a9643ea8Slogwang  * Translate the input set from bit masks to register aware bit masks
9374a9643ea8Slogwang  * and vice versa
9375a9643ea8Slogwang  */
93762bfe3f2eSlogwang uint64_t
i40e_translate_input_set_reg(enum i40e_mac_type type,uint64_t input)9377a9643ea8Slogwang i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9378a9643ea8Slogwang {
9379a9643ea8Slogwang 	uint64_t val = 0;
9380a9643ea8Slogwang 	uint16_t i;
9381a9643ea8Slogwang 
9382a9643ea8Slogwang 	struct inset_map {
9383a9643ea8Slogwang 		uint64_t inset;
9384a9643ea8Slogwang 		uint64_t inset_reg;
9385a9643ea8Slogwang 	};
9386a9643ea8Slogwang 
9387a9643ea8Slogwang 	static const struct inset_map inset_map_common[] = {
9388a9643ea8Slogwang 		{I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9389a9643ea8Slogwang 		{I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9390a9643ea8Slogwang 		{I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9391a9643ea8Slogwang 		{I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9392a9643ea8Slogwang 		{I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9393a9643ea8Slogwang 		{I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9394a9643ea8Slogwang 		{I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9395a9643ea8Slogwang 		{I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9396a9643ea8Slogwang 		{I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9397a9643ea8Slogwang 		{I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9398a9643ea8Slogwang 		{I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9399a9643ea8Slogwang 		{I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9400a9643ea8Slogwang 		{I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9401a9643ea8Slogwang 		{I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9402a9643ea8Slogwang 		{I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9403a9643ea8Slogwang 		{I40E_INSET_TUNNEL_DMAC,
9404a9643ea8Slogwang 			I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9405a9643ea8Slogwang 		{I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9406a9643ea8Slogwang 		{I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9407a9643ea8Slogwang 		{I40E_INSET_TUNNEL_SRC_PORT,
9408a9643ea8Slogwang 			I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9409a9643ea8Slogwang 		{I40E_INSET_TUNNEL_DST_PORT,
9410a9643ea8Slogwang 			I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9411a9643ea8Slogwang 		{I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9412a9643ea8Slogwang 		{I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9413a9643ea8Slogwang 		{I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9414a9643ea8Slogwang 		{I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9415a9643ea8Slogwang 		{I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9416a9643ea8Slogwang 		{I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9417a9643ea8Slogwang 		{I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9418a9643ea8Slogwang 		{I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9419a9643ea8Slogwang 		{I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9420a9643ea8Slogwang 	};
9421a9643ea8Slogwang 
9422a9643ea8Slogwang     /* some different registers map in x722*/
9423a9643ea8Slogwang 	static const struct inset_map inset_map_diff_x722[] = {
9424a9643ea8Slogwang 		{I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9425a9643ea8Slogwang 		{I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9426a9643ea8Slogwang 		{I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9427a9643ea8Slogwang 		{I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9428a9643ea8Slogwang 	};
9429a9643ea8Slogwang 
9430a9643ea8Slogwang 	static const struct inset_map inset_map_diff_not_x722[] = {
9431a9643ea8Slogwang 		{I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9432a9643ea8Slogwang 		{I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9433a9643ea8Slogwang 		{I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9434a9643ea8Slogwang 		{I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9435a9643ea8Slogwang 	};
9436a9643ea8Slogwang 
9437a9643ea8Slogwang 	if (input == 0)
9438a9643ea8Slogwang 		return val;
9439a9643ea8Slogwang 
9440a9643ea8Slogwang 	/* Translate input set to register aware inset */
9441a9643ea8Slogwang 	if (type == I40E_MAC_X722) {
9442a9643ea8Slogwang 		for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9443a9643ea8Slogwang 			if (input & inset_map_diff_x722[i].inset)
9444a9643ea8Slogwang 				val |= inset_map_diff_x722[i].inset_reg;
9445a9643ea8Slogwang 		}
9446a9643ea8Slogwang 	} else {
9447a9643ea8Slogwang 		for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9448a9643ea8Slogwang 			if (input & inset_map_diff_not_x722[i].inset)
9449a9643ea8Slogwang 				val |= inset_map_diff_not_x722[i].inset_reg;
9450a9643ea8Slogwang 		}
9451a9643ea8Slogwang 	}
9452a9643ea8Slogwang 
9453a9643ea8Slogwang 	for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9454a9643ea8Slogwang 		if (input & inset_map_common[i].inset)
9455a9643ea8Slogwang 			val |= inset_map_common[i].inset_reg;
9456a9643ea8Slogwang 	}
9457a9643ea8Slogwang 
9458a9643ea8Slogwang 	return val;
9459a9643ea8Slogwang }
9460a9643ea8Slogwang 
94612bfe3f2eSlogwang int
i40e_generate_inset_mask_reg(uint64_t inset,uint32_t * mask,uint8_t nb_elem)9462a9643ea8Slogwang i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
9463a9643ea8Slogwang {
9464a9643ea8Slogwang 	uint8_t i, idx = 0;
9465a9643ea8Slogwang 	uint64_t inset_need_mask = inset;
9466a9643ea8Slogwang 
9467a9643ea8Slogwang 	static const struct {
9468a9643ea8Slogwang 		uint64_t inset;
9469a9643ea8Slogwang 		uint32_t mask;
9470a9643ea8Slogwang 	} inset_mask_map[] = {
9471a9643ea8Slogwang 		{I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
9472a9643ea8Slogwang 		{I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
9473a9643ea8Slogwang 		{I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
9474a9643ea8Slogwang 		{I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
9475a9643ea8Slogwang 		{I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
9476a9643ea8Slogwang 		{I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
9477a9643ea8Slogwang 		{I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
9478a9643ea8Slogwang 		{I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
9479a9643ea8Slogwang 	};
9480a9643ea8Slogwang 
9481a9643ea8Slogwang 	if (!inset || !mask || !nb_elem)
9482a9643ea8Slogwang 		return 0;
9483a9643ea8Slogwang 
9484a9643ea8Slogwang 	for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9485a9643ea8Slogwang 		/* Clear the inset bit, if no MASK is required,
9486a9643ea8Slogwang 		 * for example proto + ttl
9487a9643ea8Slogwang 		 */
9488a9643ea8Slogwang 		if ((inset & inset_mask_map[i].inset) ==
9489a9643ea8Slogwang 		     inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
9490a9643ea8Slogwang 			inset_need_mask &= ~inset_mask_map[i].inset;
9491a9643ea8Slogwang 		if (!inset_need_mask)
9492a9643ea8Slogwang 			return 0;
9493a9643ea8Slogwang 	}
9494a9643ea8Slogwang 	for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9495a9643ea8Slogwang 		if ((inset_need_mask & inset_mask_map[i].inset) ==
9496a9643ea8Slogwang 		    inset_mask_map[i].inset) {
9497a9643ea8Slogwang 			if (idx >= nb_elem) {
9498a9643ea8Slogwang 				PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
9499a9643ea8Slogwang 				return -EINVAL;
9500a9643ea8Slogwang 			}
9501a9643ea8Slogwang 			mask[idx] = inset_mask_map[i].mask;
9502a9643ea8Slogwang 			idx++;
9503a9643ea8Slogwang 		}
9504a9643ea8Slogwang 	}
9505a9643ea8Slogwang 
9506a9643ea8Slogwang 	return idx;
9507a9643ea8Slogwang }
9508a9643ea8Slogwang 
95092bfe3f2eSlogwang void
i40e_check_write_reg(struct i40e_hw * hw,uint32_t addr,uint32_t val)9510a9643ea8Slogwang i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9511a9643ea8Slogwang {
9512a9643ea8Slogwang 	uint32_t reg = i40e_read_rx_ctl(hw, addr);
9513a9643ea8Slogwang 
95142bfe3f2eSlogwang 	PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9515a9643ea8Slogwang 	if (reg != val)
9516a9643ea8Slogwang 		i40e_write_rx_ctl(hw, addr, val);
95172bfe3f2eSlogwang 	PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
95182bfe3f2eSlogwang 		    (uint32_t)i40e_read_rx_ctl(hw, addr));
95192bfe3f2eSlogwang }
95202bfe3f2eSlogwang 
95212bfe3f2eSlogwang void
i40e_check_write_global_reg(struct i40e_hw * hw,uint32_t addr,uint32_t val)95222bfe3f2eSlogwang i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
95232bfe3f2eSlogwang {
95242bfe3f2eSlogwang 	uint32_t reg = i40e_read_rx_ctl(hw, addr);
9525d30ea906Sjfb8856606 	struct rte_eth_dev *dev;
95262bfe3f2eSlogwang 
9527d30ea906Sjfb8856606 	dev = ((struct i40e_adapter *)hw->back)->eth_dev;
9528d30ea906Sjfb8856606 	if (reg != val) {
9529d30ea906Sjfb8856606 		i40e_write_rx_ctl(hw, addr, val);
9530d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING,
9531d30ea906Sjfb8856606 			    "i40e device %s changed global register [0x%08x]."
9532d30ea906Sjfb8856606 			    " original: 0x%08x, new: 0x%08x",
9533d30ea906Sjfb8856606 			    dev->device->name, addr, reg,
9534a9643ea8Slogwang 			    (uint32_t)i40e_read_rx_ctl(hw, addr));
9535a9643ea8Slogwang 	}
9536d30ea906Sjfb8856606 }
9537a9643ea8Slogwang 
9538a9643ea8Slogwang static void
i40e_filter_input_set_init(struct i40e_pf * pf)9539a9643ea8Slogwang i40e_filter_input_set_init(struct i40e_pf *pf)
9540a9643ea8Slogwang {
9541a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9542a9643ea8Slogwang 	enum i40e_filter_pctype pctype;
9543a9643ea8Slogwang 	uint64_t input_set, inset_reg;
9544a9643ea8Slogwang 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9545a9643ea8Slogwang 	int num, i;
95462bfe3f2eSlogwang 	uint16_t flow_type;
9547a9643ea8Slogwang 
9548a9643ea8Slogwang 	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9549a9643ea8Slogwang 	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
95502bfe3f2eSlogwang 		flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
95512bfe3f2eSlogwang 
95522bfe3f2eSlogwang 		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9553a9643ea8Slogwang 			continue;
95542bfe3f2eSlogwang 
9555a9643ea8Slogwang 		input_set = i40e_get_default_input_set(pctype);
9556a9643ea8Slogwang 
9557a9643ea8Slogwang 		num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9558a9643ea8Slogwang 						   I40E_INSET_MASK_NUM_REG);
9559a9643ea8Slogwang 		if (num < 0)
9560a9643ea8Slogwang 			return;
95612bfe3f2eSlogwang 		if (pf->support_multi_driver && num > 0) {
95622bfe3f2eSlogwang 			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
95632bfe3f2eSlogwang 			return;
95642bfe3f2eSlogwang 		}
9565a9643ea8Slogwang 		inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9566a9643ea8Slogwang 					input_set);
9567a9643ea8Slogwang 
9568a9643ea8Slogwang 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9569a9643ea8Slogwang 				      (uint32_t)(inset_reg & UINT32_MAX));
9570a9643ea8Slogwang 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9571a9643ea8Slogwang 				     (uint32_t)((inset_reg >>
9572a9643ea8Slogwang 				     I40E_32_BIT_WIDTH) & UINT32_MAX));
95732bfe3f2eSlogwang 		if (!pf->support_multi_driver) {
95742bfe3f2eSlogwang 			i40e_check_write_global_reg(hw,
95752bfe3f2eSlogwang 					    I40E_GLQF_HASH_INSET(0, pctype),
9576a9643ea8Slogwang 					    (uint32_t)(inset_reg & UINT32_MAX));
95772bfe3f2eSlogwang 			i40e_check_write_global_reg(hw,
95782bfe3f2eSlogwang 					     I40E_GLQF_HASH_INSET(1, pctype),
9579a9643ea8Slogwang 					     (uint32_t)((inset_reg >>
9580a9643ea8Slogwang 					      I40E_32_BIT_WIDTH) & UINT32_MAX));
9581a9643ea8Slogwang 
9582a9643ea8Slogwang 			for (i = 0; i < num; i++) {
95832bfe3f2eSlogwang 				i40e_check_write_global_reg(hw,
95842bfe3f2eSlogwang 						    I40E_GLQF_FD_MSK(i, pctype),
9585a9643ea8Slogwang 						    mask_reg[i]);
95862bfe3f2eSlogwang 				i40e_check_write_global_reg(hw,
95872bfe3f2eSlogwang 						  I40E_GLQF_HASH_MSK(i, pctype),
9588a9643ea8Slogwang 						  mask_reg[i]);
9589a9643ea8Slogwang 			}
9590a9643ea8Slogwang 			/*clear unused mask registers of the pctype */
9591a9643ea8Slogwang 			for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
95922bfe3f2eSlogwang 				i40e_check_write_global_reg(hw,
95932bfe3f2eSlogwang 						    I40E_GLQF_FD_MSK(i, pctype),
9594a9643ea8Slogwang 						    0);
95952bfe3f2eSlogwang 				i40e_check_write_global_reg(hw,
95962bfe3f2eSlogwang 						  I40E_GLQF_HASH_MSK(i, pctype),
9597a9643ea8Slogwang 						  0);
9598a9643ea8Slogwang 			}
95992bfe3f2eSlogwang 		} else {
96002bfe3f2eSlogwang 			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
96012bfe3f2eSlogwang 		}
9602a9643ea8Slogwang 		I40E_WRITE_FLUSH(hw);
9603a9643ea8Slogwang 
9604a9643ea8Slogwang 		/* store the default input set */
96052bfe3f2eSlogwang 		if (!pf->support_multi_driver)
9606a9643ea8Slogwang 			pf->hash_input_set[pctype] = input_set;
9607a9643ea8Slogwang 		pf->fdir.input_set[pctype] = input_set;
9608a9643ea8Slogwang 	}
9609a9643ea8Slogwang }
9610a9643ea8Slogwang 
9611a9643ea8Slogwang int
i40e_hash_filter_inset_select(struct i40e_hw * hw,struct rte_eth_input_set_conf * conf)9612a9643ea8Slogwang i40e_hash_filter_inset_select(struct i40e_hw *hw,
9613a9643ea8Slogwang 			 struct rte_eth_input_set_conf *conf)
9614a9643ea8Slogwang {
9615a9643ea8Slogwang 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9616a9643ea8Slogwang 	enum i40e_filter_pctype pctype;
9617a9643ea8Slogwang 	uint64_t input_set, inset_reg = 0;
9618a9643ea8Slogwang 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9619a9643ea8Slogwang 	int ret, i, num;
9620a9643ea8Slogwang 
9621a9643ea8Slogwang 	if (!conf) {
9622a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Invalid pointer");
9623a9643ea8Slogwang 		return -EFAULT;
9624a9643ea8Slogwang 	}
9625a9643ea8Slogwang 	if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9626a9643ea8Slogwang 	    conf->op != RTE_ETH_INPUT_SET_ADD) {
9627a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Unsupported input set operation");
9628a9643ea8Slogwang 		return -EINVAL;
9629a9643ea8Slogwang 	}
9630a9643ea8Slogwang 
96312bfe3f2eSlogwang 	if (pf->support_multi_driver) {
96322bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
96332bfe3f2eSlogwang 		return -ENOTSUP;
96342bfe3f2eSlogwang 	}
96352bfe3f2eSlogwang 
96362bfe3f2eSlogwang 	pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
96372bfe3f2eSlogwang 	if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9638a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "invalid flow_type input.");
9639a9643ea8Slogwang 		return -EINVAL;
9640a9643ea8Slogwang 	}
96412bfe3f2eSlogwang 
96422bfe3f2eSlogwang 	if (hw->mac.type == I40E_MAC_X722) {
96432bfe3f2eSlogwang 		/* get translated pctype value in fd pctype register */
96442bfe3f2eSlogwang 		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
96452bfe3f2eSlogwang 			I40E_GLQF_FD_PCTYPES((int)pctype));
96462bfe3f2eSlogwang 	}
96472bfe3f2eSlogwang 
9648a9643ea8Slogwang 	ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9649a9643ea8Slogwang 				   conf->inset_size);
9650a9643ea8Slogwang 	if (ret) {
9651a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to parse input set");
9652a9643ea8Slogwang 		return -EINVAL;
9653a9643ea8Slogwang 	}
96542bfe3f2eSlogwang 
9655a9643ea8Slogwang 	if (conf->op == RTE_ETH_INPUT_SET_ADD) {
9656a9643ea8Slogwang 		/* get inset value in register */
9657a9643ea8Slogwang 		inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9658a9643ea8Slogwang 		inset_reg <<= I40E_32_BIT_WIDTH;
9659a9643ea8Slogwang 		inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9660a9643ea8Slogwang 		input_set |= pf->hash_input_set[pctype];
9661a9643ea8Slogwang 	}
9662a9643ea8Slogwang 	num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9663a9643ea8Slogwang 					   I40E_INSET_MASK_NUM_REG);
9664a9643ea8Slogwang 	if (num < 0)
9665a9643ea8Slogwang 		return -EINVAL;
9666a9643ea8Slogwang 
9667a9643ea8Slogwang 	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9668a9643ea8Slogwang 
96692bfe3f2eSlogwang 	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9670a9643ea8Slogwang 				    (uint32_t)(inset_reg & UINT32_MAX));
96712bfe3f2eSlogwang 	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9672a9643ea8Slogwang 				    (uint32_t)((inset_reg >>
9673a9643ea8Slogwang 				    I40E_32_BIT_WIDTH) & UINT32_MAX));
9674a9643ea8Slogwang 
9675a9643ea8Slogwang 	for (i = 0; i < num; i++)
96762bfe3f2eSlogwang 		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9677a9643ea8Slogwang 					    mask_reg[i]);
9678a9643ea8Slogwang 	/*clear unused mask registers of the pctype */
9679a9643ea8Slogwang 	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
96802bfe3f2eSlogwang 		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9681a9643ea8Slogwang 					    0);
9682a9643ea8Slogwang 	I40E_WRITE_FLUSH(hw);
9683a9643ea8Slogwang 
9684a9643ea8Slogwang 	pf->hash_input_set[pctype] = input_set;
9685a9643ea8Slogwang 	return 0;
9686a9643ea8Slogwang }
9687a9643ea8Slogwang 
96882bfe3f2eSlogwang /* Convert ethertype filter structure */
96892bfe3f2eSlogwang static int
i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter * input,struct i40e_ethertype_filter * filter)96902bfe3f2eSlogwang i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
96912bfe3f2eSlogwang 			      struct i40e_ethertype_filter *filter)
96922bfe3f2eSlogwang {
96934418919fSjohnjiang 	rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
96944418919fSjohnjiang 		RTE_ETHER_ADDR_LEN);
96952bfe3f2eSlogwang 	filter->input.ether_type = input->ether_type;
96962bfe3f2eSlogwang 	filter->flags = input->flags;
96972bfe3f2eSlogwang 	filter->queue = input->queue;
96982bfe3f2eSlogwang 
96992bfe3f2eSlogwang 	return 0;
97002bfe3f2eSlogwang }
97012bfe3f2eSlogwang 
97022bfe3f2eSlogwang /* Check if there exists the ehtertype filter */
97032bfe3f2eSlogwang struct i40e_ethertype_filter *
i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule * ethertype_rule,const struct i40e_ethertype_filter_input * input)97042bfe3f2eSlogwang i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
97052bfe3f2eSlogwang 				const struct i40e_ethertype_filter_input *input)
97062bfe3f2eSlogwang {
97072bfe3f2eSlogwang 	int ret;
97082bfe3f2eSlogwang 
97092bfe3f2eSlogwang 	ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
97102bfe3f2eSlogwang 	if (ret < 0)
97112bfe3f2eSlogwang 		return NULL;
97122bfe3f2eSlogwang 
97132bfe3f2eSlogwang 	return ethertype_rule->hash_map[ret];
97142bfe3f2eSlogwang }
97152bfe3f2eSlogwang 
97162bfe3f2eSlogwang /* Add ethertype filter in SW list */
97172bfe3f2eSlogwang static int
i40e_sw_ethertype_filter_insert(struct i40e_pf * pf,struct i40e_ethertype_filter * filter)97182bfe3f2eSlogwang i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
97192bfe3f2eSlogwang 				struct i40e_ethertype_filter *filter)
97202bfe3f2eSlogwang {
97212bfe3f2eSlogwang 	struct i40e_ethertype_rule *rule = &pf->ethertype;
97222bfe3f2eSlogwang 	int ret;
97232bfe3f2eSlogwang 
97242bfe3f2eSlogwang 	ret = rte_hash_add_key(rule->hash_table, &filter->input);
97252bfe3f2eSlogwang 	if (ret < 0) {
97262bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
97272bfe3f2eSlogwang 			    "Failed to insert ethertype filter"
97282bfe3f2eSlogwang 			    " to hash table %d!",
97292bfe3f2eSlogwang 			    ret);
97302bfe3f2eSlogwang 		return ret;
97312bfe3f2eSlogwang 	}
97322bfe3f2eSlogwang 	rule->hash_map[ret] = filter;
97332bfe3f2eSlogwang 
97342bfe3f2eSlogwang 	TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
97352bfe3f2eSlogwang 
97362bfe3f2eSlogwang 	return 0;
97372bfe3f2eSlogwang }
97382bfe3f2eSlogwang 
97392bfe3f2eSlogwang /* Delete ethertype filter in SW list */
97402bfe3f2eSlogwang int
i40e_sw_ethertype_filter_del(struct i40e_pf * pf,struct i40e_ethertype_filter_input * input)97412bfe3f2eSlogwang i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
97422bfe3f2eSlogwang 			     struct i40e_ethertype_filter_input *input)
97432bfe3f2eSlogwang {
97442bfe3f2eSlogwang 	struct i40e_ethertype_rule *rule = &pf->ethertype;
97452bfe3f2eSlogwang 	struct i40e_ethertype_filter *filter;
97462bfe3f2eSlogwang 	int ret;
97472bfe3f2eSlogwang 
97482bfe3f2eSlogwang 	ret = rte_hash_del_key(rule->hash_table, input);
97492bfe3f2eSlogwang 	if (ret < 0) {
97502bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
97512bfe3f2eSlogwang 			    "Failed to delete ethertype filter"
97522bfe3f2eSlogwang 			    " to hash table %d!",
97532bfe3f2eSlogwang 			    ret);
97542bfe3f2eSlogwang 		return ret;
97552bfe3f2eSlogwang 	}
97562bfe3f2eSlogwang 	filter = rule->hash_map[ret];
97572bfe3f2eSlogwang 	rule->hash_map[ret] = NULL;
97582bfe3f2eSlogwang 
97592bfe3f2eSlogwang 	TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
97602bfe3f2eSlogwang 	rte_free(filter);
97612bfe3f2eSlogwang 
97622bfe3f2eSlogwang 	return 0;
97632bfe3f2eSlogwang }
97642bfe3f2eSlogwang 
9765a9643ea8Slogwang /*
9766a9643ea8Slogwang  * Configure ethertype filter, which can director packet by filtering
9767a9643ea8Slogwang  * with mac address and ether_type or only ether_type
9768a9643ea8Slogwang  */
97692bfe3f2eSlogwang int
i40e_ethertype_filter_set(struct i40e_pf * pf,struct rte_eth_ethertype_filter * filter,bool add)9770a9643ea8Slogwang i40e_ethertype_filter_set(struct i40e_pf *pf,
9771a9643ea8Slogwang 			struct rte_eth_ethertype_filter *filter,
9772a9643ea8Slogwang 			bool add)
9773a9643ea8Slogwang {
9774a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
97752bfe3f2eSlogwang 	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
97762bfe3f2eSlogwang 	struct i40e_ethertype_filter *ethertype_filter, *node;
97772bfe3f2eSlogwang 	struct i40e_ethertype_filter check_filter;
9778a9643ea8Slogwang 	struct i40e_control_filter_stats stats;
9779a9643ea8Slogwang 	uint16_t flags = 0;
9780a9643ea8Slogwang 	int ret;
9781a9643ea8Slogwang 
9782a9643ea8Slogwang 	if (filter->queue >= pf->dev_data->nb_rx_queues) {
9783a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Invalid queue ID");
9784a9643ea8Slogwang 		return -EINVAL;
9785a9643ea8Slogwang 	}
97864418919fSjohnjiang 	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
97874418919fSjohnjiang 		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
97882bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
97892bfe3f2eSlogwang 			"unsupported ether_type(0x%04x) in control packet filter.",
97902bfe3f2eSlogwang 			filter->ether_type);
9791a9643ea8Slogwang 		return -EINVAL;
9792a9643ea8Slogwang 	}
97934418919fSjohnjiang 	if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
97942bfe3f2eSlogwang 		PMD_DRV_LOG(WARNING,
97952bfe3f2eSlogwang 			"filter vlan ether_type in first tag is not supported.");
97962bfe3f2eSlogwang 
97972bfe3f2eSlogwang 	/* Check if there is the filter in SW list */
97982bfe3f2eSlogwang 	memset(&check_filter, 0, sizeof(check_filter));
97992bfe3f2eSlogwang 	i40e_ethertype_filter_convert(filter, &check_filter);
98002bfe3f2eSlogwang 	node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
98012bfe3f2eSlogwang 					       &check_filter.input);
98022bfe3f2eSlogwang 	if (add && node) {
98032bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
98042bfe3f2eSlogwang 		return -EINVAL;
98052bfe3f2eSlogwang 	}
98062bfe3f2eSlogwang 
98072bfe3f2eSlogwang 	if (!add && !node) {
98082bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
98092bfe3f2eSlogwang 		return -EINVAL;
98102bfe3f2eSlogwang 	}
9811a9643ea8Slogwang 
9812a9643ea8Slogwang 	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9813a9643ea8Slogwang 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9814a9643ea8Slogwang 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9815a9643ea8Slogwang 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9816a9643ea8Slogwang 	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9817a9643ea8Slogwang 
9818a9643ea8Slogwang 	memset(&stats, 0, sizeof(stats));
9819a9643ea8Slogwang 	ret = i40e_aq_add_rem_control_packet_filter(hw,
9820a9643ea8Slogwang 			filter->mac_addr.addr_bytes,
9821a9643ea8Slogwang 			filter->ether_type, flags,
9822a9643ea8Slogwang 			pf->main_vsi->seid,
9823a9643ea8Slogwang 			filter->queue, add, &stats, NULL);
9824a9643ea8Slogwang 
98252bfe3f2eSlogwang 	PMD_DRV_LOG(INFO,
98262bfe3f2eSlogwang 		"add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9827a9643ea8Slogwang 		ret, stats.mac_etype_used, stats.etype_used,
9828a9643ea8Slogwang 		stats.mac_etype_free, stats.etype_free);
9829a9643ea8Slogwang 	if (ret < 0)
9830a9643ea8Slogwang 		return -ENOSYS;
98312bfe3f2eSlogwang 
98322bfe3f2eSlogwang 	/* Add or delete a filter in SW list */
98332bfe3f2eSlogwang 	if (add) {
98342bfe3f2eSlogwang 		ethertype_filter = rte_zmalloc("ethertype_filter",
98352bfe3f2eSlogwang 				       sizeof(*ethertype_filter), 0);
98362bfe3f2eSlogwang 		if (ethertype_filter == NULL) {
98372bfe3f2eSlogwang 			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
98382bfe3f2eSlogwang 			return -ENOMEM;
98392bfe3f2eSlogwang 		}
98402bfe3f2eSlogwang 
98412bfe3f2eSlogwang 		rte_memcpy(ethertype_filter, &check_filter,
98422bfe3f2eSlogwang 			   sizeof(check_filter));
98432bfe3f2eSlogwang 		ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
98442bfe3f2eSlogwang 		if (ret < 0)
98452bfe3f2eSlogwang 			rte_free(ethertype_filter);
98462bfe3f2eSlogwang 	} else {
98472bfe3f2eSlogwang 		ret = i40e_sw_ethertype_filter_del(pf, &node->input);
98482bfe3f2eSlogwang 	}
98492bfe3f2eSlogwang 
98502bfe3f2eSlogwang 	return ret;
9851a9643ea8Slogwang }
9852a9643ea8Slogwang 
9853a9643ea8Slogwang static int
i40e_dev_filter_ctrl(struct rte_eth_dev * dev,enum rte_filter_type filter_type,enum rte_filter_op filter_op,void * arg)9854a9643ea8Slogwang i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
9855a9643ea8Slogwang 		     enum rte_filter_type filter_type,
9856a9643ea8Slogwang 		     enum rte_filter_op filter_op,
9857a9643ea8Slogwang 		     void *arg)
9858a9643ea8Slogwang {
9859a9643ea8Slogwang 	int ret = 0;
9860a9643ea8Slogwang 
9861a9643ea8Slogwang 	if (dev == NULL)
9862a9643ea8Slogwang 		return -EINVAL;
9863a9643ea8Slogwang 
9864a9643ea8Slogwang 	switch (filter_type) {
98652bfe3f2eSlogwang 	case RTE_ETH_FILTER_GENERIC:
98662bfe3f2eSlogwang 		if (filter_op != RTE_ETH_FILTER_GET)
98672bfe3f2eSlogwang 			return -EINVAL;
98682bfe3f2eSlogwang 		*(const void **)arg = &i40e_flow_ops;
98692bfe3f2eSlogwang 		break;
9870a9643ea8Slogwang 	default:
9871a9643ea8Slogwang 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
9872a9643ea8Slogwang 							filter_type);
9873a9643ea8Slogwang 		ret = -EINVAL;
9874a9643ea8Slogwang 		break;
9875a9643ea8Slogwang 	}
9876a9643ea8Slogwang 
9877a9643ea8Slogwang 	return ret;
9878a9643ea8Slogwang }
9879a9643ea8Slogwang 
9880a9643ea8Slogwang /*
9881a9643ea8Slogwang  * Check and enable Extended Tag.
9882a9643ea8Slogwang  * Enabling Extended Tag is important for 40G performance.
9883a9643ea8Slogwang  */
9884a9643ea8Slogwang static void
i40e_enable_extended_tag(struct rte_eth_dev * dev)9885a9643ea8Slogwang i40e_enable_extended_tag(struct rte_eth_dev *dev)
9886a9643ea8Slogwang {
98872bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9888a9643ea8Slogwang 	uint32_t buf = 0;
9889a9643ea8Slogwang 	int ret;
9890a9643ea8Slogwang 
98912bfe3f2eSlogwang 	ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9892a9643ea8Slogwang 				      PCI_DEV_CAP_REG);
9893a9643ea8Slogwang 	if (ret < 0) {
9894a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9895a9643ea8Slogwang 			    PCI_DEV_CAP_REG);
9896a9643ea8Slogwang 		return;
9897a9643ea8Slogwang 	}
9898a9643ea8Slogwang 	if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9899a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9900a9643ea8Slogwang 		return;
9901a9643ea8Slogwang 	}
9902a9643ea8Slogwang 
9903a9643ea8Slogwang 	buf = 0;
99042bfe3f2eSlogwang 	ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9905a9643ea8Slogwang 				      PCI_DEV_CTRL_REG);
9906a9643ea8Slogwang 	if (ret < 0) {
9907a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9908a9643ea8Slogwang 			    PCI_DEV_CTRL_REG);
9909a9643ea8Slogwang 		return;
9910a9643ea8Slogwang 	}
9911a9643ea8Slogwang 	if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9912a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9913a9643ea8Slogwang 		return;
9914a9643ea8Slogwang 	}
9915a9643ea8Slogwang 	buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
99162bfe3f2eSlogwang 	ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9917a9643ea8Slogwang 				       PCI_DEV_CTRL_REG);
9918a9643ea8Slogwang 	if (ret < 0) {
9919a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9920a9643ea8Slogwang 			    PCI_DEV_CTRL_REG);
9921a9643ea8Slogwang 		return;
9922a9643ea8Slogwang 	}
9923a9643ea8Slogwang }
9924a9643ea8Slogwang 
9925a9643ea8Slogwang /*
9926a9643ea8Slogwang  * As some registers wouldn't be reset unless a global hardware reset,
9927a9643ea8Slogwang  * hardware initialization is needed to put those registers into an
9928a9643ea8Slogwang  * expected initial state.
9929a9643ea8Slogwang  */
9930a9643ea8Slogwang static void
i40e_hw_init(struct rte_eth_dev * dev)9931a9643ea8Slogwang i40e_hw_init(struct rte_eth_dev *dev)
9932a9643ea8Slogwang {
9933a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9934a9643ea8Slogwang 
9935a9643ea8Slogwang 	i40e_enable_extended_tag(dev);
9936a9643ea8Slogwang 
9937a9643ea8Slogwang 	/* clear the PF Queue Filter control register */
9938a9643ea8Slogwang 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9939a9643ea8Slogwang 
9940a9643ea8Slogwang 	/* Disable symmetric hash per port */
9941a9643ea8Slogwang 	i40e_set_symmetric_hash_enable_per_port(hw, 0);
9942a9643ea8Slogwang }
9943a9643ea8Slogwang 
99442bfe3f2eSlogwang /*
99452bfe3f2eSlogwang  * For X722 it is possible to have multiple pctypes mapped to the same flowtype
99462bfe3f2eSlogwang  * however this function will return only one highest pctype index,
99472bfe3f2eSlogwang  * which is not quite correct. This is known problem of i40e driver
99482bfe3f2eSlogwang  * and needs to be fixed later.
99492bfe3f2eSlogwang  */
9950a9643ea8Slogwang enum i40e_filter_pctype
i40e_flowtype_to_pctype(const struct i40e_adapter * adapter,uint16_t flow_type)99512bfe3f2eSlogwang i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9952a9643ea8Slogwang {
99532bfe3f2eSlogwang 	int i;
99542bfe3f2eSlogwang 	uint64_t pctype_mask;
9955a9643ea8Slogwang 
99562bfe3f2eSlogwang 	if (flow_type < I40E_FLOW_TYPE_MAX) {
99572bfe3f2eSlogwang 		pctype_mask = adapter->pctypes_tbl[flow_type];
99582bfe3f2eSlogwang 		for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
99592bfe3f2eSlogwang 			if (pctype_mask & (1ULL << i))
99602bfe3f2eSlogwang 				return (enum i40e_filter_pctype)i;
99612bfe3f2eSlogwang 		}
99622bfe3f2eSlogwang 	}
99632bfe3f2eSlogwang 	return I40E_FILTER_PCTYPE_INVALID;
9964a9643ea8Slogwang }
9965a9643ea8Slogwang 
9966a9643ea8Slogwang uint16_t
i40e_pctype_to_flowtype(const struct i40e_adapter * adapter,enum i40e_filter_pctype pctype)99672bfe3f2eSlogwang i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
99682bfe3f2eSlogwang 			enum i40e_filter_pctype pctype)
9969a9643ea8Slogwang {
99702bfe3f2eSlogwang 	uint16_t flowtype;
99712bfe3f2eSlogwang 	uint64_t pctype_mask = 1ULL << pctype;
9972a9643ea8Slogwang 
99732bfe3f2eSlogwang 	for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
99742bfe3f2eSlogwang 	     flowtype++) {
99752bfe3f2eSlogwang 		if (adapter->pctypes_tbl[flowtype] & pctype_mask)
99762bfe3f2eSlogwang 			return flowtype;
99772bfe3f2eSlogwang 	}
99782bfe3f2eSlogwang 
99792bfe3f2eSlogwang 	return RTE_ETH_FLOW_UNKNOWN;
9980a9643ea8Slogwang }
9981a9643ea8Slogwang 
9982a9643ea8Slogwang /*
9983a9643ea8Slogwang  * On X710, performance number is far from the expectation on recent firmware
9984a9643ea8Slogwang  * versions; on XL710, performance number is also far from the expectation on
9985a9643ea8Slogwang  * recent firmware versions, if promiscuous mode is disabled, or promiscuous
9986a9643ea8Slogwang  * mode is enabled and port MAC address is equal to the packet destination MAC
9987a9643ea8Slogwang  * address. The fix for this issue may not be integrated in the following
9988a9643ea8Slogwang  * firmware version. So the workaround in software driver is needed. It needs
9989a9643ea8Slogwang  * to modify the initial values of 3 internal only registers for both X710 and
9990a9643ea8Slogwang  * XL710. Note that the values for X710 or XL710 could be different, and the
9991a9643ea8Slogwang  * workaround can be removed when it is fixed in firmware in the future.
9992a9643ea8Slogwang  */
9993a9643ea8Slogwang 
9994a9643ea8Slogwang /* For both X710 and XL710 */
99952bfe3f2eSlogwang #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1	0x10000200
99962bfe3f2eSlogwang #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2	0x203F0200
9997a9643ea8Slogwang #define I40E_GL_SWR_PRI_JOIN_MAP_0		0x26CE00
9998a9643ea8Slogwang 
9999a9643ea8Slogwang #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
10000a9643ea8Slogwang #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
10001a9643ea8Slogwang 
100022bfe3f2eSlogwang /* For X722 */
100032bfe3f2eSlogwang #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
100042bfe3f2eSlogwang #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
100052bfe3f2eSlogwang 
10006a9643ea8Slogwang /* For X710 */
10007a9643ea8Slogwang #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
10008a9643ea8Slogwang /* For XL710 */
10009a9643ea8Slogwang #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
10010a9643ea8Slogwang #define I40E_GL_SWR_PM_UP_THR            0x269FBC
10011a9643ea8Slogwang 
10012579bf1e2Sjfb8856606 /*
10013579bf1e2Sjfb8856606  * GL_SWR_PM_UP_THR:
10014579bf1e2Sjfb8856606  * The value is not impacted from the link speed, its value is set according
10015579bf1e2Sjfb8856606  * to the total number of ports for a better pipe-monitor configuration.
10016579bf1e2Sjfb8856606  */
10017579bf1e2Sjfb8856606 static bool
i40e_get_swr_pm_cfg(struct i40e_hw * hw,uint32_t * value)10018579bf1e2Sjfb8856606 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10019579bf1e2Sjfb8856606 {
10020579bf1e2Sjfb8856606 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10021579bf1e2Sjfb8856606 		.device_id = (dev),   \
10022579bf1e2Sjfb8856606 		.val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10023579bf1e2Sjfb8856606 
10024579bf1e2Sjfb8856606 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10025579bf1e2Sjfb8856606 		.device_id = (dev),   \
10026579bf1e2Sjfb8856606 		.val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10027579bf1e2Sjfb8856606 
10028579bf1e2Sjfb8856606 	static const struct {
10029579bf1e2Sjfb8856606 		uint16_t device_id;
10030579bf1e2Sjfb8856606 		uint32_t val;
10031579bf1e2Sjfb8856606 	} swr_pm_table[] = {
10032579bf1e2Sjfb8856606 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10033579bf1e2Sjfb8856606 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10034579bf1e2Sjfb8856606 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10035579bf1e2Sjfb8856606 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
100360c6bd470Sfengbojiang 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
10037579bf1e2Sjfb8856606 
10038579bf1e2Sjfb8856606 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10039579bf1e2Sjfb8856606 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10040579bf1e2Sjfb8856606 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10041579bf1e2Sjfb8856606 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10042579bf1e2Sjfb8856606 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10043579bf1e2Sjfb8856606 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10044579bf1e2Sjfb8856606 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10045579bf1e2Sjfb8856606 	};
10046579bf1e2Sjfb8856606 	uint32_t i;
10047579bf1e2Sjfb8856606 
10048579bf1e2Sjfb8856606 	if (value == NULL) {
10049579bf1e2Sjfb8856606 		PMD_DRV_LOG(ERR, "value is NULL");
10050579bf1e2Sjfb8856606 		return false;
10051579bf1e2Sjfb8856606 	}
10052579bf1e2Sjfb8856606 
10053579bf1e2Sjfb8856606 	for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10054579bf1e2Sjfb8856606 		if (hw->device_id == swr_pm_table[i].device_id) {
10055579bf1e2Sjfb8856606 			*value = swr_pm_table[i].val;
10056579bf1e2Sjfb8856606 
10057579bf1e2Sjfb8856606 			PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10058579bf1e2Sjfb8856606 				    "value - 0x%08x",
10059579bf1e2Sjfb8856606 				    hw->device_id, *value);
10060579bf1e2Sjfb8856606 			return true;
10061579bf1e2Sjfb8856606 		}
10062579bf1e2Sjfb8856606 	}
10063579bf1e2Sjfb8856606 
10064579bf1e2Sjfb8856606 	return false;
10065579bf1e2Sjfb8856606 }
10066579bf1e2Sjfb8856606 
100672bfe3f2eSlogwang static int
i40e_dev_sync_phy_type(struct i40e_hw * hw)100682bfe3f2eSlogwang i40e_dev_sync_phy_type(struct i40e_hw *hw)
100692bfe3f2eSlogwang {
100702bfe3f2eSlogwang 	enum i40e_status_code status;
100712bfe3f2eSlogwang 	struct i40e_aq_get_phy_abilities_resp phy_ab;
100722bfe3f2eSlogwang 	int ret = -ENOTSUP;
100732bfe3f2eSlogwang 	int retries = 0;
100742bfe3f2eSlogwang 
100752bfe3f2eSlogwang 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
100762bfe3f2eSlogwang 					      NULL);
100772bfe3f2eSlogwang 
100782bfe3f2eSlogwang 	while (status) {
100792bfe3f2eSlogwang 		PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
100802bfe3f2eSlogwang 			status);
100812bfe3f2eSlogwang 		retries++;
100822bfe3f2eSlogwang 		rte_delay_us(100000);
100832bfe3f2eSlogwang 		if  (retries < 5)
100842bfe3f2eSlogwang 			status = i40e_aq_get_phy_capabilities(hw, false,
100852bfe3f2eSlogwang 					true, &phy_ab, NULL);
100862bfe3f2eSlogwang 		else
100872bfe3f2eSlogwang 			return ret;
100882bfe3f2eSlogwang 	}
100892bfe3f2eSlogwang 	return 0;
100902bfe3f2eSlogwang }
100912bfe3f2eSlogwang 
10092a9643ea8Slogwang static void
i40e_configure_registers(struct i40e_hw * hw)10093a9643ea8Slogwang i40e_configure_registers(struct i40e_hw *hw)
10094a9643ea8Slogwang {
10095a9643ea8Slogwang 	static struct {
10096a9643ea8Slogwang 		uint32_t addr;
10097a9643ea8Slogwang 		uint64_t val;
10098a9643ea8Slogwang 	} reg_table[] = {
100992bfe3f2eSlogwang 		{I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
101002bfe3f2eSlogwang 		{I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10101a9643ea8Slogwang 		{I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10102a9643ea8Slogwang 	};
10103a9643ea8Slogwang 	uint64_t reg;
10104a9643ea8Slogwang 	uint32_t i;
10105a9643ea8Slogwang 	int ret;
10106a9643ea8Slogwang 
10107a9643ea8Slogwang 	for (i = 0; i < RTE_DIM(reg_table); i++) {
101082bfe3f2eSlogwang 		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
101092bfe3f2eSlogwang 			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
101102bfe3f2eSlogwang 				reg_table[i].val =
101112bfe3f2eSlogwang 					I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
101122bfe3f2eSlogwang 			else /* For X710/XL710/XXV710 */
101132bfe3f2eSlogwang 				if (hw->aq.fw_maj_ver < 6)
101142bfe3f2eSlogwang 					reg_table[i].val =
101152bfe3f2eSlogwang 					     I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
101162bfe3f2eSlogwang 				else
101172bfe3f2eSlogwang 					reg_table[i].val =
101182bfe3f2eSlogwang 					     I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
101192bfe3f2eSlogwang 		}
101202bfe3f2eSlogwang 
101212bfe3f2eSlogwang 		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
101222bfe3f2eSlogwang 			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
101232bfe3f2eSlogwang 				reg_table[i].val =
101242bfe3f2eSlogwang 					I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
101252bfe3f2eSlogwang 			else /* For X710/XL710/XXV710 */
101262bfe3f2eSlogwang 				reg_table[i].val =
101272bfe3f2eSlogwang 					I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
101282bfe3f2eSlogwang 		}
101292bfe3f2eSlogwang 
10130a9643ea8Slogwang 		if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10131579bf1e2Sjfb8856606 			uint32_t cfg_val;
10132579bf1e2Sjfb8856606 
10133579bf1e2Sjfb8856606 			if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10134579bf1e2Sjfb8856606 				PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10135579bf1e2Sjfb8856606 					    "GL_SWR_PM_UP_THR value fixup",
10136579bf1e2Sjfb8856606 					    hw->device_id);
10137579bf1e2Sjfb8856606 				continue;
10138579bf1e2Sjfb8856606 			}
10139579bf1e2Sjfb8856606 
10140579bf1e2Sjfb8856606 			reg_table[i].val = cfg_val;
10141a9643ea8Slogwang 		}
10142a9643ea8Slogwang 
10143a9643ea8Slogwang 		ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10144a9643ea8Slogwang 							&reg, NULL);
10145a9643ea8Slogwang 		if (ret < 0) {
10146a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10147a9643ea8Slogwang 							reg_table[i].addr);
10148a9643ea8Slogwang 			break;
10149a9643ea8Slogwang 		}
10150a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10151a9643ea8Slogwang 						reg_table[i].addr, reg);
10152a9643ea8Slogwang 		if (reg == reg_table[i].val)
10153a9643ea8Slogwang 			continue;
10154a9643ea8Slogwang 
10155a9643ea8Slogwang 		ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10156a9643ea8Slogwang 						reg_table[i].val, NULL);
10157a9643ea8Slogwang 		if (ret < 0) {
101582bfe3f2eSlogwang 			PMD_DRV_LOG(ERR,
101592bfe3f2eSlogwang 				"Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
101602bfe3f2eSlogwang 				reg_table[i].val, reg_table[i].addr);
10161a9643ea8Slogwang 			break;
10162a9643ea8Slogwang 		}
10163a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10164a9643ea8Slogwang 			"0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10165a9643ea8Slogwang 	}
10166a9643ea8Slogwang }
10167a9643ea8Slogwang 
10168a9643ea8Slogwang #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10169a9643ea8Slogwang #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10170a9643ea8Slogwang #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10171a9643ea8Slogwang static int
i40e_config_qinq(struct i40e_hw * hw,struct i40e_vsi * vsi)10172a9643ea8Slogwang i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10173a9643ea8Slogwang {
10174a9643ea8Slogwang 	uint32_t reg;
10175a9643ea8Slogwang 	int ret;
10176a9643ea8Slogwang 
10177a9643ea8Slogwang 	if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10178a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10179a9643ea8Slogwang 		return -EINVAL;
10180a9643ea8Slogwang 	}
10181a9643ea8Slogwang 
10182a9643ea8Slogwang 	/* Configure for double VLAN RX stripping */
10183a9643ea8Slogwang 	reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10184a9643ea8Slogwang 	if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10185a9643ea8Slogwang 		reg |= I40E_VSI_TSR_QINQ_CONFIG;
10186a9643ea8Slogwang 		ret = i40e_aq_debug_write_register(hw,
10187a9643ea8Slogwang 						   I40E_VSI_TSR(vsi->vsi_id),
10188a9643ea8Slogwang 						   reg, NULL);
10189a9643ea8Slogwang 		if (ret < 0) {
10190a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10191a9643ea8Slogwang 				    vsi->vsi_id);
10192a9643ea8Slogwang 			return I40E_ERR_CONFIG;
10193a9643ea8Slogwang 		}
10194a9643ea8Slogwang 	}
10195a9643ea8Slogwang 
10196a9643ea8Slogwang 	/* Configure for double VLAN TX insertion */
10197a9643ea8Slogwang 	reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10198a9643ea8Slogwang 	if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10199a9643ea8Slogwang 		reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10200a9643ea8Slogwang 		ret = i40e_aq_debug_write_register(hw,
10201a9643ea8Slogwang 						   I40E_VSI_L2TAGSTXVALID(
10202a9643ea8Slogwang 						   vsi->vsi_id), reg, NULL);
10203a9643ea8Slogwang 		if (ret < 0) {
102042bfe3f2eSlogwang 			PMD_DRV_LOG(ERR,
102052bfe3f2eSlogwang 				"Failed to update VSI_L2TAGSTXVALID[%d]",
102062bfe3f2eSlogwang 				vsi->vsi_id);
10207a9643ea8Slogwang 			return I40E_ERR_CONFIG;
10208a9643ea8Slogwang 		}
10209a9643ea8Slogwang 	}
10210a9643ea8Slogwang 
10211a9643ea8Slogwang 	return 0;
10212a9643ea8Slogwang }
10213a9643ea8Slogwang 
10214a9643ea8Slogwang /**
10215a9643ea8Slogwang  * i40e_aq_add_mirror_rule
10216a9643ea8Slogwang  * @hw: pointer to the hardware structure
10217a9643ea8Slogwang  * @seid: VEB seid to add mirror rule to
10218a9643ea8Slogwang  * @dst_id: destination vsi seid
10219a9643ea8Slogwang  * @entries: Buffer which contains the entities to be mirrored
10220a9643ea8Slogwang  * @count: number of entities contained in the buffer
10221a9643ea8Slogwang  * @rule_id:the rule_id of the rule to be added
10222a9643ea8Slogwang  *
10223a9643ea8Slogwang  * Add a mirror rule for a given veb.
10224a9643ea8Slogwang  *
10225a9643ea8Slogwang  **/
10226a9643ea8Slogwang static enum i40e_status_code
i40e_aq_add_mirror_rule(struct i40e_hw * hw,uint16_t seid,uint16_t dst_id,uint16_t rule_type,uint16_t * entries,uint16_t count,uint16_t * rule_id)10227a9643ea8Slogwang i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10228a9643ea8Slogwang 			uint16_t seid, uint16_t dst_id,
10229a9643ea8Slogwang 			uint16_t rule_type, uint16_t *entries,
10230a9643ea8Slogwang 			uint16_t count, uint16_t *rule_id)
10231a9643ea8Slogwang {
10232a9643ea8Slogwang 	struct i40e_aq_desc desc;
10233a9643ea8Slogwang 	struct i40e_aqc_add_delete_mirror_rule cmd;
10234a9643ea8Slogwang 	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
10235a9643ea8Slogwang 		(struct i40e_aqc_add_delete_mirror_rule_completion *)
10236a9643ea8Slogwang 		&desc.params.raw;
10237a9643ea8Slogwang 	uint16_t buff_len;
10238a9643ea8Slogwang 	enum i40e_status_code status;
10239a9643ea8Slogwang 
10240a9643ea8Slogwang 	i40e_fill_default_direct_cmd_desc(&desc,
10241a9643ea8Slogwang 					  i40e_aqc_opc_add_mirror_rule);
10242a9643ea8Slogwang 	memset(&cmd, 0, sizeof(cmd));
10243a9643ea8Slogwang 
10244a9643ea8Slogwang 	buff_len = sizeof(uint16_t) * count;
10245a9643ea8Slogwang 	desc.datalen = rte_cpu_to_le_16(buff_len);
10246a9643ea8Slogwang 	if (buff_len > 0)
10247a9643ea8Slogwang 		desc.flags |= rte_cpu_to_le_16(
10248a9643ea8Slogwang 			(uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
10249a9643ea8Slogwang 	cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10250a9643ea8Slogwang 				I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10251a9643ea8Slogwang 	cmd.num_entries = rte_cpu_to_le_16(count);
10252a9643ea8Slogwang 	cmd.seid = rte_cpu_to_le_16(seid);
10253a9643ea8Slogwang 	cmd.destination = rte_cpu_to_le_16(dst_id);
10254a9643ea8Slogwang 
10255a9643ea8Slogwang 	rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10256a9643ea8Slogwang 	status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
102572bfe3f2eSlogwang 	PMD_DRV_LOG(INFO,
102582bfe3f2eSlogwang 		"i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
10259a9643ea8Slogwang 		hw->aq.asq_last_status, resp->rule_id,
10260a9643ea8Slogwang 		resp->mirror_rules_used, resp->mirror_rules_free);
10261a9643ea8Slogwang 	*rule_id = rte_le_to_cpu_16(resp->rule_id);
10262a9643ea8Slogwang 
10263a9643ea8Slogwang 	return status;
10264a9643ea8Slogwang }
10265a9643ea8Slogwang 
10266a9643ea8Slogwang /**
10267a9643ea8Slogwang  * i40e_aq_del_mirror_rule
10268a9643ea8Slogwang  * @hw: pointer to the hardware structure
10269a9643ea8Slogwang  * @seid: VEB seid to add mirror rule to
10270a9643ea8Slogwang  * @entries: Buffer which contains the entities to be mirrored
10271a9643ea8Slogwang  * @count: number of entities contained in the buffer
10272a9643ea8Slogwang  * @rule_id:the rule_id of the rule to be delete
10273a9643ea8Slogwang  *
10274a9643ea8Slogwang  * Delete a mirror rule for a given veb.
10275a9643ea8Slogwang  *
10276a9643ea8Slogwang  **/
10277a9643ea8Slogwang static enum i40e_status_code
i40e_aq_del_mirror_rule(struct i40e_hw * hw,uint16_t seid,uint16_t rule_type,uint16_t * entries,uint16_t count,uint16_t rule_id)10278a9643ea8Slogwang i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10279a9643ea8Slogwang 		uint16_t seid, uint16_t rule_type, uint16_t *entries,
10280a9643ea8Slogwang 		uint16_t count, uint16_t rule_id)
10281a9643ea8Slogwang {
10282a9643ea8Slogwang 	struct i40e_aq_desc desc;
10283a9643ea8Slogwang 	struct i40e_aqc_add_delete_mirror_rule cmd;
10284a9643ea8Slogwang 	uint16_t buff_len = 0;
10285a9643ea8Slogwang 	enum i40e_status_code status;
10286a9643ea8Slogwang 	void *buff = NULL;
10287a9643ea8Slogwang 
10288a9643ea8Slogwang 	i40e_fill_default_direct_cmd_desc(&desc,
10289a9643ea8Slogwang 					  i40e_aqc_opc_delete_mirror_rule);
10290a9643ea8Slogwang 	memset(&cmd, 0, sizeof(cmd));
10291a9643ea8Slogwang 	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10292a9643ea8Slogwang 		desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10293a9643ea8Slogwang 							  I40E_AQ_FLAG_RD));
10294a9643ea8Slogwang 		cmd.num_entries = count;
10295a9643ea8Slogwang 		buff_len = sizeof(uint16_t) * count;
10296a9643ea8Slogwang 		desc.datalen = rte_cpu_to_le_16(buff_len);
10297a9643ea8Slogwang 		buff = (void *)entries;
10298a9643ea8Slogwang 	} else
10299a9643ea8Slogwang 		/* rule id is filled in destination field for deleting mirror rule */
10300a9643ea8Slogwang 		cmd.destination = rte_cpu_to_le_16(rule_id);
10301a9643ea8Slogwang 
10302a9643ea8Slogwang 	cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10303a9643ea8Slogwang 				I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10304a9643ea8Slogwang 	cmd.seid = rte_cpu_to_le_16(seid);
10305a9643ea8Slogwang 
10306a9643ea8Slogwang 	rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10307a9643ea8Slogwang 	status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10308a9643ea8Slogwang 
10309a9643ea8Slogwang 	return status;
10310a9643ea8Slogwang }
10311a9643ea8Slogwang 
10312a9643ea8Slogwang /**
10313a9643ea8Slogwang  * i40e_mirror_rule_set
10314a9643ea8Slogwang  * @dev: pointer to the hardware structure
10315a9643ea8Slogwang  * @mirror_conf: mirror rule info
10316a9643ea8Slogwang  * @sw_id: mirror rule's sw_id
10317a9643ea8Slogwang  * @on: enable/disable
10318a9643ea8Slogwang  *
10319a9643ea8Slogwang  * set a mirror rule.
10320a9643ea8Slogwang  *
10321a9643ea8Slogwang  **/
10322a9643ea8Slogwang static int
i40e_mirror_rule_set(struct rte_eth_dev * dev,struct rte_eth_mirror_conf * mirror_conf,uint8_t sw_id,uint8_t on)10323a9643ea8Slogwang i40e_mirror_rule_set(struct rte_eth_dev *dev,
10324a9643ea8Slogwang 			struct rte_eth_mirror_conf *mirror_conf,
10325a9643ea8Slogwang 			uint8_t sw_id, uint8_t on)
10326a9643ea8Slogwang {
10327a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10328a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10329a9643ea8Slogwang 	struct i40e_mirror_rule *it, *mirr_rule = NULL;
10330a9643ea8Slogwang 	struct i40e_mirror_rule *parent = NULL;
10331a9643ea8Slogwang 	uint16_t seid, dst_seid, rule_id;
10332a9643ea8Slogwang 	uint16_t i, j = 0;
10333a9643ea8Slogwang 	int ret;
10334a9643ea8Slogwang 
10335a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10336a9643ea8Slogwang 
10337a9643ea8Slogwang 	if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
103382bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
103392bfe3f2eSlogwang 			"mirror rule can not be configured without veb or vfs.");
10340a9643ea8Slogwang 		return -ENOSYS;
10341a9643ea8Slogwang 	}
10342a9643ea8Slogwang 	if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
10343a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "mirror table is full.");
10344a9643ea8Slogwang 		return -ENOSPC;
10345a9643ea8Slogwang 	}
10346a9643ea8Slogwang 	if (mirror_conf->dst_pool > pf->vf_num) {
10347a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10348a9643ea8Slogwang 				 mirror_conf->dst_pool);
10349a9643ea8Slogwang 		return -EINVAL;
10350a9643ea8Slogwang 	}
10351a9643ea8Slogwang 
10352a9643ea8Slogwang 	seid = pf->main_vsi->veb->seid;
10353a9643ea8Slogwang 
10354a9643ea8Slogwang 	TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10355a9643ea8Slogwang 		if (sw_id <= it->index) {
10356a9643ea8Slogwang 			mirr_rule = it;
10357a9643ea8Slogwang 			break;
10358a9643ea8Slogwang 		}
10359a9643ea8Slogwang 		parent = it;
10360a9643ea8Slogwang 	}
10361a9643ea8Slogwang 	if (mirr_rule && sw_id == mirr_rule->index) {
10362a9643ea8Slogwang 		if (on) {
10363a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "mirror rule exists.");
10364a9643ea8Slogwang 			return -EEXIST;
10365a9643ea8Slogwang 		} else {
10366a9643ea8Slogwang 			ret = i40e_aq_del_mirror_rule(hw, seid,
10367a9643ea8Slogwang 					mirr_rule->rule_type,
10368a9643ea8Slogwang 					mirr_rule->entries,
10369a9643ea8Slogwang 					mirr_rule->num_entries, mirr_rule->id);
10370a9643ea8Slogwang 			if (ret < 0) {
103712bfe3f2eSlogwang 				PMD_DRV_LOG(ERR,
103722bfe3f2eSlogwang 					"failed to remove mirror rule: ret = %d, aq_err = %d.",
10373a9643ea8Slogwang 					ret, hw->aq.asq_last_status);
10374a9643ea8Slogwang 				return -ENOSYS;
10375a9643ea8Slogwang 			}
10376a9643ea8Slogwang 			TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10377a9643ea8Slogwang 			rte_free(mirr_rule);
10378a9643ea8Slogwang 			pf->nb_mirror_rule--;
10379a9643ea8Slogwang 			return 0;
10380a9643ea8Slogwang 		}
10381a9643ea8Slogwang 	} else if (!on) {
10382a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10383a9643ea8Slogwang 		return -ENOENT;
10384a9643ea8Slogwang 	}
10385a9643ea8Slogwang 
10386a9643ea8Slogwang 	mirr_rule = rte_zmalloc("i40e_mirror_rule",
10387a9643ea8Slogwang 				sizeof(struct i40e_mirror_rule) , 0);
10388a9643ea8Slogwang 	if (!mirr_rule) {
10389a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "failed to allocate memory");
10390a9643ea8Slogwang 		return I40E_ERR_NO_MEMORY;
10391a9643ea8Slogwang 	}
10392a9643ea8Slogwang 	switch (mirror_conf->rule_type) {
10393a9643ea8Slogwang 	case ETH_MIRROR_VLAN:
10394a9643ea8Slogwang 		for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10395a9643ea8Slogwang 			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10396a9643ea8Slogwang 				mirr_rule->entries[j] =
10397a9643ea8Slogwang 					mirror_conf->vlan.vlan_id[i];
10398a9643ea8Slogwang 				j++;
10399a9643ea8Slogwang 			}
10400a9643ea8Slogwang 		}
10401a9643ea8Slogwang 		if (j == 0) {
10402a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "vlan is not specified.");
10403a9643ea8Slogwang 			rte_free(mirr_rule);
10404a9643ea8Slogwang 			return -EINVAL;
10405a9643ea8Slogwang 		}
10406a9643ea8Slogwang 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10407a9643ea8Slogwang 		break;
10408a9643ea8Slogwang 	case ETH_MIRROR_VIRTUAL_POOL_UP:
10409a9643ea8Slogwang 	case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10410a9643ea8Slogwang 		/* check if the specified pool bit is out of range */
10411a9643ea8Slogwang 		if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
10412a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "pool mask is out of range.");
10413a9643ea8Slogwang 			rte_free(mirr_rule);
10414a9643ea8Slogwang 			return -EINVAL;
10415a9643ea8Slogwang 		}
10416a9643ea8Slogwang 		for (i = 0, j = 0; i < pf->vf_num; i++) {
10417a9643ea8Slogwang 			if (mirror_conf->pool_mask & (1ULL << i)) {
10418a9643ea8Slogwang 				mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10419a9643ea8Slogwang 				j++;
10420a9643ea8Slogwang 			}
10421a9643ea8Slogwang 		}
10422a9643ea8Slogwang 		if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10423a9643ea8Slogwang 			/* add pf vsi to entries */
10424a9643ea8Slogwang 			mirr_rule->entries[j] = pf->main_vsi_seid;
10425a9643ea8Slogwang 			j++;
10426a9643ea8Slogwang 		}
10427a9643ea8Slogwang 		if (j == 0) {
10428a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "pool is not specified.");
10429a9643ea8Slogwang 			rte_free(mirr_rule);
10430a9643ea8Slogwang 			return -EINVAL;
10431a9643ea8Slogwang 		}
10432a9643ea8Slogwang 		/* egress and ingress in aq commands means from switch but not port */
10433a9643ea8Slogwang 		mirr_rule->rule_type =
10434a9643ea8Slogwang 			(mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10435a9643ea8Slogwang 			I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10436a9643ea8Slogwang 			I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10437a9643ea8Slogwang 		break;
10438a9643ea8Slogwang 	case ETH_MIRROR_UPLINK_PORT:
10439a9643ea8Slogwang 		/* egress and ingress in aq commands means from switch but not port*/
10440a9643ea8Slogwang 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10441a9643ea8Slogwang 		break;
10442a9643ea8Slogwang 	case ETH_MIRROR_DOWNLINK_PORT:
10443a9643ea8Slogwang 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10444a9643ea8Slogwang 		break;
10445a9643ea8Slogwang 	default:
10446a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10447a9643ea8Slogwang 			mirror_conf->rule_type);
10448a9643ea8Slogwang 		rte_free(mirr_rule);
10449a9643ea8Slogwang 		return -EINVAL;
10450a9643ea8Slogwang 	}
10451a9643ea8Slogwang 
10452a9643ea8Slogwang 	/* If the dst_pool is equal to vf_num, consider it as PF */
10453a9643ea8Slogwang 	if (mirror_conf->dst_pool == pf->vf_num)
10454a9643ea8Slogwang 		dst_seid = pf->main_vsi_seid;
10455a9643ea8Slogwang 	else
10456a9643ea8Slogwang 		dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10457a9643ea8Slogwang 
10458a9643ea8Slogwang 	ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10459a9643ea8Slogwang 				      mirr_rule->rule_type, mirr_rule->entries,
10460a9643ea8Slogwang 				      j, &rule_id);
10461a9643ea8Slogwang 	if (ret < 0) {
104622bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
104632bfe3f2eSlogwang 			"failed to add mirror rule: ret = %d, aq_err = %d.",
10464a9643ea8Slogwang 			ret, hw->aq.asq_last_status);
10465a9643ea8Slogwang 		rte_free(mirr_rule);
10466a9643ea8Slogwang 		return -ENOSYS;
10467a9643ea8Slogwang 	}
10468a9643ea8Slogwang 
10469a9643ea8Slogwang 	mirr_rule->index = sw_id;
10470a9643ea8Slogwang 	mirr_rule->num_entries = j;
10471a9643ea8Slogwang 	mirr_rule->id = rule_id;
10472a9643ea8Slogwang 	mirr_rule->dst_vsi_seid = dst_seid;
10473a9643ea8Slogwang 
10474a9643ea8Slogwang 	if (parent)
10475a9643ea8Slogwang 		TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10476a9643ea8Slogwang 	else
10477a9643ea8Slogwang 		TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10478a9643ea8Slogwang 
10479a9643ea8Slogwang 	pf->nb_mirror_rule++;
10480a9643ea8Slogwang 	return 0;
10481a9643ea8Slogwang }
10482a9643ea8Slogwang 
10483a9643ea8Slogwang /**
10484a9643ea8Slogwang  * i40e_mirror_rule_reset
10485a9643ea8Slogwang  * @dev: pointer to the device
10486a9643ea8Slogwang  * @sw_id: mirror rule's sw_id
10487a9643ea8Slogwang  *
10488a9643ea8Slogwang  * reset a mirror rule.
10489a9643ea8Slogwang  *
10490a9643ea8Slogwang  **/
10491a9643ea8Slogwang static int
i40e_mirror_rule_reset(struct rte_eth_dev * dev,uint8_t sw_id)10492a9643ea8Slogwang i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10493a9643ea8Slogwang {
10494a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10495a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10496a9643ea8Slogwang 	struct i40e_mirror_rule *it, *mirr_rule = NULL;
10497a9643ea8Slogwang 	uint16_t seid;
10498a9643ea8Slogwang 	int ret;
10499a9643ea8Slogwang 
10500a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10501a9643ea8Slogwang 
10502a9643ea8Slogwang 	seid = pf->main_vsi->veb->seid;
10503a9643ea8Slogwang 
10504a9643ea8Slogwang 	TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10505a9643ea8Slogwang 		if (sw_id == it->index) {
10506a9643ea8Slogwang 			mirr_rule = it;
10507a9643ea8Slogwang 			break;
10508a9643ea8Slogwang 		}
10509a9643ea8Slogwang 	}
10510a9643ea8Slogwang 	if (mirr_rule) {
10511a9643ea8Slogwang 		ret = i40e_aq_del_mirror_rule(hw, seid,
10512a9643ea8Slogwang 				mirr_rule->rule_type,
10513a9643ea8Slogwang 				mirr_rule->entries,
10514a9643ea8Slogwang 				mirr_rule->num_entries, mirr_rule->id);
10515a9643ea8Slogwang 		if (ret < 0) {
105162bfe3f2eSlogwang 			PMD_DRV_LOG(ERR,
105172bfe3f2eSlogwang 				"failed to remove mirror rule: status = %d, aq_err = %d.",
10518a9643ea8Slogwang 				ret, hw->aq.asq_last_status);
10519a9643ea8Slogwang 			return -ENOSYS;
10520a9643ea8Slogwang 		}
10521a9643ea8Slogwang 		TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10522a9643ea8Slogwang 		rte_free(mirr_rule);
10523a9643ea8Slogwang 		pf->nb_mirror_rule--;
10524a9643ea8Slogwang 	} else {
10525a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10526a9643ea8Slogwang 		return -ENOENT;
10527a9643ea8Slogwang 	}
10528a9643ea8Slogwang 	return 0;
10529a9643ea8Slogwang }
10530a9643ea8Slogwang 
10531a9643ea8Slogwang static uint64_t
i40e_read_systime_cyclecounter(struct rte_eth_dev * dev)10532a9643ea8Slogwang i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10533a9643ea8Slogwang {
10534a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10535a9643ea8Slogwang 	uint64_t systim_cycles;
10536a9643ea8Slogwang 
10537a9643ea8Slogwang 	systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10538a9643ea8Slogwang 	systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10539a9643ea8Slogwang 			<< 32;
10540a9643ea8Slogwang 
10541a9643ea8Slogwang 	return systim_cycles;
10542a9643ea8Slogwang }
10543a9643ea8Slogwang 
10544a9643ea8Slogwang static uint64_t
i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev * dev,uint8_t index)10545a9643ea8Slogwang i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10546a9643ea8Slogwang {
10547a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10548a9643ea8Slogwang 	uint64_t rx_tstamp;
10549a9643ea8Slogwang 
10550a9643ea8Slogwang 	rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10551a9643ea8Slogwang 	rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10552a9643ea8Slogwang 			<< 32;
10553a9643ea8Slogwang 
10554a9643ea8Slogwang 	return rx_tstamp;
10555a9643ea8Slogwang }
10556a9643ea8Slogwang 
10557a9643ea8Slogwang static uint64_t
i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev * dev)10558a9643ea8Slogwang i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10559a9643ea8Slogwang {
10560a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10561a9643ea8Slogwang 	uint64_t tx_tstamp;
10562a9643ea8Slogwang 
10563a9643ea8Slogwang 	tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10564a9643ea8Slogwang 	tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10565a9643ea8Slogwang 			<< 32;
10566a9643ea8Slogwang 
10567a9643ea8Slogwang 	return tx_tstamp;
10568a9643ea8Slogwang }
10569a9643ea8Slogwang 
10570a9643ea8Slogwang static void
i40e_start_timecounters(struct rte_eth_dev * dev)10571a9643ea8Slogwang i40e_start_timecounters(struct rte_eth_dev *dev)
10572a9643ea8Slogwang {
10573a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
105744b05018fSfengbojiang 	struct i40e_adapter *adapter = dev->data->dev_private;
10575a9643ea8Slogwang 	struct rte_eth_link link;
10576a9643ea8Slogwang 	uint32_t tsync_inc_l;
10577a9643ea8Slogwang 	uint32_t tsync_inc_h;
10578a9643ea8Slogwang 
10579a9643ea8Slogwang 	/* Get current link speed. */
10580a9643ea8Slogwang 	i40e_dev_link_update(dev, 1);
10581d30ea906Sjfb8856606 	rte_eth_linkstatus_get(dev, &link);
10582a9643ea8Slogwang 
10583a9643ea8Slogwang 	switch (link.link_speed) {
10584a9643ea8Slogwang 	case ETH_SPEED_NUM_40G:
105851646932aSjfb8856606 	case ETH_SPEED_NUM_25G:
10586a9643ea8Slogwang 		tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10587a9643ea8Slogwang 		tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10588a9643ea8Slogwang 		break;
10589a9643ea8Slogwang 	case ETH_SPEED_NUM_10G:
10590a9643ea8Slogwang 		tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10591a9643ea8Slogwang 		tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10592a9643ea8Slogwang 		break;
10593a9643ea8Slogwang 	case ETH_SPEED_NUM_1G:
10594a9643ea8Slogwang 		tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10595a9643ea8Slogwang 		tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10596a9643ea8Slogwang 		break;
10597a9643ea8Slogwang 	default:
10598a9643ea8Slogwang 		tsync_inc_l = 0x0;
10599a9643ea8Slogwang 		tsync_inc_h = 0x0;
10600a9643ea8Slogwang 	}
10601a9643ea8Slogwang 
10602a9643ea8Slogwang 	/* Set the timesync increment value. */
10603a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10604a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10605a9643ea8Slogwang 
10606a9643ea8Slogwang 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10607a9643ea8Slogwang 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10608a9643ea8Slogwang 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10609a9643ea8Slogwang 
10610a9643ea8Slogwang 	adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10611a9643ea8Slogwang 	adapter->systime_tc.cc_shift = 0;
10612a9643ea8Slogwang 	adapter->systime_tc.nsec_mask = 0;
10613a9643ea8Slogwang 
10614a9643ea8Slogwang 	adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10615a9643ea8Slogwang 	adapter->rx_tstamp_tc.cc_shift = 0;
10616a9643ea8Slogwang 	adapter->rx_tstamp_tc.nsec_mask = 0;
10617a9643ea8Slogwang 
10618a9643ea8Slogwang 	adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10619a9643ea8Slogwang 	adapter->tx_tstamp_tc.cc_shift = 0;
10620a9643ea8Slogwang 	adapter->tx_tstamp_tc.nsec_mask = 0;
10621a9643ea8Slogwang }
10622a9643ea8Slogwang 
10623a9643ea8Slogwang static int
i40e_timesync_adjust_time(struct rte_eth_dev * dev,int64_t delta)10624a9643ea8Slogwang i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10625a9643ea8Slogwang {
106264b05018fSfengbojiang 	struct i40e_adapter *adapter = dev->data->dev_private;
10627a9643ea8Slogwang 
10628a9643ea8Slogwang 	adapter->systime_tc.nsec += delta;
10629a9643ea8Slogwang 	adapter->rx_tstamp_tc.nsec += delta;
10630a9643ea8Slogwang 	adapter->tx_tstamp_tc.nsec += delta;
10631a9643ea8Slogwang 
10632a9643ea8Slogwang 	return 0;
10633a9643ea8Slogwang }
10634a9643ea8Slogwang 
10635a9643ea8Slogwang static int
i40e_timesync_write_time(struct rte_eth_dev * dev,const struct timespec * ts)10636a9643ea8Slogwang i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10637a9643ea8Slogwang {
10638a9643ea8Slogwang 	uint64_t ns;
106394b05018fSfengbojiang 	struct i40e_adapter *adapter = dev->data->dev_private;
10640a9643ea8Slogwang 
10641a9643ea8Slogwang 	ns = rte_timespec_to_ns(ts);
10642a9643ea8Slogwang 
10643a9643ea8Slogwang 	/* Set the timecounters to a new value. */
10644a9643ea8Slogwang 	adapter->systime_tc.nsec = ns;
10645a9643ea8Slogwang 	adapter->rx_tstamp_tc.nsec = ns;
10646a9643ea8Slogwang 	adapter->tx_tstamp_tc.nsec = ns;
10647a9643ea8Slogwang 
10648a9643ea8Slogwang 	return 0;
10649a9643ea8Slogwang }
10650a9643ea8Slogwang 
10651a9643ea8Slogwang static int
i40e_timesync_read_time(struct rte_eth_dev * dev,struct timespec * ts)10652a9643ea8Slogwang i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10653a9643ea8Slogwang {
10654a9643ea8Slogwang 	uint64_t ns, systime_cycles;
106554b05018fSfengbojiang 	struct i40e_adapter *adapter = dev->data->dev_private;
10656a9643ea8Slogwang 
10657a9643ea8Slogwang 	systime_cycles = i40e_read_systime_cyclecounter(dev);
10658a9643ea8Slogwang 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10659a9643ea8Slogwang 	*ts = rte_ns_to_timespec(ns);
10660a9643ea8Slogwang 
10661a9643ea8Slogwang 	return 0;
10662a9643ea8Slogwang }
10663a9643ea8Slogwang 
10664a9643ea8Slogwang static int
i40e_timesync_enable(struct rte_eth_dev * dev)10665a9643ea8Slogwang i40e_timesync_enable(struct rte_eth_dev *dev)
10666a9643ea8Slogwang {
10667a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10668a9643ea8Slogwang 	uint32_t tsync_ctl_l;
10669a9643ea8Slogwang 	uint32_t tsync_ctl_h;
10670a9643ea8Slogwang 
10671a9643ea8Slogwang 	/* Stop the timesync system time. */
10672a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10673a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10674a9643ea8Slogwang 	/* Reset the timesync system time value. */
10675a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10676a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10677a9643ea8Slogwang 
10678a9643ea8Slogwang 	i40e_start_timecounters(dev);
10679a9643ea8Slogwang 
10680a9643ea8Slogwang 	/* Clear timesync registers. */
10681a9643ea8Slogwang 	I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10682a9643ea8Slogwang 	I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10683a9643ea8Slogwang 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10684a9643ea8Slogwang 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10685a9643ea8Slogwang 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10686a9643ea8Slogwang 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10687a9643ea8Slogwang 
10688a9643ea8Slogwang 	/* Enable timestamping of PTP packets. */
10689a9643ea8Slogwang 	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10690a9643ea8Slogwang 	tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10691a9643ea8Slogwang 
10692a9643ea8Slogwang 	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10693a9643ea8Slogwang 	tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10694a9643ea8Slogwang 	tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10695a9643ea8Slogwang 
10696a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10697a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10698a9643ea8Slogwang 
10699a9643ea8Slogwang 	return 0;
10700a9643ea8Slogwang }
10701a9643ea8Slogwang 
10702a9643ea8Slogwang static int
i40e_timesync_disable(struct rte_eth_dev * dev)10703a9643ea8Slogwang i40e_timesync_disable(struct rte_eth_dev *dev)
10704a9643ea8Slogwang {
10705a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10706a9643ea8Slogwang 	uint32_t tsync_ctl_l;
10707a9643ea8Slogwang 	uint32_t tsync_ctl_h;
10708a9643ea8Slogwang 
10709a9643ea8Slogwang 	/* Disable timestamping of transmitted PTP packets. */
10710a9643ea8Slogwang 	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10711a9643ea8Slogwang 	tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10712a9643ea8Slogwang 
10713a9643ea8Slogwang 	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10714a9643ea8Slogwang 	tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10715a9643ea8Slogwang 
10716a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10717a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10718a9643ea8Slogwang 
10719a9643ea8Slogwang 	/* Reset the timesync increment value. */
10720a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10721a9643ea8Slogwang 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10722a9643ea8Slogwang 
10723a9643ea8Slogwang 	return 0;
10724a9643ea8Slogwang }
10725a9643ea8Slogwang 
10726a9643ea8Slogwang static int
i40e_timesync_read_rx_timestamp(struct rte_eth_dev * dev,struct timespec * timestamp,uint32_t flags)10727a9643ea8Slogwang i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10728a9643ea8Slogwang 				struct timespec *timestamp, uint32_t flags)
10729a9643ea8Slogwang {
10730a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
107314b05018fSfengbojiang 	struct i40e_adapter *adapter = dev->data->dev_private;
10732a9643ea8Slogwang 	uint32_t sync_status;
10733a9643ea8Slogwang 	uint32_t index = flags & 0x03;
10734a9643ea8Slogwang 	uint64_t rx_tstamp_cycles;
10735a9643ea8Slogwang 	uint64_t ns;
10736a9643ea8Slogwang 
10737a9643ea8Slogwang 	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10738a9643ea8Slogwang 	if ((sync_status & (1 << index)) == 0)
10739a9643ea8Slogwang 		return -EINVAL;
10740a9643ea8Slogwang 
10741a9643ea8Slogwang 	rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10742a9643ea8Slogwang 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10743a9643ea8Slogwang 	*timestamp = rte_ns_to_timespec(ns);
10744a9643ea8Slogwang 
10745a9643ea8Slogwang 	return 0;
10746a9643ea8Slogwang }
10747a9643ea8Slogwang 
10748a9643ea8Slogwang static int
i40e_timesync_read_tx_timestamp(struct rte_eth_dev * dev,struct timespec * timestamp)10749a9643ea8Slogwang i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10750a9643ea8Slogwang 				struct timespec *timestamp)
10751a9643ea8Slogwang {
10752a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
107534b05018fSfengbojiang 	struct i40e_adapter *adapter = dev->data->dev_private;
10754a9643ea8Slogwang 	uint32_t sync_status;
10755a9643ea8Slogwang 	uint64_t tx_tstamp_cycles;
10756a9643ea8Slogwang 	uint64_t ns;
10757a9643ea8Slogwang 
10758a9643ea8Slogwang 	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10759a9643ea8Slogwang 	if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10760a9643ea8Slogwang 		return -EINVAL;
10761a9643ea8Slogwang 
10762a9643ea8Slogwang 	tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10763a9643ea8Slogwang 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10764a9643ea8Slogwang 	*timestamp = rte_ns_to_timespec(ns);
10765a9643ea8Slogwang 
10766a9643ea8Slogwang 	return 0;
10767a9643ea8Slogwang }
10768a9643ea8Slogwang 
10769a9643ea8Slogwang /*
10770a9643ea8Slogwang  * i40e_parse_dcb_configure - parse dcb configure from user
10771a9643ea8Slogwang  * @dev: the device being configured
10772a9643ea8Slogwang  * @dcb_cfg: pointer of the result of parse
10773a9643ea8Slogwang  * @*tc_map: bit map of enabled traffic classes
10774a9643ea8Slogwang  *
10775a9643ea8Slogwang  * Returns 0 on success, negative value on failure
10776a9643ea8Slogwang  */
10777a9643ea8Slogwang static int
i40e_parse_dcb_configure(struct rte_eth_dev * dev,struct i40e_dcbx_config * dcb_cfg,uint8_t * tc_map)10778a9643ea8Slogwang i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10779a9643ea8Slogwang 			 struct i40e_dcbx_config *dcb_cfg,
10780a9643ea8Slogwang 			 uint8_t *tc_map)
10781a9643ea8Slogwang {
10782a9643ea8Slogwang 	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10783a9643ea8Slogwang 	uint8_t i, tc_bw, bw_lf;
10784a9643ea8Slogwang 
10785a9643ea8Slogwang 	memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10786a9643ea8Slogwang 
10787a9643ea8Slogwang 	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10788a9643ea8Slogwang 	if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10789a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "number of tc exceeds max.");
10790a9643ea8Slogwang 		return -EINVAL;
10791a9643ea8Slogwang 	}
10792a9643ea8Slogwang 
10793a9643ea8Slogwang 	/* assume each tc has the same bw */
10794a9643ea8Slogwang 	tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10795a9643ea8Slogwang 	for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10796a9643ea8Slogwang 		dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10797a9643ea8Slogwang 	/* to ensure the sum of tcbw is equal to 100 */
10798a9643ea8Slogwang 	bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10799a9643ea8Slogwang 	for (i = 0; i < bw_lf; i++)
10800a9643ea8Slogwang 		dcb_cfg->etscfg.tcbwtable[i]++;
10801a9643ea8Slogwang 
10802a9643ea8Slogwang 	/* assume each tc has the same Transmission Selection Algorithm */
10803a9643ea8Slogwang 	for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10804a9643ea8Slogwang 		dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10805a9643ea8Slogwang 
10806a9643ea8Slogwang 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10807a9643ea8Slogwang 		dcb_cfg->etscfg.prioritytable[i] =
10808a9643ea8Slogwang 				dcb_rx_conf->dcb_tc[i];
10809a9643ea8Slogwang 
10810a9643ea8Slogwang 	/* FW needs one App to configure HW */
10811a9643ea8Slogwang 	dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10812a9643ea8Slogwang 	dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10813a9643ea8Slogwang 	dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10814a9643ea8Slogwang 	dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10815a9643ea8Slogwang 
10816a9643ea8Slogwang 	if (dcb_rx_conf->nb_tcs == 0)
10817a9643ea8Slogwang 		*tc_map = 1; /* tc0 only */
10818a9643ea8Slogwang 	else
10819a9643ea8Slogwang 		*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10820a9643ea8Slogwang 
10821a9643ea8Slogwang 	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10822a9643ea8Slogwang 		dcb_cfg->pfc.willing = 0;
10823a9643ea8Slogwang 		dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10824a9643ea8Slogwang 		dcb_cfg->pfc.pfcenable = *tc_map;
10825a9643ea8Slogwang 	}
10826a9643ea8Slogwang 	return 0;
10827a9643ea8Slogwang }
10828a9643ea8Slogwang 
10829a9643ea8Slogwang 
10830a9643ea8Slogwang static enum i40e_status_code
i40e_vsi_update_queue_mapping(struct i40e_vsi * vsi,struct i40e_aqc_vsi_properties_data * info,uint8_t enabled_tcmap)10831a9643ea8Slogwang i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10832a9643ea8Slogwang 			      struct i40e_aqc_vsi_properties_data *info,
10833a9643ea8Slogwang 			      uint8_t enabled_tcmap)
10834a9643ea8Slogwang {
10835a9643ea8Slogwang 	enum i40e_status_code ret;
10836a9643ea8Slogwang 	int i, total_tc = 0;
10837a9643ea8Slogwang 	uint16_t qpnum_per_tc, bsf, qp_idx;
10838a9643ea8Slogwang 	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10839a9643ea8Slogwang 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10840a9643ea8Slogwang 	uint16_t used_queues;
10841a9643ea8Slogwang 
10842a9643ea8Slogwang 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10843a9643ea8Slogwang 	if (ret != I40E_SUCCESS)
10844a9643ea8Slogwang 		return ret;
10845a9643ea8Slogwang 
10846a9643ea8Slogwang 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10847a9643ea8Slogwang 		if (enabled_tcmap & (1 << i))
10848a9643ea8Slogwang 			total_tc++;
10849a9643ea8Slogwang 	}
10850a9643ea8Slogwang 	if (total_tc == 0)
10851a9643ea8Slogwang 		total_tc = 1;
10852a9643ea8Slogwang 	vsi->enabled_tc = enabled_tcmap;
10853a9643ea8Slogwang 
10854a9643ea8Slogwang 	/* different VSI has different queues assigned */
10855a9643ea8Slogwang 	if (vsi->type == I40E_VSI_MAIN)
10856a9643ea8Slogwang 		used_queues = dev_data->nb_rx_queues -
10857a9643ea8Slogwang 			pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10858a9643ea8Slogwang 	else if (vsi->type == I40E_VSI_VMDQ2)
10859a9643ea8Slogwang 		used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10860a9643ea8Slogwang 	else {
10861a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "unsupported VSI type.");
10862a9643ea8Slogwang 		return I40E_ERR_NO_AVAILABLE_VSI;
10863a9643ea8Slogwang 	}
10864a9643ea8Slogwang 
10865a9643ea8Slogwang 	qpnum_per_tc = used_queues / total_tc;
10866a9643ea8Slogwang 	/* Number of queues per enabled TC */
10867a9643ea8Slogwang 	if (qpnum_per_tc == 0) {
10868a9643ea8Slogwang 		PMD_INIT_LOG(ERR, " number of queues is less that tcs.");
10869a9643ea8Slogwang 		return I40E_ERR_INVALID_QP_ID;
10870a9643ea8Slogwang 	}
10871a9643ea8Slogwang 	qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10872a9643ea8Slogwang 				I40E_MAX_Q_PER_TC);
10873a9643ea8Slogwang 	bsf = rte_bsf32(qpnum_per_tc);
10874a9643ea8Slogwang 
10875a9643ea8Slogwang 	/**
10876a9643ea8Slogwang 	 * Configure TC and queue mapping parameters, for enabled TC,
10877a9643ea8Slogwang 	 * allocate qpnum_per_tc queues to this traffic. For disabled TC,
10878a9643ea8Slogwang 	 * default queue will serve it.
10879a9643ea8Slogwang 	 */
10880a9643ea8Slogwang 	qp_idx = 0;
10881a9643ea8Slogwang 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10882a9643ea8Slogwang 		if (vsi->enabled_tc & (1 << i)) {
10883a9643ea8Slogwang 			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10884a9643ea8Slogwang 					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10885a9643ea8Slogwang 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10886a9643ea8Slogwang 			qp_idx += qpnum_per_tc;
10887a9643ea8Slogwang 		} else
10888a9643ea8Slogwang 			info->tc_mapping[i] = 0;
10889a9643ea8Slogwang 	}
10890a9643ea8Slogwang 
10891a9643ea8Slogwang 	/* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
10892a9643ea8Slogwang 	if (vsi->type == I40E_VSI_SRIOV) {
10893a9643ea8Slogwang 		info->mapping_flags |=
10894a9643ea8Slogwang 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10895a9643ea8Slogwang 		for (i = 0; i < vsi->nb_qps; i++)
10896a9643ea8Slogwang 			info->queue_mapping[i] =
10897a9643ea8Slogwang 				rte_cpu_to_le_16(vsi->base_queue + i);
10898a9643ea8Slogwang 	} else {
10899a9643ea8Slogwang 		info->mapping_flags |=
10900a9643ea8Slogwang 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10901a9643ea8Slogwang 		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10902a9643ea8Slogwang 	}
10903a9643ea8Slogwang 	info->valid_sections |=
10904a9643ea8Slogwang 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10905a9643ea8Slogwang 
10906a9643ea8Slogwang 	return I40E_SUCCESS;
10907a9643ea8Slogwang }
10908a9643ea8Slogwang 
10909a9643ea8Slogwang /*
10910a9643ea8Slogwang  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10911a9643ea8Slogwang  * @veb: VEB to be configured
10912a9643ea8Slogwang  * @tc_map: enabled TC bitmap
10913a9643ea8Slogwang  *
10914a9643ea8Slogwang  * Returns 0 on success, negative value on failure
10915a9643ea8Slogwang  */
10916a9643ea8Slogwang static enum i40e_status_code
i40e_config_switch_comp_tc(struct i40e_veb * veb,uint8_t tc_map)10917a9643ea8Slogwang i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10918a9643ea8Slogwang {
10919a9643ea8Slogwang 	struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10920a9643ea8Slogwang 	struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10921a9643ea8Slogwang 	struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10922a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10923a9643ea8Slogwang 	enum i40e_status_code ret = I40E_SUCCESS;
10924a9643ea8Slogwang 	int i;
10925a9643ea8Slogwang 	uint32_t bw_max;
10926a9643ea8Slogwang 
10927a9643ea8Slogwang 	/* Check if enabled_tc is same as existing or new TCs */
10928a9643ea8Slogwang 	if (veb->enabled_tc == tc_map)
10929a9643ea8Slogwang 		return ret;
10930a9643ea8Slogwang 
10931a9643ea8Slogwang 	/* configure tc bandwidth */
10932a9643ea8Slogwang 	memset(&veb_bw, 0, sizeof(veb_bw));
10933a9643ea8Slogwang 	veb_bw.tc_valid_bits = tc_map;
10934a9643ea8Slogwang 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
10935a9643ea8Slogwang 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10936a9643ea8Slogwang 		if (tc_map & BIT_ULL(i))
10937a9643ea8Slogwang 			veb_bw.tc_bw_share_credits[i] = 1;
10938a9643ea8Slogwang 	}
10939a9643ea8Slogwang 	ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10940a9643ea8Slogwang 						   &veb_bw, NULL);
10941a9643ea8Slogwang 	if (ret) {
109422bfe3f2eSlogwang 		PMD_INIT_LOG(ERR,
109432bfe3f2eSlogwang 			"AQ command Config switch_comp BW allocation per TC failed = %d",
10944a9643ea8Slogwang 			hw->aq.asq_last_status);
10945a9643ea8Slogwang 		return ret;
10946a9643ea8Slogwang 	}
10947a9643ea8Slogwang 
10948a9643ea8Slogwang 	memset(&ets_query, 0, sizeof(ets_query));
10949a9643ea8Slogwang 	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10950a9643ea8Slogwang 						   &ets_query, NULL);
10951a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
109522bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
109532bfe3f2eSlogwang 			"Failed to get switch_comp ETS configuration %u",
109542bfe3f2eSlogwang 			hw->aq.asq_last_status);
10955a9643ea8Slogwang 		return ret;
10956a9643ea8Slogwang 	}
10957a9643ea8Slogwang 	memset(&bw_query, 0, sizeof(bw_query));
10958a9643ea8Slogwang 	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10959a9643ea8Slogwang 						  &bw_query, NULL);
10960a9643ea8Slogwang 	if (ret != I40E_SUCCESS) {
109612bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
109622bfe3f2eSlogwang 			"Failed to get switch_comp bandwidth configuration %u",
109632bfe3f2eSlogwang 			hw->aq.asq_last_status);
10964a9643ea8Slogwang 		return ret;
10965a9643ea8Slogwang 	}
10966a9643ea8Slogwang 
10967a9643ea8Slogwang 	/* store and print out BW info */
10968a9643ea8Slogwang 	veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10969a9643ea8Slogwang 	veb->bw_info.bw_max = ets_query.tc_bw_max;
10970a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10971a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10972a9643ea8Slogwang 	bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10973a9643ea8Slogwang 		    (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10974a9643ea8Slogwang 		     I40E_16_BIT_WIDTH);
10975a9643ea8Slogwang 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10976a9643ea8Slogwang 		veb->bw_info.bw_ets_share_credits[i] =
10977a9643ea8Slogwang 				bw_query.tc_bw_share_credits[i];
10978a9643ea8Slogwang 		veb->bw_info.bw_ets_credits[i] =
10979a9643ea8Slogwang 				rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10980a9643ea8Slogwang 		/* 4 bits per TC, 4th bit is reserved */
10981a9643ea8Slogwang 		veb->bw_info.bw_ets_max[i] =
10982a9643ea8Slogwang 			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10983a9643ea8Slogwang 				  RTE_LEN2MASK(3, uint8_t));
10984a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10985a9643ea8Slogwang 			    veb->bw_info.bw_ets_share_credits[i]);
10986a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10987a9643ea8Slogwang 			    veb->bw_info.bw_ets_credits[i]);
10988a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10989a9643ea8Slogwang 			    veb->bw_info.bw_ets_max[i]);
10990a9643ea8Slogwang 	}
10991a9643ea8Slogwang 
10992a9643ea8Slogwang 	veb->enabled_tc = tc_map;
10993a9643ea8Slogwang 
10994a9643ea8Slogwang 	return ret;
10995a9643ea8Slogwang }
10996a9643ea8Slogwang 
10997a9643ea8Slogwang 
10998a9643ea8Slogwang /*
10999a9643ea8Slogwang  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
11000a9643ea8Slogwang  * @vsi: VSI to be configured
11001a9643ea8Slogwang  * @tc_map: enabled TC bitmap
11002a9643ea8Slogwang  *
11003a9643ea8Slogwang  * Returns 0 on success, negative value on failure
11004a9643ea8Slogwang  */
11005a9643ea8Slogwang static enum i40e_status_code
i40e_vsi_config_tc(struct i40e_vsi * vsi,uint8_t tc_map)11006a9643ea8Slogwang i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
11007a9643ea8Slogwang {
11008a9643ea8Slogwang 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
11009a9643ea8Slogwang 	struct i40e_vsi_context ctxt;
11010a9643ea8Slogwang 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
11011a9643ea8Slogwang 	enum i40e_status_code ret = I40E_SUCCESS;
11012a9643ea8Slogwang 	int i;
11013a9643ea8Slogwang 
11014a9643ea8Slogwang 	/* Check if enabled_tc is same as existing or new TCs */
11015a9643ea8Slogwang 	if (vsi->enabled_tc == tc_map)
11016a9643ea8Slogwang 		return ret;
11017a9643ea8Slogwang 
11018a9643ea8Slogwang 	/* configure tc bandwidth */
11019a9643ea8Slogwang 	memset(&bw_data, 0, sizeof(bw_data));
11020a9643ea8Slogwang 	bw_data.tc_valid_bits = tc_map;
11021a9643ea8Slogwang 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
11022a9643ea8Slogwang 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11023a9643ea8Slogwang 		if (tc_map & BIT_ULL(i))
11024a9643ea8Slogwang 			bw_data.tc_bw_credits[i] = 1;
11025a9643ea8Slogwang 	}
11026a9643ea8Slogwang 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
11027a9643ea8Slogwang 	if (ret) {
110282bfe3f2eSlogwang 		PMD_INIT_LOG(ERR,
110292bfe3f2eSlogwang 			"AQ command Config VSI BW allocation per TC failed = %d",
11030a9643ea8Slogwang 			hw->aq.asq_last_status);
11031a9643ea8Slogwang 		goto out;
11032a9643ea8Slogwang 	}
11033a9643ea8Slogwang 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
11034a9643ea8Slogwang 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
11035a9643ea8Slogwang 
11036a9643ea8Slogwang 	/* Update Queue Pairs Mapping for currently enabled UPs */
11037a9643ea8Slogwang 	ctxt.seid = vsi->seid;
11038a9643ea8Slogwang 	ctxt.pf_num = hw->pf_id;
11039a9643ea8Slogwang 	ctxt.vf_num = 0;
11040a9643ea8Slogwang 	ctxt.uplink_seid = vsi->uplink_seid;
11041a9643ea8Slogwang 	ctxt.info = vsi->info;
11042a9643ea8Slogwang 	i40e_get_cap(hw);
11043a9643ea8Slogwang 	ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
11044a9643ea8Slogwang 	if (ret)
11045a9643ea8Slogwang 		goto out;
11046a9643ea8Slogwang 
11047a9643ea8Slogwang 	/* Update the VSI after updating the VSI queue-mapping information */
11048a9643ea8Slogwang 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11049a9643ea8Slogwang 	if (ret) {
110502bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
11051a9643ea8Slogwang 			hw->aq.asq_last_status);
11052a9643ea8Slogwang 		goto out;
11053a9643ea8Slogwang 	}
11054a9643ea8Slogwang 	/* update the local VSI info with updated queue map */
110552bfe3f2eSlogwang 	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
11056a9643ea8Slogwang 					sizeof(vsi->info.tc_mapping));
110572bfe3f2eSlogwang 	rte_memcpy(&vsi->info.queue_mapping,
11058a9643ea8Slogwang 			&ctxt.info.queue_mapping,
11059a9643ea8Slogwang 		sizeof(vsi->info.queue_mapping));
11060a9643ea8Slogwang 	vsi->info.mapping_flags = ctxt.info.mapping_flags;
11061a9643ea8Slogwang 	vsi->info.valid_sections = 0;
11062a9643ea8Slogwang 
11063a9643ea8Slogwang 	/* query and update current VSI BW information */
11064a9643ea8Slogwang 	ret = i40e_vsi_get_bw_config(vsi);
11065a9643ea8Slogwang 	if (ret) {
11066a9643ea8Slogwang 		PMD_INIT_LOG(ERR,
11067a9643ea8Slogwang 			 "Failed updating vsi bw info, err %s aq_err %s",
11068a9643ea8Slogwang 			 i40e_stat_str(hw, ret),
11069a9643ea8Slogwang 			 i40e_aq_str(hw, hw->aq.asq_last_status));
11070a9643ea8Slogwang 		goto out;
11071a9643ea8Slogwang 	}
11072a9643ea8Slogwang 
11073a9643ea8Slogwang 	vsi->enabled_tc = tc_map;
11074a9643ea8Slogwang 
11075a9643ea8Slogwang out:
11076a9643ea8Slogwang 	return ret;
11077a9643ea8Slogwang }
11078a9643ea8Slogwang 
11079a9643ea8Slogwang /*
11080a9643ea8Slogwang  * i40e_dcb_hw_configure - program the dcb setting to hw
11081a9643ea8Slogwang  * @pf: pf the configuration is taken on
11082a9643ea8Slogwang  * @new_cfg: new configuration
11083a9643ea8Slogwang  * @tc_map: enabled TC bitmap
11084a9643ea8Slogwang  *
11085a9643ea8Slogwang  * Returns 0 on success, negative value on failure
11086a9643ea8Slogwang  */
11087a9643ea8Slogwang static enum i40e_status_code
i40e_dcb_hw_configure(struct i40e_pf * pf,struct i40e_dcbx_config * new_cfg,uint8_t tc_map)11088a9643ea8Slogwang i40e_dcb_hw_configure(struct i40e_pf *pf,
11089a9643ea8Slogwang 		      struct i40e_dcbx_config *new_cfg,
11090a9643ea8Slogwang 		      uint8_t tc_map)
11091a9643ea8Slogwang {
11092a9643ea8Slogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11093a9643ea8Slogwang 	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11094a9643ea8Slogwang 	struct i40e_vsi *main_vsi = pf->main_vsi;
11095a9643ea8Slogwang 	struct i40e_vsi_list *vsi_list;
11096a9643ea8Slogwang 	enum i40e_status_code ret;
11097a9643ea8Slogwang 	int i;
11098a9643ea8Slogwang 	uint32_t val;
11099a9643ea8Slogwang 
11100a9643ea8Slogwang 	/* Use the FW API if FW > v4.4*/
11101a9643ea8Slogwang 	if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11102a9643ea8Slogwang 	      (hw->aq.fw_maj_ver >= 5))) {
111032bfe3f2eSlogwang 		PMD_INIT_LOG(ERR,
111042bfe3f2eSlogwang 			"FW < v4.4, can not use FW LLDP API to configure DCB");
11105a9643ea8Slogwang 		return I40E_ERR_FIRMWARE_API_VERSION;
11106a9643ea8Slogwang 	}
11107a9643ea8Slogwang 
11108a9643ea8Slogwang 	/* Check if need reconfiguration */
11109a9643ea8Slogwang 	if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11110a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
11111a9643ea8Slogwang 		return I40E_SUCCESS;
11112a9643ea8Slogwang 	}
11113a9643ea8Slogwang 
11114a9643ea8Slogwang 	/* Copy the new config to the current config */
11115a9643ea8Slogwang 	*old_cfg = *new_cfg;
11116a9643ea8Slogwang 	old_cfg->etsrec = old_cfg->etscfg;
11117a9643ea8Slogwang 	ret = i40e_set_dcb_config(hw);
11118a9643ea8Slogwang 	if (ret) {
111192bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11120a9643ea8Slogwang 			 i40e_stat_str(hw, ret),
11121a9643ea8Slogwang 			 i40e_aq_str(hw, hw->aq.asq_last_status));
11122a9643ea8Slogwang 		return ret;
11123a9643ea8Slogwang 	}
11124a9643ea8Slogwang 	/* set receive Arbiter to RR mode and ETS scheme by default */
11125a9643ea8Slogwang 	for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11126a9643ea8Slogwang 		val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11127a9643ea8Slogwang 		val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11128a9643ea8Slogwang 			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11129a9643ea8Slogwang 			 I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
11130a9643ea8Slogwang 		val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11131a9643ea8Slogwang 			I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11132a9643ea8Slogwang 			 I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11133a9643ea8Slogwang 		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11134a9643ea8Slogwang 			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11135a9643ea8Slogwang 		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11136a9643ea8Slogwang 			 I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11137a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11138a9643ea8Slogwang 	}
11139a9643ea8Slogwang 	/* get local mib to check whether it is configured correctly */
11140a9643ea8Slogwang 	/* IEEE mode */
11141a9643ea8Slogwang 	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11142a9643ea8Slogwang 	/* Get Local DCB Config */
11143a9643ea8Slogwang 	i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11144a9643ea8Slogwang 				     &hw->local_dcbx_config);
11145a9643ea8Slogwang 
11146a9643ea8Slogwang 	/* if Veb is created, need to update TC of it at first */
11147a9643ea8Slogwang 	if (main_vsi->veb) {
11148a9643ea8Slogwang 		ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11149a9643ea8Slogwang 		if (ret)
11150a9643ea8Slogwang 			PMD_INIT_LOG(WARNING,
111512bfe3f2eSlogwang 				 "Failed configuring TC for VEB seid=%d",
11152a9643ea8Slogwang 				 main_vsi->veb->seid);
11153a9643ea8Slogwang 	}
11154a9643ea8Slogwang 	/* Update each VSI */
11155a9643ea8Slogwang 	i40e_vsi_config_tc(main_vsi, tc_map);
11156a9643ea8Slogwang 	if (main_vsi->veb) {
11157a9643ea8Slogwang 		TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11158a9643ea8Slogwang 			/* Beside main VSI and VMDQ VSIs, only enable default
11159a9643ea8Slogwang 			 * TC for other VSIs
11160a9643ea8Slogwang 			 */
11161a9643ea8Slogwang 			if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11162a9643ea8Slogwang 				ret = i40e_vsi_config_tc(vsi_list->vsi,
11163a9643ea8Slogwang 							 tc_map);
11164a9643ea8Slogwang 			else
11165a9643ea8Slogwang 				ret = i40e_vsi_config_tc(vsi_list->vsi,
11166a9643ea8Slogwang 							 I40E_DEFAULT_TCMAP);
11167a9643ea8Slogwang 			if (ret)
11168a9643ea8Slogwang 				PMD_INIT_LOG(WARNING,
111692bfe3f2eSlogwang 					"Failed configuring TC for VSI seid=%d",
11170a9643ea8Slogwang 					vsi_list->vsi->seid);
11171a9643ea8Slogwang 			/* continue */
11172a9643ea8Slogwang 		}
11173a9643ea8Slogwang 	}
11174a9643ea8Slogwang 	return I40E_SUCCESS;
11175a9643ea8Slogwang }
11176a9643ea8Slogwang 
11177a9643ea8Slogwang /*
11178a9643ea8Slogwang  * i40e_dcb_init_configure - initial dcb config
11179a9643ea8Slogwang  * @dev: device being configured
11180a9643ea8Slogwang  * @sw_dcb: indicate whether dcb is sw configured or hw offload
11181a9643ea8Slogwang  *
11182a9643ea8Slogwang  * Returns 0 on success, negative value on failure
11183a9643ea8Slogwang  */
111842bfe3f2eSlogwang int
i40e_dcb_init_configure(struct rte_eth_dev * dev,bool sw_dcb)11185a9643ea8Slogwang i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11186a9643ea8Slogwang {
11187a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11188a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
111892bfe3f2eSlogwang 	int i, ret = 0;
11190a9643ea8Slogwang 
11191a9643ea8Slogwang 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
11192a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11193a9643ea8Slogwang 		return -ENOTSUP;
11194a9643ea8Slogwang 	}
11195a9643ea8Slogwang 
11196a9643ea8Slogwang 	/* DCB initialization:
11197a9643ea8Slogwang 	 * Update DCB configuration from the Firmware and configure
11198a9643ea8Slogwang 	 * LLDP MIB change event.
11199a9643ea8Slogwang 	 */
11200a9643ea8Slogwang 	if (sw_dcb == TRUE) {
11201*2d9fd380Sjfb8856606 		/* Stopping lldp is necessary for DPDK, but it will cause
11202*2d9fd380Sjfb8856606 		 * DCB init failed. For i40e_init_dcb(), the prerequisite
11203*2d9fd380Sjfb8856606 		 * for successful initialization of DCB is that LLDP is
11204*2d9fd380Sjfb8856606 		 * enabled. So it is needed to start lldp before DCB init
11205*2d9fd380Sjfb8856606 		 * and stop it after initialization.
11206*2d9fd380Sjfb8856606 		 */
11207*2d9fd380Sjfb8856606 		ret = i40e_aq_start_lldp(hw, true, NULL);
11208d30ea906Sjfb8856606 		if (ret != I40E_SUCCESS)
11209*2d9fd380Sjfb8856606 			PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11210d30ea906Sjfb8856606 
11211*2d9fd380Sjfb8856606 		ret = i40e_init_dcb(hw, true);
11212a9643ea8Slogwang 		/* If lldp agent is stopped, the return value from
11213a9643ea8Slogwang 		 * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
11214a9643ea8Slogwang 		 * adminq status. Otherwise, it should return success.
11215a9643ea8Slogwang 		 */
11216a9643ea8Slogwang 		if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
11217a9643ea8Slogwang 		    hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
11218a9643ea8Slogwang 			memset(&hw->local_dcbx_config, 0,
11219a9643ea8Slogwang 				sizeof(struct i40e_dcbx_config));
11220a9643ea8Slogwang 			/* set dcb default configuration */
11221a9643ea8Slogwang 			hw->local_dcbx_config.etscfg.willing = 0;
11222a9643ea8Slogwang 			hw->local_dcbx_config.etscfg.maxtcs = 0;
11223a9643ea8Slogwang 			hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11224a9643ea8Slogwang 			hw->local_dcbx_config.etscfg.tsatable[0] =
11225a9643ea8Slogwang 						I40E_IEEE_TSA_ETS;
112262bfe3f2eSlogwang 			/* all UPs mapping to TC0 */
112272bfe3f2eSlogwang 			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
112282bfe3f2eSlogwang 				hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11229a9643ea8Slogwang 			hw->local_dcbx_config.etsrec =
11230a9643ea8Slogwang 				hw->local_dcbx_config.etscfg;
11231a9643ea8Slogwang 			hw->local_dcbx_config.pfc.willing = 0;
11232a9643ea8Slogwang 			hw->local_dcbx_config.pfc.pfccap =
11233a9643ea8Slogwang 						I40E_MAX_TRAFFIC_CLASS;
11234a9643ea8Slogwang 			/* FW needs one App to configure HW */
11235a9643ea8Slogwang 			hw->local_dcbx_config.numapps = 1;
11236a9643ea8Slogwang 			hw->local_dcbx_config.app[0].selector =
11237a9643ea8Slogwang 						I40E_APP_SEL_ETHTYPE;
11238a9643ea8Slogwang 			hw->local_dcbx_config.app[0].priority = 3;
11239a9643ea8Slogwang 			hw->local_dcbx_config.app[0].protocolid =
11240a9643ea8Slogwang 						I40E_APP_PROTOID_FCOE;
11241a9643ea8Slogwang 			ret = i40e_set_dcb_config(hw);
11242a9643ea8Slogwang 			if (ret) {
112432bfe3f2eSlogwang 				PMD_INIT_LOG(ERR,
112442bfe3f2eSlogwang 					"default dcb config fails. err = %d, aq_err = %d.",
112452bfe3f2eSlogwang 					ret, hw->aq.asq_last_status);
11246a9643ea8Slogwang 				return -ENOSYS;
11247a9643ea8Slogwang 			}
11248a9643ea8Slogwang 		} else {
112492bfe3f2eSlogwang 			PMD_INIT_LOG(ERR,
112502bfe3f2eSlogwang 				"DCB initialization in FW fails, err = %d, aq_err = %d.",
112512bfe3f2eSlogwang 				ret, hw->aq.asq_last_status);
11252a9643ea8Slogwang 			return -ENOTSUP;
11253a9643ea8Slogwang 		}
11254*2d9fd380Sjfb8856606 
11255*2d9fd380Sjfb8856606 		if (i40e_need_stop_lldp(dev)) {
11256*2d9fd380Sjfb8856606 			ret = i40e_aq_stop_lldp(hw, true, true, NULL);
11257*2d9fd380Sjfb8856606 			if (ret != I40E_SUCCESS)
11258*2d9fd380Sjfb8856606 				PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
11259*2d9fd380Sjfb8856606 		}
11260a9643ea8Slogwang 	} else {
11261*2d9fd380Sjfb8856606 		ret = i40e_aq_start_lldp(hw, true, NULL);
11262a9643ea8Slogwang 		if (ret != I40E_SUCCESS)
11263a9643ea8Slogwang 			PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11264a9643ea8Slogwang 
11265*2d9fd380Sjfb8856606 		ret = i40e_init_dcb(hw, true);
11266a9643ea8Slogwang 		if (!ret) {
11267a9643ea8Slogwang 			if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
112682bfe3f2eSlogwang 				PMD_INIT_LOG(ERR,
112692bfe3f2eSlogwang 					"HW doesn't support DCBX offload.");
11270a9643ea8Slogwang 				return -ENOTSUP;
11271a9643ea8Slogwang 			}
11272a9643ea8Slogwang 		} else {
112732bfe3f2eSlogwang 			PMD_INIT_LOG(ERR,
112742bfe3f2eSlogwang 				"DCBX configuration failed, err = %d, aq_err = %d.",
112752bfe3f2eSlogwang 				ret, hw->aq.asq_last_status);
11276a9643ea8Slogwang 			return -ENOTSUP;
11277a9643ea8Slogwang 		}
11278a9643ea8Slogwang 	}
11279a9643ea8Slogwang 	return 0;
11280a9643ea8Slogwang }
11281a9643ea8Slogwang 
11282a9643ea8Slogwang /*
11283a9643ea8Slogwang  * i40e_dcb_setup - setup dcb related config
11284a9643ea8Slogwang  * @dev: device being configured
11285a9643ea8Slogwang  *
11286a9643ea8Slogwang  * Returns 0 on success, negative value on failure
11287a9643ea8Slogwang  */
11288a9643ea8Slogwang static int
i40e_dcb_setup(struct rte_eth_dev * dev)11289a9643ea8Slogwang i40e_dcb_setup(struct rte_eth_dev *dev)
11290a9643ea8Slogwang {
11291a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11292a9643ea8Slogwang 	struct i40e_dcbx_config dcb_cfg;
11293a9643ea8Slogwang 	uint8_t tc_map = 0;
11294a9643ea8Slogwang 	int ret = 0;
11295a9643ea8Slogwang 
11296a9643ea8Slogwang 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
11297a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11298a9643ea8Slogwang 		return -ENOTSUP;
11299a9643ea8Slogwang 	}
11300a9643ea8Slogwang 
11301a9643ea8Slogwang 	if (pf->vf_num != 0)
11302a9643ea8Slogwang 		PMD_INIT_LOG(DEBUG, " DCB only works on pf and vmdq vsis.");
11303a9643ea8Slogwang 
11304a9643ea8Slogwang 	ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11305a9643ea8Slogwang 	if (ret) {
11306a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "invalid dcb config");
11307a9643ea8Slogwang 		return -EINVAL;
11308a9643ea8Slogwang 	}
11309a9643ea8Slogwang 	ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11310a9643ea8Slogwang 	if (ret) {
11311a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "dcb sw configure fails");
11312a9643ea8Slogwang 		return -ENOSYS;
11313a9643ea8Slogwang 	}
11314a9643ea8Slogwang 
11315a9643ea8Slogwang 	return 0;
11316a9643ea8Slogwang }
11317a9643ea8Slogwang 
11318a9643ea8Slogwang static int
i40e_dev_get_dcb_info(struct rte_eth_dev * dev,struct rte_eth_dcb_info * dcb_info)11319a9643ea8Slogwang i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11320a9643ea8Slogwang 		      struct rte_eth_dcb_info *dcb_info)
11321a9643ea8Slogwang {
11322a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11323a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11324a9643ea8Slogwang 	struct i40e_vsi *vsi = pf->main_vsi;
11325a9643ea8Slogwang 	struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11326a9643ea8Slogwang 	uint16_t bsf, tc_mapping;
11327a9643ea8Slogwang 	int i, j = 0;
11328a9643ea8Slogwang 
11329a9643ea8Slogwang 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11330a9643ea8Slogwang 		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11331a9643ea8Slogwang 	else
11332a9643ea8Slogwang 		dcb_info->nb_tcs = 1;
11333a9643ea8Slogwang 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11334a9643ea8Slogwang 		dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11335a9643ea8Slogwang 	for (i = 0; i < dcb_info->nb_tcs; i++)
11336a9643ea8Slogwang 		dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11337a9643ea8Slogwang 
11338a9643ea8Slogwang 	/* get queue mapping if vmdq is disabled */
11339a9643ea8Slogwang 	if (!pf->nb_cfg_vmdq_vsi) {
11340a9643ea8Slogwang 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11341a9643ea8Slogwang 			if (!(vsi->enabled_tc & (1 << i)))
11342a9643ea8Slogwang 				continue;
11343a9643ea8Slogwang 			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11344a9643ea8Slogwang 			dcb_info->tc_queue.tc_rxq[j][i].base =
11345a9643ea8Slogwang 				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11346a9643ea8Slogwang 				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11347a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[j][i].base =
11348a9643ea8Slogwang 				dcb_info->tc_queue.tc_rxq[j][i].base;
11349a9643ea8Slogwang 			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11350a9643ea8Slogwang 				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11351a9643ea8Slogwang 			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11352a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11353a9643ea8Slogwang 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11354a9643ea8Slogwang 		}
11355a9643ea8Slogwang 		return 0;
11356a9643ea8Slogwang 	}
11357a9643ea8Slogwang 
11358a9643ea8Slogwang 	/* get queue mapping if vmdq is enabled */
11359a9643ea8Slogwang 	do {
11360a9643ea8Slogwang 		vsi = pf->vmdq[j].vsi;
11361a9643ea8Slogwang 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11362a9643ea8Slogwang 			if (!(vsi->enabled_tc & (1 << i)))
11363a9643ea8Slogwang 				continue;
11364a9643ea8Slogwang 			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11365a9643ea8Slogwang 			dcb_info->tc_queue.tc_rxq[j][i].base =
11366a9643ea8Slogwang 				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11367a9643ea8Slogwang 				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11368a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[j][i].base =
11369a9643ea8Slogwang 				dcb_info->tc_queue.tc_rxq[j][i].base;
11370a9643ea8Slogwang 			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11371a9643ea8Slogwang 				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11372a9643ea8Slogwang 			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11373a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11374a9643ea8Slogwang 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11375a9643ea8Slogwang 		}
11376a9643ea8Slogwang 		j++;
11377a9643ea8Slogwang 	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11378a9643ea8Slogwang 	return 0;
11379a9643ea8Slogwang }
11380a9643ea8Slogwang 
11381a9643ea8Slogwang static int
i40e_dev_rx_queue_intr_enable(struct rte_eth_dev * dev,uint16_t queue_id)11382a9643ea8Slogwang i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11383a9643ea8Slogwang {
113842bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
113852bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11386a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11387a9643ea8Slogwang 	uint16_t msix_intr;
11388a9643ea8Slogwang 
11389a9643ea8Slogwang 	msix_intr = intr_handle->intr_vec[queue_id];
11390a9643ea8Slogwang 	if (msix_intr == I40E_MISC_VEC_ID)
11391a9643ea8Slogwang 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
113922bfe3f2eSlogwang 			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
113932bfe3f2eSlogwang 			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
113942bfe3f2eSlogwang 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11395a9643ea8Slogwang 	else
11396a9643ea8Slogwang 		I40E_WRITE_REG(hw,
11397a9643ea8Slogwang 			       I40E_PFINT_DYN_CTLN(msix_intr -
11398a9643ea8Slogwang 						   I40E_RX_VEC_START),
11399a9643ea8Slogwang 			       I40E_PFINT_DYN_CTLN_INTENA_MASK |
11400a9643ea8Slogwang 			       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
114012bfe3f2eSlogwang 			       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11402a9643ea8Slogwang 
11403a9643ea8Slogwang 	I40E_WRITE_FLUSH(hw);
114044418919fSjohnjiang 	rte_intr_ack(&pci_dev->intr_handle);
11405a9643ea8Slogwang 
11406a9643ea8Slogwang 	return 0;
11407a9643ea8Slogwang }
11408a9643ea8Slogwang 
11409a9643ea8Slogwang static int
i40e_dev_rx_queue_intr_disable(struct rte_eth_dev * dev,uint16_t queue_id)11410a9643ea8Slogwang i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11411a9643ea8Slogwang {
114122bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
114132bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11414a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11415a9643ea8Slogwang 	uint16_t msix_intr;
11416a9643ea8Slogwang 
11417a9643ea8Slogwang 	msix_intr = intr_handle->intr_vec[queue_id];
11418a9643ea8Slogwang 	if (msix_intr == I40E_MISC_VEC_ID)
114192bfe3f2eSlogwang 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
114202bfe3f2eSlogwang 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11421a9643ea8Slogwang 	else
11422a9643ea8Slogwang 		I40E_WRITE_REG(hw,
11423a9643ea8Slogwang 			       I40E_PFINT_DYN_CTLN(msix_intr -
11424a9643ea8Slogwang 						   I40E_RX_VEC_START),
114252bfe3f2eSlogwang 			       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11426a9643ea8Slogwang 	I40E_WRITE_FLUSH(hw);
11427a9643ea8Slogwang 
11428a9643ea8Slogwang 	return 0;
11429a9643ea8Slogwang }
11430a9643ea8Slogwang 
11431d30ea906Sjfb8856606 /**
11432d30ea906Sjfb8856606  * This function is used to check if the register is valid.
11433d30ea906Sjfb8856606  * Below is the valid registers list for X722 only:
11434d30ea906Sjfb8856606  * 0x2b800--0x2bb00
11435d30ea906Sjfb8856606  * 0x38700--0x38a00
11436d30ea906Sjfb8856606  * 0x3d800--0x3db00
11437d30ea906Sjfb8856606  * 0x208e00--0x209000
11438d30ea906Sjfb8856606  * 0x20be00--0x20c000
11439d30ea906Sjfb8856606  * 0x263c00--0x264000
11440d30ea906Sjfb8856606  * 0x265c00--0x266000
11441d30ea906Sjfb8856606  */
i40e_valid_regs(enum i40e_mac_type type,uint32_t reg_offset)11442d30ea906Sjfb8856606 static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
11443d30ea906Sjfb8856606 {
11444d30ea906Sjfb8856606 	if ((type != I40E_MAC_X722) &&
11445d30ea906Sjfb8856606 	    ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
11446d30ea906Sjfb8856606 	     (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
11447d30ea906Sjfb8856606 	     (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
11448d30ea906Sjfb8856606 	     (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
11449d30ea906Sjfb8856606 	     (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
11450d30ea906Sjfb8856606 	     (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
11451d30ea906Sjfb8856606 	     (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
11452d30ea906Sjfb8856606 		return 0;
11453d30ea906Sjfb8856606 	else
11454d30ea906Sjfb8856606 		return 1;
11455d30ea906Sjfb8856606 }
11456d30ea906Sjfb8856606 
i40e_get_regs(struct rte_eth_dev * dev,struct rte_dev_reg_info * regs)11457a9643ea8Slogwang static int i40e_get_regs(struct rte_eth_dev *dev,
11458a9643ea8Slogwang 			 struct rte_dev_reg_info *regs)
11459a9643ea8Slogwang {
11460a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11461a9643ea8Slogwang 	uint32_t *ptr_data = regs->data;
11462a9643ea8Slogwang 	uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11463a9643ea8Slogwang 	const struct i40e_reg_info *reg_info;
11464a9643ea8Slogwang 
11465a9643ea8Slogwang 	if (ptr_data == NULL) {
11466a9643ea8Slogwang 		regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11467a9643ea8Slogwang 		regs->width = sizeof(uint32_t);
11468a9643ea8Slogwang 		return 0;
11469a9643ea8Slogwang 	}
11470a9643ea8Slogwang 
11471a9643ea8Slogwang 	/* The first few registers have to be read using AQ operations */
11472a9643ea8Slogwang 	reg_idx = 0;
11473a9643ea8Slogwang 	while (i40e_regs_adminq[reg_idx].name) {
11474a9643ea8Slogwang 		reg_info = &i40e_regs_adminq[reg_idx++];
11475a9643ea8Slogwang 		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11476a9643ea8Slogwang 			for (arr_idx2 = 0;
11477a9643ea8Slogwang 					arr_idx2 <= reg_info->count2;
11478a9643ea8Slogwang 					arr_idx2++) {
11479a9643ea8Slogwang 				reg_offset = arr_idx * reg_info->stride1 +
11480a9643ea8Slogwang 					arr_idx2 * reg_info->stride2;
11481a9643ea8Slogwang 				reg_offset += reg_info->base_addr;
11482a9643ea8Slogwang 				ptr_data[reg_offset >> 2] =
11483a9643ea8Slogwang 					i40e_read_rx_ctl(hw, reg_offset);
11484a9643ea8Slogwang 			}
11485a9643ea8Slogwang 	}
11486a9643ea8Slogwang 
11487a9643ea8Slogwang 	/* The remaining registers can be read using primitives */
11488a9643ea8Slogwang 	reg_idx = 0;
11489a9643ea8Slogwang 	while (i40e_regs_others[reg_idx].name) {
11490a9643ea8Slogwang 		reg_info = &i40e_regs_others[reg_idx++];
11491a9643ea8Slogwang 		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11492a9643ea8Slogwang 			for (arr_idx2 = 0;
11493a9643ea8Slogwang 					arr_idx2 <= reg_info->count2;
11494a9643ea8Slogwang 					arr_idx2++) {
11495a9643ea8Slogwang 				reg_offset = arr_idx * reg_info->stride1 +
11496a9643ea8Slogwang 					arr_idx2 * reg_info->stride2;
11497a9643ea8Slogwang 				reg_offset += reg_info->base_addr;
11498d30ea906Sjfb8856606 				if (!i40e_valid_regs(hw->mac.type, reg_offset))
11499d30ea906Sjfb8856606 					ptr_data[reg_offset >> 2] = 0;
11500d30ea906Sjfb8856606 				else
11501a9643ea8Slogwang 					ptr_data[reg_offset >> 2] =
11502a9643ea8Slogwang 						I40E_READ_REG(hw, reg_offset);
11503a9643ea8Slogwang 			}
11504a9643ea8Slogwang 	}
11505a9643ea8Slogwang 
11506a9643ea8Slogwang 	return 0;
11507a9643ea8Slogwang }
11508a9643ea8Slogwang 
i40e_get_eeprom_length(struct rte_eth_dev * dev)11509a9643ea8Slogwang static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11510a9643ea8Slogwang {
11511a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11512a9643ea8Slogwang 
11513a9643ea8Slogwang 	/* Convert word count to byte count */
11514a9643ea8Slogwang 	return hw->nvm.sr_size << 1;
11515a9643ea8Slogwang }
11516a9643ea8Slogwang 
i40e_get_eeprom(struct rte_eth_dev * dev,struct rte_dev_eeprom_info * eeprom)11517a9643ea8Slogwang static int i40e_get_eeprom(struct rte_eth_dev *dev,
11518a9643ea8Slogwang 			   struct rte_dev_eeprom_info *eeprom)
11519a9643ea8Slogwang {
11520a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11521a9643ea8Slogwang 	uint16_t *data = eeprom->data;
11522a9643ea8Slogwang 	uint16_t offset, length, cnt_words;
11523a9643ea8Slogwang 	int ret_code;
11524a9643ea8Slogwang 
11525a9643ea8Slogwang 	offset = eeprom->offset >> 1;
11526a9643ea8Slogwang 	length = eeprom->length >> 1;
11527a9643ea8Slogwang 	cnt_words = length;
11528a9643ea8Slogwang 
11529a9643ea8Slogwang 	if (offset > hw->nvm.sr_size ||
11530a9643ea8Slogwang 		offset + length > hw->nvm.sr_size) {
11531a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11532a9643ea8Slogwang 		return -EINVAL;
11533a9643ea8Slogwang 	}
11534a9643ea8Slogwang 
11535a9643ea8Slogwang 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11536a9643ea8Slogwang 
11537a9643ea8Slogwang 	ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11538a9643ea8Slogwang 	if (ret_code != I40E_SUCCESS || cnt_words != length) {
11539a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "EEPROM read failed.");
11540a9643ea8Slogwang 		return -EIO;
11541a9643ea8Slogwang 	}
11542a9643ea8Slogwang 
11543a9643ea8Slogwang 	return 0;
11544a9643ea8Slogwang }
11545a9643ea8Slogwang 
i40e_get_module_info(struct rte_eth_dev * dev,struct rte_eth_dev_module_info * modinfo)11546d30ea906Sjfb8856606 static int i40e_get_module_info(struct rte_eth_dev *dev,
11547d30ea906Sjfb8856606 				struct rte_eth_dev_module_info *modinfo)
11548d30ea906Sjfb8856606 {
11549d30ea906Sjfb8856606 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11550d30ea906Sjfb8856606 	uint32_t sff8472_comp = 0;
11551d30ea906Sjfb8856606 	uint32_t sff8472_swap = 0;
11552d30ea906Sjfb8856606 	uint32_t sff8636_rev = 0;
11553d30ea906Sjfb8856606 	i40e_status status;
11554d30ea906Sjfb8856606 	uint32_t type = 0;
11555d30ea906Sjfb8856606 
11556d30ea906Sjfb8856606 	/* Check if firmware supports reading module EEPROM. */
11557d30ea906Sjfb8856606 	if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
11558d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR,
11559d30ea906Sjfb8856606 			    "Module EEPROM memory read not supported. "
11560d30ea906Sjfb8856606 			    "Please update the NVM image.\n");
11561d30ea906Sjfb8856606 		return -EINVAL;
11562d30ea906Sjfb8856606 	}
11563d30ea906Sjfb8856606 
11564d30ea906Sjfb8856606 	status = i40e_update_link_info(hw);
11565d30ea906Sjfb8856606 	if (status)
11566d30ea906Sjfb8856606 		return -EIO;
11567d30ea906Sjfb8856606 
11568d30ea906Sjfb8856606 	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
11569d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR,
11570d30ea906Sjfb8856606 			    "Cannot read module EEPROM memory. "
11571d30ea906Sjfb8856606 			    "No module connected.\n");
11572d30ea906Sjfb8856606 		return -EINVAL;
11573d30ea906Sjfb8856606 	}
11574d30ea906Sjfb8856606 
11575d30ea906Sjfb8856606 	type = hw->phy.link_info.module_type[0];
11576d30ea906Sjfb8856606 
11577d30ea906Sjfb8856606 	switch (type) {
11578d30ea906Sjfb8856606 	case I40E_MODULE_TYPE_SFP:
11579d30ea906Sjfb8856606 		status = i40e_aq_get_phy_register(hw,
11580d30ea906Sjfb8856606 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11581d30ea906Sjfb8856606 				I40E_I2C_EEPROM_DEV_ADDR, 1,
11582d30ea906Sjfb8856606 				I40E_MODULE_SFF_8472_COMP,
11583d30ea906Sjfb8856606 				&sff8472_comp, NULL);
11584d30ea906Sjfb8856606 		if (status)
11585d30ea906Sjfb8856606 			return -EIO;
11586d30ea906Sjfb8856606 
11587d30ea906Sjfb8856606 		status = i40e_aq_get_phy_register(hw,
11588d30ea906Sjfb8856606 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11589d30ea906Sjfb8856606 				I40E_I2C_EEPROM_DEV_ADDR, 1,
11590d30ea906Sjfb8856606 				I40E_MODULE_SFF_8472_SWAP,
11591d30ea906Sjfb8856606 				&sff8472_swap, NULL);
11592d30ea906Sjfb8856606 		if (status)
11593d30ea906Sjfb8856606 			return -EIO;
11594d30ea906Sjfb8856606 
11595d30ea906Sjfb8856606 		/* Check if the module requires address swap to access
11596d30ea906Sjfb8856606 		 * the other EEPROM memory page.
11597d30ea906Sjfb8856606 		 */
11598d30ea906Sjfb8856606 		if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
11599d30ea906Sjfb8856606 			PMD_DRV_LOG(WARNING,
11600d30ea906Sjfb8856606 				    "Module address swap to access "
11601d30ea906Sjfb8856606 				    "page 0xA2 is not supported.\n");
11602d30ea906Sjfb8856606 			modinfo->type = RTE_ETH_MODULE_SFF_8079;
11603d30ea906Sjfb8856606 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11604d30ea906Sjfb8856606 		} else if (sff8472_comp == 0x00) {
11605d30ea906Sjfb8856606 			/* Module is not SFF-8472 compliant */
11606d30ea906Sjfb8856606 			modinfo->type = RTE_ETH_MODULE_SFF_8079;
11607d30ea906Sjfb8856606 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11608d30ea906Sjfb8856606 		} else {
11609d30ea906Sjfb8856606 			modinfo->type = RTE_ETH_MODULE_SFF_8472;
11610d30ea906Sjfb8856606 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11611d30ea906Sjfb8856606 		}
11612d30ea906Sjfb8856606 		break;
11613d30ea906Sjfb8856606 	case I40E_MODULE_TYPE_QSFP_PLUS:
11614d30ea906Sjfb8856606 		/* Read from memory page 0. */
11615d30ea906Sjfb8856606 		status = i40e_aq_get_phy_register(hw,
11616d30ea906Sjfb8856606 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11617d30ea906Sjfb8856606 				0, 1,
11618d30ea906Sjfb8856606 				I40E_MODULE_REVISION_ADDR,
11619d30ea906Sjfb8856606 				&sff8636_rev, NULL);
11620d30ea906Sjfb8856606 		if (status)
11621d30ea906Sjfb8856606 			return -EIO;
11622d30ea906Sjfb8856606 		/* Determine revision compliance byte */
11623d30ea906Sjfb8856606 		if (sff8636_rev > 0x02) {
11624d30ea906Sjfb8856606 			/* Module is SFF-8636 compliant */
11625d30ea906Sjfb8856606 			modinfo->type = RTE_ETH_MODULE_SFF_8636;
11626d30ea906Sjfb8856606 			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11627d30ea906Sjfb8856606 		} else {
11628d30ea906Sjfb8856606 			modinfo->type = RTE_ETH_MODULE_SFF_8436;
11629d30ea906Sjfb8856606 			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11630d30ea906Sjfb8856606 		}
11631d30ea906Sjfb8856606 		break;
11632d30ea906Sjfb8856606 	case I40E_MODULE_TYPE_QSFP28:
11633d30ea906Sjfb8856606 		modinfo->type = RTE_ETH_MODULE_SFF_8636;
11634d30ea906Sjfb8856606 		modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11635d30ea906Sjfb8856606 		break;
11636d30ea906Sjfb8856606 	default:
11637d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR, "Module type unrecognized\n");
11638d30ea906Sjfb8856606 		return -EINVAL;
11639d30ea906Sjfb8856606 	}
11640d30ea906Sjfb8856606 	return 0;
11641d30ea906Sjfb8856606 }
11642d30ea906Sjfb8856606 
i40e_get_module_eeprom(struct rte_eth_dev * dev,struct rte_dev_eeprom_info * info)11643d30ea906Sjfb8856606 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11644d30ea906Sjfb8856606 				  struct rte_dev_eeprom_info *info)
11645d30ea906Sjfb8856606 {
11646d30ea906Sjfb8856606 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11647d30ea906Sjfb8856606 	bool is_sfp = false;
11648d30ea906Sjfb8856606 	i40e_status status;
116491646932aSjfb8856606 	uint8_t *data;
11650d30ea906Sjfb8856606 	uint32_t value = 0;
11651d30ea906Sjfb8856606 	uint32_t i;
11652d30ea906Sjfb8856606 
116531646932aSjfb8856606 	if (!info || !info->length || !info->data)
11654d30ea906Sjfb8856606 		return -EINVAL;
11655d30ea906Sjfb8856606 
11656d30ea906Sjfb8856606 	if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11657d30ea906Sjfb8856606 		is_sfp = true;
11658d30ea906Sjfb8856606 
116591646932aSjfb8856606 	data = info->data;
11660d30ea906Sjfb8856606 	for (i = 0; i < info->length; i++) {
11661d30ea906Sjfb8856606 		u32 offset = i + info->offset;
11662d30ea906Sjfb8856606 		u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11663d30ea906Sjfb8856606 
11664d30ea906Sjfb8856606 		/* Check if we need to access the other memory page */
11665d30ea906Sjfb8856606 		if (is_sfp) {
11666d30ea906Sjfb8856606 			if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11667d30ea906Sjfb8856606 				offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11668d30ea906Sjfb8856606 				addr = I40E_I2C_EEPROM_DEV_ADDR2;
11669d30ea906Sjfb8856606 			}
11670d30ea906Sjfb8856606 		} else {
11671d30ea906Sjfb8856606 			while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11672d30ea906Sjfb8856606 				/* Compute memory page number and offset. */
11673d30ea906Sjfb8856606 				offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11674d30ea906Sjfb8856606 				addr++;
11675d30ea906Sjfb8856606 			}
11676d30ea906Sjfb8856606 		}
11677d30ea906Sjfb8856606 		status = i40e_aq_get_phy_register(hw,
11678d30ea906Sjfb8856606 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
116790c6bd470Sfengbojiang 				addr, 1, offset, &value, NULL);
11680d30ea906Sjfb8856606 		if (status)
11681d30ea906Sjfb8856606 			return -EIO;
11682d30ea906Sjfb8856606 		data[i] = (uint8_t)value;
11683d30ea906Sjfb8856606 	}
11684d30ea906Sjfb8856606 	return 0;
11685d30ea906Sjfb8856606 }
11686d30ea906Sjfb8856606 
i40e_set_default_mac_addr(struct rte_eth_dev * dev,struct rte_ether_addr * mac_addr)11687d30ea906Sjfb8856606 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
116884418919fSjohnjiang 				     struct rte_ether_addr *mac_addr)
11689a9643ea8Slogwang {
11690a9643ea8Slogwang 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
116912bfe3f2eSlogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
116922bfe3f2eSlogwang 	struct i40e_vsi *vsi = pf->main_vsi;
116932bfe3f2eSlogwang 	struct i40e_mac_filter_info mac_filter;
116942bfe3f2eSlogwang 	struct i40e_mac_filter *f;
116952bfe3f2eSlogwang 	int ret;
11696a9643ea8Slogwang 
116974418919fSjohnjiang 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
11698a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11699d30ea906Sjfb8856606 		return -EINVAL;
11700a9643ea8Slogwang 	}
11701a9643ea8Slogwang 
117022bfe3f2eSlogwang 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
117034418919fSjohnjiang 		if (rte_is_same_ether_addr(&pf->dev_addr,
117044418919fSjohnjiang 						&f->mac_info.mac_addr))
117052bfe3f2eSlogwang 			break;
117062bfe3f2eSlogwang 	}
117072bfe3f2eSlogwang 
117082bfe3f2eSlogwang 	if (f == NULL) {
117092bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11710d30ea906Sjfb8856606 		return -EIO;
117112bfe3f2eSlogwang 	}
117122bfe3f2eSlogwang 
117132bfe3f2eSlogwang 	mac_filter = f->mac_info;
117142bfe3f2eSlogwang 	ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
117152bfe3f2eSlogwang 	if (ret != I40E_SUCCESS) {
117162bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11717d30ea906Sjfb8856606 		return -EIO;
117182bfe3f2eSlogwang 	}
117192bfe3f2eSlogwang 	memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
117202bfe3f2eSlogwang 	ret = i40e_vsi_add_mac(vsi, &mac_filter);
117212bfe3f2eSlogwang 	if (ret != I40E_SUCCESS) {
117222bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Failed to add mac filter");
11723d30ea906Sjfb8856606 		return -EIO;
117242bfe3f2eSlogwang 	}
117252bfe3f2eSlogwang 	memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
117262bfe3f2eSlogwang 
11727d30ea906Sjfb8856606 	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
117282bfe3f2eSlogwang 					mac_addr->addr_bytes, NULL);
11729d30ea906Sjfb8856606 	if (ret != I40E_SUCCESS) {
11730d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR, "Failed to change mac");
11731d30ea906Sjfb8856606 		return -EIO;
11732d30ea906Sjfb8856606 	}
11733d30ea906Sjfb8856606 
11734d30ea906Sjfb8856606 	return 0;
11735a9643ea8Slogwang }
11736a9643ea8Slogwang 
11737a9643ea8Slogwang static int
i40e_dev_mtu_set(struct rte_eth_dev * dev,uint16_t mtu)11738a9643ea8Slogwang i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11739a9643ea8Slogwang {
11740a9643ea8Slogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11741a9643ea8Slogwang 	struct rte_eth_dev_data *dev_data = pf->dev_data;
117422bfe3f2eSlogwang 	uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11743a9643ea8Slogwang 	int ret = 0;
11744a9643ea8Slogwang 
11745a9643ea8Slogwang 	/* check if mtu is within the allowed range */
117464418919fSjohnjiang 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
11747a9643ea8Slogwang 		return -EINVAL;
11748a9643ea8Slogwang 
11749a9643ea8Slogwang 	/* mtu setting is forbidden if port is start */
11750a9643ea8Slogwang 	if (dev_data->dev_started) {
117512bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11752a9643ea8Slogwang 			    dev_data->port_id);
11753a9643ea8Slogwang 		return -EBUSY;
11754a9643ea8Slogwang 	}
11755a9643ea8Slogwang 
117564418919fSjohnjiang 	if (frame_size > RTE_ETHER_MAX_LEN)
11757d30ea906Sjfb8856606 		dev_data->dev_conf.rxmode.offloads |=
11758d30ea906Sjfb8856606 			DEV_RX_OFFLOAD_JUMBO_FRAME;
11759a9643ea8Slogwang 	else
11760d30ea906Sjfb8856606 		dev_data->dev_conf.rxmode.offloads &=
11761d30ea906Sjfb8856606 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
11762a9643ea8Slogwang 
11763a9643ea8Slogwang 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11764a9643ea8Slogwang 
11765a9643ea8Slogwang 	return ret;
11766a9643ea8Slogwang }
117672bfe3f2eSlogwang 
117682bfe3f2eSlogwang /* Restore ethertype filter */
117692bfe3f2eSlogwang static void
i40e_ethertype_filter_restore(struct i40e_pf * pf)117702bfe3f2eSlogwang i40e_ethertype_filter_restore(struct i40e_pf *pf)
117712bfe3f2eSlogwang {
117722bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
117732bfe3f2eSlogwang 	struct i40e_ethertype_filter_list
117742bfe3f2eSlogwang 		*ethertype_list = &pf->ethertype.ethertype_list;
117752bfe3f2eSlogwang 	struct i40e_ethertype_filter *f;
117762bfe3f2eSlogwang 	struct i40e_control_filter_stats stats;
117772bfe3f2eSlogwang 	uint16_t flags;
117782bfe3f2eSlogwang 
117792bfe3f2eSlogwang 	TAILQ_FOREACH(f, ethertype_list, rules) {
117802bfe3f2eSlogwang 		flags = 0;
117812bfe3f2eSlogwang 		if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
117822bfe3f2eSlogwang 			flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
117832bfe3f2eSlogwang 		if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
117842bfe3f2eSlogwang 			flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
117852bfe3f2eSlogwang 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
117862bfe3f2eSlogwang 
117872bfe3f2eSlogwang 		memset(&stats, 0, sizeof(stats));
117882bfe3f2eSlogwang 		i40e_aq_add_rem_control_packet_filter(hw,
117892bfe3f2eSlogwang 					    f->input.mac_addr.addr_bytes,
117902bfe3f2eSlogwang 					    f->input.ether_type,
117912bfe3f2eSlogwang 					    flags, pf->main_vsi->seid,
117922bfe3f2eSlogwang 					    f->queue, 1, &stats, NULL);
117932bfe3f2eSlogwang 	}
117942bfe3f2eSlogwang 	PMD_DRV_LOG(INFO, "Ethertype filter:"
117952bfe3f2eSlogwang 		    " mac_etype_used = %u, etype_used = %u,"
117962bfe3f2eSlogwang 		    " mac_etype_free = %u, etype_free = %u",
117972bfe3f2eSlogwang 		    stats.mac_etype_used, stats.etype_used,
117982bfe3f2eSlogwang 		    stats.mac_etype_free, stats.etype_free);
117992bfe3f2eSlogwang }
118002bfe3f2eSlogwang 
118012bfe3f2eSlogwang /* Restore tunnel filter */
118022bfe3f2eSlogwang static void
i40e_tunnel_filter_restore(struct i40e_pf * pf)118032bfe3f2eSlogwang i40e_tunnel_filter_restore(struct i40e_pf *pf)
118042bfe3f2eSlogwang {
118052bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
118062bfe3f2eSlogwang 	struct i40e_vsi *vsi;
118072bfe3f2eSlogwang 	struct i40e_pf_vf *vf;
118082bfe3f2eSlogwang 	struct i40e_tunnel_filter_list
118092bfe3f2eSlogwang 		*tunnel_list = &pf->tunnel.tunnel_list;
118102bfe3f2eSlogwang 	struct i40e_tunnel_filter *f;
11811d30ea906Sjfb8856606 	struct i40e_aqc_cloud_filters_element_bb cld_filter;
118122bfe3f2eSlogwang 	bool big_buffer = 0;
118132bfe3f2eSlogwang 
118142bfe3f2eSlogwang 	TAILQ_FOREACH(f, tunnel_list, rules) {
118152bfe3f2eSlogwang 		if (!f->is_to_vf)
118162bfe3f2eSlogwang 			vsi = pf->main_vsi;
118172bfe3f2eSlogwang 		else {
118182bfe3f2eSlogwang 			vf = &pf->vfs[f->vf_id];
118192bfe3f2eSlogwang 			vsi = vf->vsi;
118202bfe3f2eSlogwang 		}
118212bfe3f2eSlogwang 		memset(&cld_filter, 0, sizeof(cld_filter));
118224418919fSjohnjiang 		rte_ether_addr_copy((struct rte_ether_addr *)
118234418919fSjohnjiang 				&f->input.outer_mac,
118244418919fSjohnjiang 			(struct rte_ether_addr *)&cld_filter.element.outer_mac);
118254418919fSjohnjiang 		rte_ether_addr_copy((struct rte_ether_addr *)
118264418919fSjohnjiang 				&f->input.inner_mac,
118274418919fSjohnjiang 			(struct rte_ether_addr *)&cld_filter.element.inner_mac);
118282bfe3f2eSlogwang 		cld_filter.element.inner_vlan = f->input.inner_vlan;
118292bfe3f2eSlogwang 		cld_filter.element.flags = f->input.flags;
118302bfe3f2eSlogwang 		cld_filter.element.tenant_id = f->input.tenant_id;
118312bfe3f2eSlogwang 		cld_filter.element.queue_number = f->queue;
118322bfe3f2eSlogwang 		rte_memcpy(cld_filter.general_fields,
118332bfe3f2eSlogwang 			   f->input.general_fields,
118342bfe3f2eSlogwang 			   sizeof(f->input.general_fields));
118352bfe3f2eSlogwang 
118362bfe3f2eSlogwang 		if (((f->input.flags &
118372bfe3f2eSlogwang 		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
118382bfe3f2eSlogwang 		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
118392bfe3f2eSlogwang 		    ((f->input.flags &
118402bfe3f2eSlogwang 		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
118412bfe3f2eSlogwang 		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
118422bfe3f2eSlogwang 		    ((f->input.flags &
118432bfe3f2eSlogwang 		     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
118442bfe3f2eSlogwang 		     I40E_AQC_ADD_CLOUD_FILTER_0X10))
118452bfe3f2eSlogwang 			big_buffer = 1;
118462bfe3f2eSlogwang 
118472bfe3f2eSlogwang 		if (big_buffer)
11848d30ea906Sjfb8856606 			i40e_aq_add_cloud_filters_bb(hw,
118492bfe3f2eSlogwang 					vsi->seid, &cld_filter, 1);
118502bfe3f2eSlogwang 		else
118512bfe3f2eSlogwang 			i40e_aq_add_cloud_filters(hw, vsi->seid,
118522bfe3f2eSlogwang 						  &cld_filter.element, 1);
118532bfe3f2eSlogwang 	}
118542bfe3f2eSlogwang }
118552bfe3f2eSlogwang 
11856*2d9fd380Sjfb8856606 /* Restore RSS filter */
11857d30ea906Sjfb8856606 static inline void
i40e_rss_filter_restore(struct i40e_pf * pf)11858d30ea906Sjfb8856606 i40e_rss_filter_restore(struct i40e_pf *pf)
11859d30ea906Sjfb8856606 {
11860*2d9fd380Sjfb8856606 	struct i40e_rss_conf_list *list = &pf->rss_config_list;
11861*2d9fd380Sjfb8856606 	struct i40e_rss_filter *filter;
11862*2d9fd380Sjfb8856606 
11863*2d9fd380Sjfb8856606 	TAILQ_FOREACH(filter, list, next) {
11864*2d9fd380Sjfb8856606 		i40e_config_rss_filter(pf, &filter->rss_filter_info, TRUE);
11865*2d9fd380Sjfb8856606 	}
11866d30ea906Sjfb8856606 }
11867d30ea906Sjfb8856606 
118682bfe3f2eSlogwang static void
i40e_filter_restore(struct i40e_pf * pf)118692bfe3f2eSlogwang i40e_filter_restore(struct i40e_pf *pf)
118702bfe3f2eSlogwang {
118712bfe3f2eSlogwang 	i40e_ethertype_filter_restore(pf);
118722bfe3f2eSlogwang 	i40e_tunnel_filter_restore(pf);
118732bfe3f2eSlogwang 	i40e_fdir_filter_restore(pf);
11874d30ea906Sjfb8856606 	i40e_rss_filter_restore(pf);
118752bfe3f2eSlogwang }
118762bfe3f2eSlogwang 
118774418919fSjohnjiang bool
is_device_supported(struct rte_eth_dev * dev,struct rte_pci_driver * drv)118782bfe3f2eSlogwang is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
118792bfe3f2eSlogwang {
118802bfe3f2eSlogwang 	if (strcmp(dev->device->driver->name, drv->driver.name))
118812bfe3f2eSlogwang 		return false;
118822bfe3f2eSlogwang 
118832bfe3f2eSlogwang 	return true;
118842bfe3f2eSlogwang }
118852bfe3f2eSlogwang 
118862bfe3f2eSlogwang bool
is_i40e_supported(struct rte_eth_dev * dev)118872bfe3f2eSlogwang is_i40e_supported(struct rte_eth_dev *dev)
118882bfe3f2eSlogwang {
118892bfe3f2eSlogwang 	return is_device_supported(dev, &rte_i40e_pmd);
118902bfe3f2eSlogwang }
118912bfe3f2eSlogwang 
118922bfe3f2eSlogwang struct i40e_customized_pctype*
i40e_find_customized_pctype(struct i40e_pf * pf,uint8_t index)118932bfe3f2eSlogwang i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
118942bfe3f2eSlogwang {
118952bfe3f2eSlogwang 	int i;
118962bfe3f2eSlogwang 
118972bfe3f2eSlogwang 	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
118982bfe3f2eSlogwang 		if (pf->customized_pctype[i].index == index)
118992bfe3f2eSlogwang 			return &pf->customized_pctype[i];
119002bfe3f2eSlogwang 	}
119012bfe3f2eSlogwang 	return NULL;
119022bfe3f2eSlogwang }
119032bfe3f2eSlogwang 
119042bfe3f2eSlogwang static int
i40e_update_customized_pctype(struct rte_eth_dev * dev,uint8_t * pkg,uint32_t pkg_size,uint32_t proto_num,struct rte_pmd_i40e_proto_info * proto,enum rte_pmd_i40e_package_op op)119052bfe3f2eSlogwang i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
119062bfe3f2eSlogwang 			      uint32_t pkg_size, uint32_t proto_num,
11907579bf1e2Sjfb8856606 			      struct rte_pmd_i40e_proto_info *proto,
11908579bf1e2Sjfb8856606 			      enum rte_pmd_i40e_package_op op)
119092bfe3f2eSlogwang {
119102bfe3f2eSlogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
119112bfe3f2eSlogwang 	uint32_t pctype_num;
119122bfe3f2eSlogwang 	struct rte_pmd_i40e_ptype_info *pctype;
119132bfe3f2eSlogwang 	uint32_t buff_size;
119142bfe3f2eSlogwang 	struct i40e_customized_pctype *new_pctype = NULL;
119152bfe3f2eSlogwang 	uint8_t proto_id;
119162bfe3f2eSlogwang 	uint8_t pctype_value;
119172bfe3f2eSlogwang 	char name[64];
119182bfe3f2eSlogwang 	uint32_t i, j, n;
119192bfe3f2eSlogwang 	int ret;
119202bfe3f2eSlogwang 
11921579bf1e2Sjfb8856606 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11922579bf1e2Sjfb8856606 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11923579bf1e2Sjfb8856606 		PMD_DRV_LOG(ERR, "Unsupported operation.");
11924579bf1e2Sjfb8856606 		return -1;
11925579bf1e2Sjfb8856606 	}
11926579bf1e2Sjfb8856606 
119272bfe3f2eSlogwang 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
119282bfe3f2eSlogwang 				(uint8_t *)&pctype_num, sizeof(pctype_num),
119292bfe3f2eSlogwang 				RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
119302bfe3f2eSlogwang 	if (ret) {
119312bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Failed to get pctype number");
119322bfe3f2eSlogwang 		return -1;
119332bfe3f2eSlogwang 	}
119342bfe3f2eSlogwang 	if (!pctype_num) {
119352bfe3f2eSlogwang 		PMD_DRV_LOG(INFO, "No new pctype added");
119362bfe3f2eSlogwang 		return -1;
119372bfe3f2eSlogwang 	}
119382bfe3f2eSlogwang 
119392bfe3f2eSlogwang 	buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
119402bfe3f2eSlogwang 	pctype = rte_zmalloc("new_pctype", buff_size, 0);
119412bfe3f2eSlogwang 	if (!pctype) {
119422bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
119432bfe3f2eSlogwang 		return -1;
119442bfe3f2eSlogwang 	}
119452bfe3f2eSlogwang 	/* get information about new pctype list */
119462bfe3f2eSlogwang 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
119472bfe3f2eSlogwang 					(uint8_t *)pctype, buff_size,
119482bfe3f2eSlogwang 					RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
119492bfe3f2eSlogwang 	if (ret) {
119502bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Failed to get pctype list");
119512bfe3f2eSlogwang 		rte_free(pctype);
119522bfe3f2eSlogwang 		return -1;
119532bfe3f2eSlogwang 	}
119542bfe3f2eSlogwang 
119552bfe3f2eSlogwang 	/* Update customized pctype. */
119562bfe3f2eSlogwang 	for (i = 0; i < pctype_num; i++) {
119572bfe3f2eSlogwang 		pctype_value = pctype[i].ptype_id;
119582bfe3f2eSlogwang 		memset(name, 0, sizeof(name));
119592bfe3f2eSlogwang 		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
119602bfe3f2eSlogwang 			proto_id = pctype[i].protocols[j];
119612bfe3f2eSlogwang 			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
119622bfe3f2eSlogwang 				continue;
119632bfe3f2eSlogwang 			for (n = 0; n < proto_num; n++) {
119642bfe3f2eSlogwang 				if (proto[n].proto_id != proto_id)
119652bfe3f2eSlogwang 					continue;
119661646932aSjfb8856606 				strlcat(name, proto[n].name, sizeof(name));
119671646932aSjfb8856606 				strlcat(name, "_", sizeof(name));
119682bfe3f2eSlogwang 				break;
119692bfe3f2eSlogwang 			}
119702bfe3f2eSlogwang 		}
119712bfe3f2eSlogwang 		name[strlen(name) - 1] = '\0';
11972*2d9fd380Sjfb8856606 		PMD_DRV_LOG(INFO, "name = %s\n", name);
119732bfe3f2eSlogwang 		if (!strcmp(name, "GTPC"))
119742bfe3f2eSlogwang 			new_pctype =
119752bfe3f2eSlogwang 				i40e_find_customized_pctype(pf,
119762bfe3f2eSlogwang 						      I40E_CUSTOMIZED_GTPC);
119772bfe3f2eSlogwang 		else if (!strcmp(name, "GTPU_IPV4"))
119782bfe3f2eSlogwang 			new_pctype =
119792bfe3f2eSlogwang 				i40e_find_customized_pctype(pf,
119802bfe3f2eSlogwang 						   I40E_CUSTOMIZED_GTPU_IPV4);
119812bfe3f2eSlogwang 		else if (!strcmp(name, "GTPU_IPV6"))
119822bfe3f2eSlogwang 			new_pctype =
119832bfe3f2eSlogwang 				i40e_find_customized_pctype(pf,
119842bfe3f2eSlogwang 						   I40E_CUSTOMIZED_GTPU_IPV6);
119852bfe3f2eSlogwang 		else if (!strcmp(name, "GTPU"))
119862bfe3f2eSlogwang 			new_pctype =
119872bfe3f2eSlogwang 				i40e_find_customized_pctype(pf,
119882bfe3f2eSlogwang 						      I40E_CUSTOMIZED_GTPU);
11989*2d9fd380Sjfb8856606 		else if (!strcmp(name, "IPV4_L2TPV3"))
11990*2d9fd380Sjfb8856606 			new_pctype =
11991*2d9fd380Sjfb8856606 				i40e_find_customized_pctype(pf,
11992*2d9fd380Sjfb8856606 						I40E_CUSTOMIZED_IPV4_L2TPV3);
11993*2d9fd380Sjfb8856606 		else if (!strcmp(name, "IPV6_L2TPV3"))
11994*2d9fd380Sjfb8856606 			new_pctype =
11995*2d9fd380Sjfb8856606 				i40e_find_customized_pctype(pf,
11996*2d9fd380Sjfb8856606 						I40E_CUSTOMIZED_IPV6_L2TPV3);
11997*2d9fd380Sjfb8856606 		else if (!strcmp(name, "IPV4_ESP"))
11998*2d9fd380Sjfb8856606 			new_pctype =
11999*2d9fd380Sjfb8856606 				i40e_find_customized_pctype(pf,
12000*2d9fd380Sjfb8856606 						I40E_CUSTOMIZED_ESP_IPV4);
12001*2d9fd380Sjfb8856606 		else if (!strcmp(name, "IPV6_ESP"))
12002*2d9fd380Sjfb8856606 			new_pctype =
12003*2d9fd380Sjfb8856606 				i40e_find_customized_pctype(pf,
12004*2d9fd380Sjfb8856606 						I40E_CUSTOMIZED_ESP_IPV6);
12005*2d9fd380Sjfb8856606 		else if (!strcmp(name, "IPV4_UDP_ESP"))
12006*2d9fd380Sjfb8856606 			new_pctype =
12007*2d9fd380Sjfb8856606 				i40e_find_customized_pctype(pf,
12008*2d9fd380Sjfb8856606 						I40E_CUSTOMIZED_ESP_IPV4_UDP);
12009*2d9fd380Sjfb8856606 		else if (!strcmp(name, "IPV6_UDP_ESP"))
12010*2d9fd380Sjfb8856606 			new_pctype =
12011*2d9fd380Sjfb8856606 				i40e_find_customized_pctype(pf,
12012*2d9fd380Sjfb8856606 						I40E_CUSTOMIZED_ESP_IPV6_UDP);
12013*2d9fd380Sjfb8856606 		else if (!strcmp(name, "IPV4_AH"))
12014*2d9fd380Sjfb8856606 			new_pctype =
12015*2d9fd380Sjfb8856606 				i40e_find_customized_pctype(pf,
12016*2d9fd380Sjfb8856606 						I40E_CUSTOMIZED_AH_IPV4);
12017*2d9fd380Sjfb8856606 		else if (!strcmp(name, "IPV6_AH"))
12018*2d9fd380Sjfb8856606 			new_pctype =
12019*2d9fd380Sjfb8856606 				i40e_find_customized_pctype(pf,
12020*2d9fd380Sjfb8856606 						I40E_CUSTOMIZED_AH_IPV6);
120212bfe3f2eSlogwang 		if (new_pctype) {
12022579bf1e2Sjfb8856606 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
120232bfe3f2eSlogwang 				new_pctype->pctype = pctype_value;
120242bfe3f2eSlogwang 				new_pctype->valid = true;
12025579bf1e2Sjfb8856606 			} else {
12026579bf1e2Sjfb8856606 				new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
12027579bf1e2Sjfb8856606 				new_pctype->valid = false;
12028579bf1e2Sjfb8856606 			}
120292bfe3f2eSlogwang 		}
120302bfe3f2eSlogwang 	}
120312bfe3f2eSlogwang 
120322bfe3f2eSlogwang 	rte_free(pctype);
120332bfe3f2eSlogwang 	return 0;
120342bfe3f2eSlogwang }
120352bfe3f2eSlogwang 
120362bfe3f2eSlogwang static int
i40e_update_customized_ptype(struct rte_eth_dev * dev,uint8_t * pkg,uint32_t pkg_size,uint32_t proto_num,struct rte_pmd_i40e_proto_info * proto,enum rte_pmd_i40e_package_op op)120372bfe3f2eSlogwang i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
120382bfe3f2eSlogwang 			     uint32_t pkg_size, uint32_t proto_num,
12039579bf1e2Sjfb8856606 			     struct rte_pmd_i40e_proto_info *proto,
12040579bf1e2Sjfb8856606 			     enum rte_pmd_i40e_package_op op)
120412bfe3f2eSlogwang {
120422bfe3f2eSlogwang 	struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
120432bfe3f2eSlogwang 	uint16_t port_id = dev->data->port_id;
120442bfe3f2eSlogwang 	uint32_t ptype_num;
120452bfe3f2eSlogwang 	struct rte_pmd_i40e_ptype_info *ptype;
120462bfe3f2eSlogwang 	uint32_t buff_size;
120472bfe3f2eSlogwang 	uint8_t proto_id;
120482bfe3f2eSlogwang 	char name[RTE_PMD_I40E_DDP_NAME_SIZE];
120492bfe3f2eSlogwang 	uint32_t i, j, n;
12050d30ea906Sjfb8856606 	bool in_tunnel;
120512bfe3f2eSlogwang 	int ret;
120522bfe3f2eSlogwang 
12053579bf1e2Sjfb8856606 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12054579bf1e2Sjfb8856606 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12055579bf1e2Sjfb8856606 		PMD_DRV_LOG(ERR, "Unsupported operation.");
12056579bf1e2Sjfb8856606 		return -1;
12057579bf1e2Sjfb8856606 	}
12058579bf1e2Sjfb8856606 
12059579bf1e2Sjfb8856606 	if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
12060579bf1e2Sjfb8856606 		rte_pmd_i40e_ptype_mapping_reset(port_id);
12061579bf1e2Sjfb8856606 		return 0;
12062579bf1e2Sjfb8856606 	}
12063579bf1e2Sjfb8856606 
120642bfe3f2eSlogwang 	/* get information about new ptype num */
120652bfe3f2eSlogwang 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
120662bfe3f2eSlogwang 				(uint8_t *)&ptype_num, sizeof(ptype_num),
120672bfe3f2eSlogwang 				RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
120682bfe3f2eSlogwang 	if (ret) {
120692bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Failed to get ptype number");
120702bfe3f2eSlogwang 		return ret;
120712bfe3f2eSlogwang 	}
120722bfe3f2eSlogwang 	if (!ptype_num) {
120732bfe3f2eSlogwang 		PMD_DRV_LOG(INFO, "No new ptype added");
120742bfe3f2eSlogwang 		return -1;
120752bfe3f2eSlogwang 	}
120762bfe3f2eSlogwang 
120772bfe3f2eSlogwang 	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
120782bfe3f2eSlogwang 	ptype = rte_zmalloc("new_ptype", buff_size, 0);
120792bfe3f2eSlogwang 	if (!ptype) {
120802bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
120812bfe3f2eSlogwang 		return -1;
120822bfe3f2eSlogwang 	}
120832bfe3f2eSlogwang 
120842bfe3f2eSlogwang 	/* get information about new ptype list */
120852bfe3f2eSlogwang 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
120862bfe3f2eSlogwang 					(uint8_t *)ptype, buff_size,
120872bfe3f2eSlogwang 					RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
120882bfe3f2eSlogwang 	if (ret) {
120892bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Failed to get ptype list");
120902bfe3f2eSlogwang 		rte_free(ptype);
120912bfe3f2eSlogwang 		return ret;
120922bfe3f2eSlogwang 	}
120932bfe3f2eSlogwang 
120942bfe3f2eSlogwang 	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
120952bfe3f2eSlogwang 	ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
120962bfe3f2eSlogwang 	if (!ptype_mapping) {
120972bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
120982bfe3f2eSlogwang 		rte_free(ptype);
120992bfe3f2eSlogwang 		return -1;
121002bfe3f2eSlogwang 	}
121012bfe3f2eSlogwang 
121022bfe3f2eSlogwang 	/* Update ptype mapping table. */
121032bfe3f2eSlogwang 	for (i = 0; i < ptype_num; i++) {
121042bfe3f2eSlogwang 		ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
121052bfe3f2eSlogwang 		ptype_mapping[i].sw_ptype = 0;
12106d30ea906Sjfb8856606 		in_tunnel = false;
121072bfe3f2eSlogwang 		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
121082bfe3f2eSlogwang 			proto_id = ptype[i].protocols[j];
121092bfe3f2eSlogwang 			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
121102bfe3f2eSlogwang 				continue;
121112bfe3f2eSlogwang 			for (n = 0; n < proto_num; n++) {
121122bfe3f2eSlogwang 				if (proto[n].proto_id != proto_id)
121132bfe3f2eSlogwang 					continue;
121142bfe3f2eSlogwang 				memset(name, 0, sizeof(name));
121152bfe3f2eSlogwang 				strcpy(name, proto[n].name);
12116*2d9fd380Sjfb8856606 				PMD_DRV_LOG(INFO, "name = %s\n", name);
12117d30ea906Sjfb8856606 				if (!strncasecmp(name, "PPPOE", 5))
12118d30ea906Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12119d30ea906Sjfb8856606 						RTE_PTYPE_L2_ETHER_PPPOE;
12120d30ea906Sjfb8856606 				else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12121d30ea906Sjfb8856606 					 !in_tunnel) {
121222bfe3f2eSlogwang 					ptype_mapping[i].sw_ptype |=
121232bfe3f2eSlogwang 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12124d30ea906Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12125d30ea906Sjfb8856606 						RTE_PTYPE_L4_FRAG;
12126d30ea906Sjfb8856606 				} else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12127d30ea906Sjfb8856606 					   in_tunnel) {
121282bfe3f2eSlogwang 					ptype_mapping[i].sw_ptype |=
121292bfe3f2eSlogwang 					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
121302bfe3f2eSlogwang 					ptype_mapping[i].sw_ptype |=
121312bfe3f2eSlogwang 						RTE_PTYPE_INNER_L4_FRAG;
12132d30ea906Sjfb8856606 				} else if (!strncasecmp(name, "OIPV4", 5)) {
12133d30ea906Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12134d30ea906Sjfb8856606 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12135d30ea906Sjfb8856606 					in_tunnel = true;
12136d30ea906Sjfb8856606 				} else if (!strncasecmp(name, "IPV4", 4) &&
12137d30ea906Sjfb8856606 					   !in_tunnel)
12138d30ea906Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12139d30ea906Sjfb8856606 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12140d30ea906Sjfb8856606 				else if (!strncasecmp(name, "IPV4", 4) &&
12141d30ea906Sjfb8856606 					 in_tunnel)
121422bfe3f2eSlogwang 					ptype_mapping[i].sw_ptype |=
121432bfe3f2eSlogwang 					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12144d30ea906Sjfb8856606 				else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12145d30ea906Sjfb8856606 					 !in_tunnel) {
121462bfe3f2eSlogwang 					ptype_mapping[i].sw_ptype |=
121472bfe3f2eSlogwang 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12148d30ea906Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12149d30ea906Sjfb8856606 						RTE_PTYPE_L4_FRAG;
12150d30ea906Sjfb8856606 				} else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12151d30ea906Sjfb8856606 					   in_tunnel) {
121522bfe3f2eSlogwang 					ptype_mapping[i].sw_ptype |=
121532bfe3f2eSlogwang 					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
121542bfe3f2eSlogwang 					ptype_mapping[i].sw_ptype |=
121552bfe3f2eSlogwang 						RTE_PTYPE_INNER_L4_FRAG;
12156d30ea906Sjfb8856606 				} else if (!strncasecmp(name, "OIPV6", 5)) {
12157d30ea906Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12158d30ea906Sjfb8856606 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12159d30ea906Sjfb8856606 					in_tunnel = true;
12160d30ea906Sjfb8856606 				} else if (!strncasecmp(name, "IPV6", 4) &&
12161d30ea906Sjfb8856606 					   !in_tunnel)
12162d30ea906Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12163d30ea906Sjfb8856606 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12164d30ea906Sjfb8856606 				else if (!strncasecmp(name, "IPV6", 4) &&
12165d30ea906Sjfb8856606 					 in_tunnel)
121662bfe3f2eSlogwang 					ptype_mapping[i].sw_ptype |=
121672bfe3f2eSlogwang 					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12168d30ea906Sjfb8856606 				else if (!strncasecmp(name, "UDP", 3) &&
12169d30ea906Sjfb8856606 					 !in_tunnel)
1217028440c50Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12171d30ea906Sjfb8856606 						RTE_PTYPE_L4_UDP;
12172d30ea906Sjfb8856606 				else if (!strncasecmp(name, "UDP", 3) &&
12173d30ea906Sjfb8856606 					 in_tunnel)
1217428440c50Sjfb8856606 					ptype_mapping[i].sw_ptype |=
121755af785ecSfengbojiang(姜凤波) 						RTE_PTYPE_INNER_L4_UDP;
12176d30ea906Sjfb8856606 				else if (!strncasecmp(name, "TCP", 3) &&
12177d30ea906Sjfb8856606 					 !in_tunnel)
12178d30ea906Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12179d30ea906Sjfb8856606 						RTE_PTYPE_L4_TCP;
12180d30ea906Sjfb8856606 				else if (!strncasecmp(name, "TCP", 3) &&
12181d30ea906Sjfb8856606 					 in_tunnel)
1218228440c50Sjfb8856606 					ptype_mapping[i].sw_ptype |=
121835af785ecSfengbojiang(姜凤波) 						RTE_PTYPE_INNER_L4_TCP;
12184d30ea906Sjfb8856606 				else if (!strncasecmp(name, "SCTP", 4) &&
12185d30ea906Sjfb8856606 					 !in_tunnel)
12186d30ea906Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12187d30ea906Sjfb8856606 						RTE_PTYPE_L4_SCTP;
12188d30ea906Sjfb8856606 				else if (!strncasecmp(name, "SCTP", 4) &&
12189d30ea906Sjfb8856606 					 in_tunnel)
121905af785ecSfengbojiang(姜凤波) 					ptype_mapping[i].sw_ptype |=
121915af785ecSfengbojiang(姜凤波) 						RTE_PTYPE_INNER_L4_SCTP;
12192d30ea906Sjfb8856606 				else if ((!strncasecmp(name, "ICMP", 4) ||
12193d30ea906Sjfb8856606 					  !strncasecmp(name, "ICMPV6", 6)) &&
12194d30ea906Sjfb8856606 					 !in_tunnel)
12195d30ea906Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12196d30ea906Sjfb8856606 						RTE_PTYPE_L4_ICMP;
12197d30ea906Sjfb8856606 				else if ((!strncasecmp(name, "ICMP", 4) ||
12198d30ea906Sjfb8856606 					  !strncasecmp(name, "ICMPV6", 6)) &&
12199d30ea906Sjfb8856606 					 in_tunnel)
122005af785ecSfengbojiang(姜凤波) 					ptype_mapping[i].sw_ptype |=
122015af785ecSfengbojiang(姜凤波) 						RTE_PTYPE_INNER_L4_ICMP;
12202d30ea906Sjfb8856606 				else if (!strncasecmp(name, "GTPC", 4)) {
12203d30ea906Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12204d30ea906Sjfb8856606 						RTE_PTYPE_TUNNEL_GTPC;
12205d30ea906Sjfb8856606 					in_tunnel = true;
12206d30ea906Sjfb8856606 				} else if (!strncasecmp(name, "GTPU", 4)) {
12207d30ea906Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12208d30ea906Sjfb8856606 						RTE_PTYPE_TUNNEL_GTPU;
12209d30ea906Sjfb8856606 					in_tunnel = true;
12210*2d9fd380Sjfb8856606 				} else if (!strncasecmp(name, "ESP", 3)) {
12211*2d9fd380Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12212*2d9fd380Sjfb8856606 						RTE_PTYPE_TUNNEL_ESP;
12213*2d9fd380Sjfb8856606 					in_tunnel = true;
12214d30ea906Sjfb8856606 				} else if (!strncasecmp(name, "GRENAT", 6)) {
12215d30ea906Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12216d30ea906Sjfb8856606 						RTE_PTYPE_TUNNEL_GRENAT;
12217d30ea906Sjfb8856606 					in_tunnel = true;
12218d30ea906Sjfb8856606 				} else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
12219*2d9fd380Sjfb8856606 					   !strncasecmp(name, "L2TPV2", 6) ||
12220*2d9fd380Sjfb8856606 					   !strncasecmp(name, "L2TPV3", 6)) {
12221d30ea906Sjfb8856606 					ptype_mapping[i].sw_ptype |=
12222d30ea906Sjfb8856606 						RTE_PTYPE_TUNNEL_L2TP;
12223d30ea906Sjfb8856606 					in_tunnel = true;
12224d30ea906Sjfb8856606 				}
122252bfe3f2eSlogwang 
122262bfe3f2eSlogwang 				break;
122272bfe3f2eSlogwang 			}
122282bfe3f2eSlogwang 		}
122292bfe3f2eSlogwang 	}
122302bfe3f2eSlogwang 
122312bfe3f2eSlogwang 	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
122322bfe3f2eSlogwang 						ptype_num, 0);
122332bfe3f2eSlogwang 	if (ret)
12234*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
122352bfe3f2eSlogwang 
122362bfe3f2eSlogwang 	rte_free(ptype_mapping);
122372bfe3f2eSlogwang 	rte_free(ptype);
122382bfe3f2eSlogwang 	return ret;
122392bfe3f2eSlogwang }
122402bfe3f2eSlogwang 
122412bfe3f2eSlogwang void
i40e_update_customized_info(struct rte_eth_dev * dev,uint8_t * pkg,uint32_t pkg_size,enum rte_pmd_i40e_package_op op)122422bfe3f2eSlogwang i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
12243579bf1e2Sjfb8856606 			    uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
122442bfe3f2eSlogwang {
122452bfe3f2eSlogwang 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
122462bfe3f2eSlogwang 	uint32_t proto_num;
122472bfe3f2eSlogwang 	struct rte_pmd_i40e_proto_info *proto;
122482bfe3f2eSlogwang 	uint32_t buff_size;
122492bfe3f2eSlogwang 	uint32_t i;
122502bfe3f2eSlogwang 	int ret;
122512bfe3f2eSlogwang 
12252579bf1e2Sjfb8856606 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12253579bf1e2Sjfb8856606 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12254579bf1e2Sjfb8856606 		PMD_DRV_LOG(ERR, "Unsupported operation.");
12255579bf1e2Sjfb8856606 		return;
12256579bf1e2Sjfb8856606 	}
12257579bf1e2Sjfb8856606 
122582bfe3f2eSlogwang 	/* get information about protocol number */
122592bfe3f2eSlogwang 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
122602bfe3f2eSlogwang 				       (uint8_t *)&proto_num, sizeof(proto_num),
122612bfe3f2eSlogwang 				       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
122622bfe3f2eSlogwang 	if (ret) {
122632bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Failed to get protocol number");
122642bfe3f2eSlogwang 		return;
122652bfe3f2eSlogwang 	}
122662bfe3f2eSlogwang 	if (!proto_num) {
122672bfe3f2eSlogwang 		PMD_DRV_LOG(INFO, "No new protocol added");
122682bfe3f2eSlogwang 		return;
122692bfe3f2eSlogwang 	}
122702bfe3f2eSlogwang 
122712bfe3f2eSlogwang 	buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
122722bfe3f2eSlogwang 	proto = rte_zmalloc("new_proto", buff_size, 0);
122732bfe3f2eSlogwang 	if (!proto) {
122742bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
122752bfe3f2eSlogwang 		return;
122762bfe3f2eSlogwang 	}
122772bfe3f2eSlogwang 
122782bfe3f2eSlogwang 	/* get information about protocol list */
122792bfe3f2eSlogwang 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
122802bfe3f2eSlogwang 					(uint8_t *)proto, buff_size,
122812bfe3f2eSlogwang 					RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
122822bfe3f2eSlogwang 	if (ret) {
122832bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Failed to get protocol list");
122842bfe3f2eSlogwang 		rte_free(proto);
122852bfe3f2eSlogwang 		return;
122862bfe3f2eSlogwang 	}
122872bfe3f2eSlogwang 
122882bfe3f2eSlogwang 	/* Check if GTP is supported. */
122892bfe3f2eSlogwang 	for (i = 0; i < proto_num; i++) {
122902bfe3f2eSlogwang 		if (!strncmp(proto[i].name, "GTP", 3)) {
12291579bf1e2Sjfb8856606 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
122922bfe3f2eSlogwang 				pf->gtp_support = true;
12293579bf1e2Sjfb8856606 			else
12294579bf1e2Sjfb8856606 				pf->gtp_support = false;
122952bfe3f2eSlogwang 			break;
122962bfe3f2eSlogwang 		}
122972bfe3f2eSlogwang 	}
122982bfe3f2eSlogwang 
12299*2d9fd380Sjfb8856606 	/* Check if ESP is supported. */
12300*2d9fd380Sjfb8856606 	for (i = 0; i < proto_num; i++) {
12301*2d9fd380Sjfb8856606 		if (!strncmp(proto[i].name, "ESP", 3)) {
12302*2d9fd380Sjfb8856606 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12303*2d9fd380Sjfb8856606 				pf->esp_support = true;
12304*2d9fd380Sjfb8856606 			else
12305*2d9fd380Sjfb8856606 				pf->esp_support = false;
12306*2d9fd380Sjfb8856606 			break;
12307*2d9fd380Sjfb8856606 		}
12308*2d9fd380Sjfb8856606 	}
12309*2d9fd380Sjfb8856606 
123102bfe3f2eSlogwang 	/* Update customized pctype info */
123112bfe3f2eSlogwang 	ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
12312579bf1e2Sjfb8856606 					    proto_num, proto, op);
123132bfe3f2eSlogwang 	if (ret)
123142bfe3f2eSlogwang 		PMD_DRV_LOG(INFO, "No pctype is updated.");
123152bfe3f2eSlogwang 
123162bfe3f2eSlogwang 	/* Update customized ptype info */
123172bfe3f2eSlogwang 	ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
12318579bf1e2Sjfb8856606 					   proto_num, proto, op);
123192bfe3f2eSlogwang 	if (ret)
123202bfe3f2eSlogwang 		PMD_DRV_LOG(INFO, "No ptype is updated.");
123212bfe3f2eSlogwang 
123222bfe3f2eSlogwang 	rte_free(proto);
123232bfe3f2eSlogwang }
123242bfe3f2eSlogwang 
123252bfe3f2eSlogwang /* Create a QinQ cloud filter
123262bfe3f2eSlogwang  *
123272bfe3f2eSlogwang  * The Fortville NIC has limited resources for tunnel filters,
123282bfe3f2eSlogwang  * so we can only reuse existing filters.
123292bfe3f2eSlogwang  *
123302bfe3f2eSlogwang  * In step 1 we define which Field Vector fields can be used for
123312bfe3f2eSlogwang  * filter types.
123322bfe3f2eSlogwang  * As we do not have the inner tag defined as a field,
123332bfe3f2eSlogwang  * we have to define it first, by reusing one of L1 entries.
123342bfe3f2eSlogwang  *
123352bfe3f2eSlogwang  * In step 2 we are replacing one of existing filter types with
123362bfe3f2eSlogwang  * a new one for QinQ.
123372bfe3f2eSlogwang  * As we reusing L1 and replacing L2, some of the default filter
123382bfe3f2eSlogwang  * types will disappear,which depends on L1 and L2 entries we reuse.
123392bfe3f2eSlogwang  *
123402bfe3f2eSlogwang  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
123412bfe3f2eSlogwang  *
123422bfe3f2eSlogwang  * 1.	Create L1 filter of outer vlan (12b) which will be in use
123432bfe3f2eSlogwang  *		later when we define the cloud filter.
123442bfe3f2eSlogwang  *	a.	Valid_flags.replace_cloud = 0
123452bfe3f2eSlogwang  *	b.	Old_filter = 10 (Stag_Inner_Vlan)
123462bfe3f2eSlogwang  *	c.	New_filter = 0x10
123472bfe3f2eSlogwang  *	d.	TR bit = 0xff (optional, not used here)
123482bfe3f2eSlogwang  *	e.	Buffer – 2 entries:
123492bfe3f2eSlogwang  *		i.	Byte 0 = 8 (outer vlan FV index).
123502bfe3f2eSlogwang  *			Byte 1 = 0 (rsv)
123512bfe3f2eSlogwang  *			Byte 2-3 = 0x0fff
123522bfe3f2eSlogwang  *		ii.	Byte 0 = 37 (inner vlan FV index).
123532bfe3f2eSlogwang  *			Byte 1 =0 (rsv)
123542bfe3f2eSlogwang  *			Byte 2-3 = 0x0fff
123552bfe3f2eSlogwang  *
123562bfe3f2eSlogwang  * Step 2:
123572bfe3f2eSlogwang  * 2.	Create cloud filter using two L1 filters entries: stag and
123582bfe3f2eSlogwang  *		new filter(outer vlan+ inner vlan)
123592bfe3f2eSlogwang  *	a.	Valid_flags.replace_cloud = 1
123602bfe3f2eSlogwang  *	b.	Old_filter = 1 (instead of outer IP)
123612bfe3f2eSlogwang  *	c.	New_filter = 0x10
123622bfe3f2eSlogwang  *	d.	Buffer – 2 entries:
123632bfe3f2eSlogwang  *		i.	Byte 0 = 0x80 | 7 (valid | Stag).
123642bfe3f2eSlogwang  *			Byte 1-3 = 0 (rsv)
123652bfe3f2eSlogwang  *		ii.	Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
123662bfe3f2eSlogwang  *			Byte 9-11 = 0 (rsv)
123672bfe3f2eSlogwang  */
123682bfe3f2eSlogwang static int
i40e_cloud_filter_qinq_create(struct i40e_pf * pf)123692bfe3f2eSlogwang i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
123702bfe3f2eSlogwang {
123712bfe3f2eSlogwang 	int ret = -ENOTSUP;
123722bfe3f2eSlogwang 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
123732bfe3f2eSlogwang 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
123742bfe3f2eSlogwang 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12375d30ea906Sjfb8856606 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
123762bfe3f2eSlogwang 
123772bfe3f2eSlogwang 	if (pf->support_multi_driver) {
123782bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
123792bfe3f2eSlogwang 		return ret;
123802bfe3f2eSlogwang 	}
123812bfe3f2eSlogwang 
123822bfe3f2eSlogwang 	/* Init */
123832bfe3f2eSlogwang 	memset(&filter_replace, 0,
123842bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
123852bfe3f2eSlogwang 	memset(&filter_replace_buf, 0,
123862bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
123872bfe3f2eSlogwang 
123882bfe3f2eSlogwang 	/* create L1 filter */
123892bfe3f2eSlogwang 	filter_replace.old_filter_type =
123902bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
123912bfe3f2eSlogwang 	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
123922bfe3f2eSlogwang 	filter_replace.tr_bit = 0;
123932bfe3f2eSlogwang 
123942bfe3f2eSlogwang 	/* Prepare the buffer, 2 entries */
123952bfe3f2eSlogwang 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
123962bfe3f2eSlogwang 	filter_replace_buf.data[0] |=
123972bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
123982bfe3f2eSlogwang 	/* Field Vector 12b mask */
123992bfe3f2eSlogwang 	filter_replace_buf.data[2] = 0xff;
124002bfe3f2eSlogwang 	filter_replace_buf.data[3] = 0x0f;
124012bfe3f2eSlogwang 	filter_replace_buf.data[4] =
124022bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
124032bfe3f2eSlogwang 	filter_replace_buf.data[4] |=
124042bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
124052bfe3f2eSlogwang 	/* Field Vector 12b mask */
124062bfe3f2eSlogwang 	filter_replace_buf.data[6] = 0xff;
124072bfe3f2eSlogwang 	filter_replace_buf.data[7] = 0x0f;
124082bfe3f2eSlogwang 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
124092bfe3f2eSlogwang 			&filter_replace_buf);
124102bfe3f2eSlogwang 	if (ret != I40E_SUCCESS)
124112bfe3f2eSlogwang 		return ret;
12412d30ea906Sjfb8856606 
12413d30ea906Sjfb8856606 	if (filter_replace.old_filter_type !=
12414d30ea906Sjfb8856606 	    filter_replace.new_filter_type)
12415d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
12416d30ea906Sjfb8856606 			    " original: 0x%x, new: 0x%x",
12417d30ea906Sjfb8856606 			    dev->device->name,
124182bfe3f2eSlogwang 			    filter_replace.old_filter_type,
124192bfe3f2eSlogwang 			    filter_replace.new_filter_type);
124202bfe3f2eSlogwang 
124212bfe3f2eSlogwang 	/* Apply the second L2 cloud filter */
124222bfe3f2eSlogwang 	memset(&filter_replace, 0,
124232bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
124242bfe3f2eSlogwang 	memset(&filter_replace_buf, 0,
124252bfe3f2eSlogwang 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
124262bfe3f2eSlogwang 
124272bfe3f2eSlogwang 	/* create L2 filter, input for L2 filter will be L1 filter  */
124282bfe3f2eSlogwang 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
124292bfe3f2eSlogwang 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
124302bfe3f2eSlogwang 	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
124312bfe3f2eSlogwang 
124322bfe3f2eSlogwang 	/* Prepare the buffer, 2 entries */
124332bfe3f2eSlogwang 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
124342bfe3f2eSlogwang 	filter_replace_buf.data[0] |=
124352bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
124362bfe3f2eSlogwang 	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
124372bfe3f2eSlogwang 	filter_replace_buf.data[4] |=
124382bfe3f2eSlogwang 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
124392bfe3f2eSlogwang 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
124402bfe3f2eSlogwang 			&filter_replace_buf);
12441d30ea906Sjfb8856606 	if (!ret && (filter_replace.old_filter_type !=
12442d30ea906Sjfb8856606 		     filter_replace.new_filter_type))
12443d30ea906Sjfb8856606 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
12444d30ea906Sjfb8856606 			    " original: 0x%x, new: 0x%x",
12445d30ea906Sjfb8856606 			    dev->device->name,
124462bfe3f2eSlogwang 			    filter_replace.old_filter_type,
124472bfe3f2eSlogwang 			    filter_replace.new_filter_type);
12448d30ea906Sjfb8856606 
124492bfe3f2eSlogwang 	return ret;
124502bfe3f2eSlogwang }
124512bfe3f2eSlogwang 
12452d30ea906Sjfb8856606 int
i40e_rss_conf_init(struct i40e_rte_flow_rss_conf * out,const struct rte_flow_action_rss * in)12453d30ea906Sjfb8856606 i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
12454d30ea906Sjfb8856606 		   const struct rte_flow_action_rss *in)
124552bfe3f2eSlogwang {
12456d30ea906Sjfb8856606 	if (in->key_len > RTE_DIM(out->key) ||
12457d30ea906Sjfb8856606 	    in->queue_num > RTE_DIM(out->queue))
12458d30ea906Sjfb8856606 		return -EINVAL;
12459d30ea906Sjfb8856606 	if (!in->key && in->key_len)
12460d30ea906Sjfb8856606 		return -EINVAL;
12461d30ea906Sjfb8856606 	out->conf = (struct rte_flow_action_rss){
12462d30ea906Sjfb8856606 		.func = in->func,
12463d30ea906Sjfb8856606 		.level = in->level,
12464d30ea906Sjfb8856606 		.types = in->types,
12465d30ea906Sjfb8856606 		.key_len = in->key_len,
12466d30ea906Sjfb8856606 		.queue_num = in->queue_num,
12467d30ea906Sjfb8856606 		.queue = memcpy(out->queue, in->queue,
12468d30ea906Sjfb8856606 				sizeof(*in->queue) * in->queue_num),
12469d30ea906Sjfb8856606 	};
12470d30ea906Sjfb8856606 	if (in->key)
12471d30ea906Sjfb8856606 		out->conf.key = memcpy(out->key, in->key, in->key_len);
12472d30ea906Sjfb8856606 	return 0;
12473d30ea906Sjfb8856606 }
12474d30ea906Sjfb8856606 
12475*2d9fd380Sjfb8856606 /* Write HENA register to enable hash */
12476*2d9fd380Sjfb8856606 static int
i40e_rss_hash_set(struct i40e_pf * pf,struct i40e_rte_flow_rss_conf * rss_conf)12477*2d9fd380Sjfb8856606 i40e_rss_hash_set(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *rss_conf)
12478d30ea906Sjfb8856606 {
12479d30ea906Sjfb8856606 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12480*2d9fd380Sjfb8856606 	uint8_t *key = (void *)(uintptr_t)rss_conf->conf.key;
12481*2d9fd380Sjfb8856606 	uint64_t hena;
12482*2d9fd380Sjfb8856606 	int ret;
12483d30ea906Sjfb8856606 
12484*2d9fd380Sjfb8856606 	ret = i40e_set_rss_key(pf->main_vsi, key,
12485*2d9fd380Sjfb8856606 			       rss_conf->conf.key_len);
12486*2d9fd380Sjfb8856606 	if (ret)
12487*2d9fd380Sjfb8856606 		return ret;
12488*2d9fd380Sjfb8856606 
12489*2d9fd380Sjfb8856606 	hena = i40e_config_hena(pf->adapter, rss_conf->conf.types);
12490*2d9fd380Sjfb8856606 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
12491*2d9fd380Sjfb8856606 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
12492*2d9fd380Sjfb8856606 	I40E_WRITE_FLUSH(hw);
12493*2d9fd380Sjfb8856606 
12494d30ea906Sjfb8856606 	return 0;
12495d30ea906Sjfb8856606 }
12496*2d9fd380Sjfb8856606 
12497*2d9fd380Sjfb8856606 /* Configure hash input set */
12498*2d9fd380Sjfb8856606 static int
i40e_rss_conf_hash_inset(struct i40e_pf * pf,uint64_t types)12499*2d9fd380Sjfb8856606 i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types)
12500*2d9fd380Sjfb8856606 {
12501*2d9fd380Sjfb8856606 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12502*2d9fd380Sjfb8856606 	struct rte_eth_input_set_conf conf;
12503*2d9fd380Sjfb8856606 	uint64_t mask0;
12504*2d9fd380Sjfb8856606 	int ret = 0;
12505*2d9fd380Sjfb8856606 	uint32_t j;
12506*2d9fd380Sjfb8856606 	int i;
12507*2d9fd380Sjfb8856606 	static const struct {
12508*2d9fd380Sjfb8856606 		uint64_t type;
12509*2d9fd380Sjfb8856606 		enum rte_eth_input_set_field field;
12510*2d9fd380Sjfb8856606 	} inset_match_table[] = {
12511*2d9fd380Sjfb8856606 		{ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
12512*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_SRC_IP4},
12513*2d9fd380Sjfb8856606 		{ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
12514*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_DST_IP4},
12515*2d9fd380Sjfb8856606 		{ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_SRC_ONLY,
12516*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_UNKNOWN},
12517*2d9fd380Sjfb8856606 		{ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY,
12518*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_UNKNOWN},
12519*2d9fd380Sjfb8856606 
12520*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
12521*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_SRC_IP4},
12522*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
12523*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_DST_IP4},
12524*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
12525*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
12526*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
12527*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
12528*2d9fd380Sjfb8856606 
12529*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
12530*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_SRC_IP4},
12531*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
12532*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_DST_IP4},
12533*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
12534*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
12535*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
12536*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
12537*2d9fd380Sjfb8856606 
12538*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
12539*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_SRC_IP4},
12540*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
12541*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_DST_IP4},
12542*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
12543*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
12544*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
12545*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
12546*2d9fd380Sjfb8856606 
12547*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
12548*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_SRC_IP4},
12549*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
12550*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_DST_IP4},
12551*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_SRC_ONLY,
12552*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_UNKNOWN},
12553*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_DST_ONLY,
12554*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_UNKNOWN},
12555*2d9fd380Sjfb8856606 
12556*2d9fd380Sjfb8856606 		{ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
12557*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_SRC_IP6},
12558*2d9fd380Sjfb8856606 		{ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
12559*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_DST_IP6},
12560*2d9fd380Sjfb8856606 		{ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_SRC_ONLY,
12561*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_UNKNOWN},
12562*2d9fd380Sjfb8856606 		{ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_DST_ONLY,
12563*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_UNKNOWN},
12564*2d9fd380Sjfb8856606 
12565*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
12566*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_SRC_IP6},
12567*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
12568*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_DST_IP6},
12569*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
12570*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
12571*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
12572*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
12573*2d9fd380Sjfb8856606 
12574*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
12575*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_SRC_IP6},
12576*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
12577*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_DST_IP6},
12578*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
12579*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
12580*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
12581*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
12582*2d9fd380Sjfb8856606 
12583*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
12584*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_SRC_IP6},
12585*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
12586*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_DST_IP6},
12587*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
12588*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
12589*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
12590*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
12591*2d9fd380Sjfb8856606 
12592*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
12593*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_SRC_IP6},
12594*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
12595*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_L3_DST_IP6},
12596*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_SRC_ONLY,
12597*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_UNKNOWN},
12598*2d9fd380Sjfb8856606 		{ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_DST_ONLY,
12599*2d9fd380Sjfb8856606 			RTE_ETH_INPUT_SET_UNKNOWN},
12600*2d9fd380Sjfb8856606 	};
12601*2d9fd380Sjfb8856606 
12602*2d9fd380Sjfb8856606 	mask0 = types & pf->adapter->flow_types_mask;
12603*2d9fd380Sjfb8856606 	conf.op = RTE_ETH_INPUT_SET_SELECT;
12604*2d9fd380Sjfb8856606 	conf.inset_size = 0;
12605*2d9fd380Sjfb8856606 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) {
12606*2d9fd380Sjfb8856606 		if (mask0 & (1ULL << i)) {
12607*2d9fd380Sjfb8856606 			conf.flow_type = i;
12608*2d9fd380Sjfb8856606 			break;
12609d30ea906Sjfb8856606 		}
12610*2d9fd380Sjfb8856606 	}
12611*2d9fd380Sjfb8856606 
12612*2d9fd380Sjfb8856606 	for (j = 0; j < RTE_DIM(inset_match_table); j++) {
12613*2d9fd380Sjfb8856606 		if ((types & inset_match_table[j].type) ==
12614*2d9fd380Sjfb8856606 		    inset_match_table[j].type) {
12615*2d9fd380Sjfb8856606 			if (inset_match_table[j].field ==
12616*2d9fd380Sjfb8856606 			    RTE_ETH_INPUT_SET_UNKNOWN)
12617*2d9fd380Sjfb8856606 				return -EINVAL;
12618*2d9fd380Sjfb8856606 
12619*2d9fd380Sjfb8856606 			conf.field[conf.inset_size] =
12620*2d9fd380Sjfb8856606 				inset_match_table[j].field;
12621*2d9fd380Sjfb8856606 			conf.inset_size++;
12622*2d9fd380Sjfb8856606 		}
12623*2d9fd380Sjfb8856606 	}
12624*2d9fd380Sjfb8856606 
12625*2d9fd380Sjfb8856606 	if (conf.inset_size) {
12626*2d9fd380Sjfb8856606 		ret = i40e_hash_filter_inset_select(hw, &conf);
12627*2d9fd380Sjfb8856606 		if (ret)
12628*2d9fd380Sjfb8856606 			return ret;
12629*2d9fd380Sjfb8856606 	}
12630*2d9fd380Sjfb8856606 
12631*2d9fd380Sjfb8856606 	return ret;
12632*2d9fd380Sjfb8856606 }
12633*2d9fd380Sjfb8856606 
12634*2d9fd380Sjfb8856606 /* Look up the conflicted rule then mark it as invalid */
12635*2d9fd380Sjfb8856606 static void
i40e_rss_mark_invalid_rule(struct i40e_pf * pf,struct i40e_rte_flow_rss_conf * conf)12636*2d9fd380Sjfb8856606 i40e_rss_mark_invalid_rule(struct i40e_pf *pf,
12637*2d9fd380Sjfb8856606 		struct i40e_rte_flow_rss_conf *conf)
12638*2d9fd380Sjfb8856606 {
12639*2d9fd380Sjfb8856606 	struct i40e_rss_filter *rss_item;
12640*2d9fd380Sjfb8856606 	uint64_t rss_inset;
12641*2d9fd380Sjfb8856606 
12642*2d9fd380Sjfb8856606 	/* Clear input set bits before comparing the pctype */
12643*2d9fd380Sjfb8856606 	rss_inset = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
12644*2d9fd380Sjfb8856606 		ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
12645*2d9fd380Sjfb8856606 
12646*2d9fd380Sjfb8856606 	/* Look up the conflicted rule then mark it as invalid */
12647*2d9fd380Sjfb8856606 	TAILQ_FOREACH(rss_item, &pf->rss_config_list, next) {
12648*2d9fd380Sjfb8856606 		if (!rss_item->rss_filter_info.valid)
12649*2d9fd380Sjfb8856606 			continue;
12650*2d9fd380Sjfb8856606 
12651*2d9fd380Sjfb8856606 		if (conf->conf.queue_num &&
12652*2d9fd380Sjfb8856606 		    rss_item->rss_filter_info.conf.queue_num)
12653*2d9fd380Sjfb8856606 			rss_item->rss_filter_info.valid = false;
12654*2d9fd380Sjfb8856606 
12655*2d9fd380Sjfb8856606 		if (conf->conf.types &&
12656*2d9fd380Sjfb8856606 		    (rss_item->rss_filter_info.conf.types &
12657*2d9fd380Sjfb8856606 		    rss_inset) ==
12658*2d9fd380Sjfb8856606 		    (conf->conf.types & rss_inset))
12659*2d9fd380Sjfb8856606 			rss_item->rss_filter_info.valid = false;
12660*2d9fd380Sjfb8856606 
12661*2d9fd380Sjfb8856606 		if (conf->conf.func ==
12662*2d9fd380Sjfb8856606 		    RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
12663*2d9fd380Sjfb8856606 		    rss_item->rss_filter_info.conf.func ==
12664*2d9fd380Sjfb8856606 		    RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
12665*2d9fd380Sjfb8856606 			rss_item->rss_filter_info.valid = false;
12666*2d9fd380Sjfb8856606 	}
12667*2d9fd380Sjfb8856606 }
12668*2d9fd380Sjfb8856606 
12669*2d9fd380Sjfb8856606 /* Configure RSS hash function */
12670*2d9fd380Sjfb8856606 static int
i40e_rss_config_hash_function(struct i40e_pf * pf,struct i40e_rte_flow_rss_conf * conf)12671*2d9fd380Sjfb8856606 i40e_rss_config_hash_function(struct i40e_pf *pf,
12672*2d9fd380Sjfb8856606 		struct i40e_rte_flow_rss_conf *conf)
12673*2d9fd380Sjfb8856606 {
12674*2d9fd380Sjfb8856606 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12675*2d9fd380Sjfb8856606 	uint32_t reg, i;
12676*2d9fd380Sjfb8856606 	uint64_t mask0;
12677*2d9fd380Sjfb8856606 	uint16_t j;
12678*2d9fd380Sjfb8856606 
12679*2d9fd380Sjfb8856606 	if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
12680*2d9fd380Sjfb8856606 		reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
12681*2d9fd380Sjfb8856606 		if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
12682*2d9fd380Sjfb8856606 			PMD_DRV_LOG(DEBUG, "Hash function already set to Simple XOR");
12683*2d9fd380Sjfb8856606 			I40E_WRITE_FLUSH(hw);
12684*2d9fd380Sjfb8856606 			i40e_rss_mark_invalid_rule(pf, conf);
12685*2d9fd380Sjfb8856606 
12686*2d9fd380Sjfb8856606 			return 0;
12687*2d9fd380Sjfb8856606 		}
12688*2d9fd380Sjfb8856606 		reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
12689*2d9fd380Sjfb8856606 
12690*2d9fd380Sjfb8856606 		i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
12691*2d9fd380Sjfb8856606 		I40E_WRITE_FLUSH(hw);
12692*2d9fd380Sjfb8856606 		i40e_rss_mark_invalid_rule(pf, conf);
12693*2d9fd380Sjfb8856606 	} else if (conf->conf.func ==
12694*2d9fd380Sjfb8856606 		   RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
12695*2d9fd380Sjfb8856606 		mask0 = conf->conf.types & pf->adapter->flow_types_mask;
12696*2d9fd380Sjfb8856606 
12697*2d9fd380Sjfb8856606 		i40e_set_symmetric_hash_enable_per_port(hw, 1);
12698*2d9fd380Sjfb8856606 		for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
12699*2d9fd380Sjfb8856606 			if (mask0 & (1UL << i))
12700*2d9fd380Sjfb8856606 				break;
12701*2d9fd380Sjfb8856606 		}
12702*2d9fd380Sjfb8856606 
12703*2d9fd380Sjfb8856606 		if (i == UINT64_BIT)
12704*2d9fd380Sjfb8856606 			return -EINVAL;
12705*2d9fd380Sjfb8856606 
12706*2d9fd380Sjfb8856606 		for (j = I40E_FILTER_PCTYPE_INVALID + 1;
12707*2d9fd380Sjfb8856606 		     j < I40E_FILTER_PCTYPE_MAX; j++) {
12708*2d9fd380Sjfb8856606 			if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
12709*2d9fd380Sjfb8856606 				i40e_write_global_rx_ctl(hw,
12710*2d9fd380Sjfb8856606 					I40E_GLQF_HSYM(j),
12711*2d9fd380Sjfb8856606 					I40E_GLQF_HSYM_SYMH_ENA_MASK);
12712*2d9fd380Sjfb8856606 		}
12713*2d9fd380Sjfb8856606 	}
12714*2d9fd380Sjfb8856606 
12715*2d9fd380Sjfb8856606 	return 0;
12716*2d9fd380Sjfb8856606 }
12717*2d9fd380Sjfb8856606 
12718*2d9fd380Sjfb8856606 /* Enable RSS according to the configuration */
12719*2d9fd380Sjfb8856606 static int
i40e_rss_enable_hash(struct i40e_pf * pf,struct i40e_rte_flow_rss_conf * conf)12720*2d9fd380Sjfb8856606 i40e_rss_enable_hash(struct i40e_pf *pf,
12721*2d9fd380Sjfb8856606 		struct i40e_rte_flow_rss_conf *conf)
12722*2d9fd380Sjfb8856606 {
12723*2d9fd380Sjfb8856606 	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
12724*2d9fd380Sjfb8856606 	struct i40e_rte_flow_rss_conf rss_conf;
12725*2d9fd380Sjfb8856606 
12726*2d9fd380Sjfb8856606 	if (!(conf->conf.types & pf->adapter->flow_types_mask))
12727*2d9fd380Sjfb8856606 		return -ENOTSUP;
12728*2d9fd380Sjfb8856606 
12729*2d9fd380Sjfb8856606 	memset(&rss_conf, 0, sizeof(rss_conf));
12730*2d9fd380Sjfb8856606 	rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
12731*2d9fd380Sjfb8856606 
12732*2d9fd380Sjfb8856606 	/* Configure hash input set */
12733*2d9fd380Sjfb8856606 	if (i40e_rss_conf_hash_inset(pf, conf->conf.types))
12734*2d9fd380Sjfb8856606 		return -EINVAL;
12735*2d9fd380Sjfb8856606 
12736*2d9fd380Sjfb8856606 	if (rss_conf.conf.key == NULL || rss_conf.conf.key_len <
12737*2d9fd380Sjfb8856606 	    (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
12738*2d9fd380Sjfb8856606 		/* Random default keys */
12739*2d9fd380Sjfb8856606 		static uint32_t rss_key_default[] = {0x6b793944,
12740*2d9fd380Sjfb8856606 			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
12741*2d9fd380Sjfb8856606 			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
12742*2d9fd380Sjfb8856606 			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
12743*2d9fd380Sjfb8856606 
12744*2d9fd380Sjfb8856606 		rss_conf.conf.key = (uint8_t *)rss_key_default;
12745*2d9fd380Sjfb8856606 		rss_conf.conf.key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
12746*2d9fd380Sjfb8856606 				sizeof(uint32_t);
12747*2d9fd380Sjfb8856606 		PMD_DRV_LOG(INFO,
12748*2d9fd380Sjfb8856606 			"No valid RSS key config for i40e, using default\n");
12749*2d9fd380Sjfb8856606 	}
12750*2d9fd380Sjfb8856606 
12751*2d9fd380Sjfb8856606 	rss_conf.conf.types |= rss_info->conf.types;
12752*2d9fd380Sjfb8856606 	i40e_rss_hash_set(pf, &rss_conf);
12753*2d9fd380Sjfb8856606 
12754*2d9fd380Sjfb8856606 	if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
12755*2d9fd380Sjfb8856606 		i40e_rss_config_hash_function(pf, conf);
12756*2d9fd380Sjfb8856606 
12757*2d9fd380Sjfb8856606 	i40e_rss_mark_invalid_rule(pf, conf);
12758*2d9fd380Sjfb8856606 
12759*2d9fd380Sjfb8856606 	return 0;
12760*2d9fd380Sjfb8856606 }
12761*2d9fd380Sjfb8856606 
12762*2d9fd380Sjfb8856606 /* Configure RSS queue region */
12763*2d9fd380Sjfb8856606 static int
i40e_rss_config_queue_region(struct i40e_pf * pf,struct i40e_rte_flow_rss_conf * conf)12764*2d9fd380Sjfb8856606 i40e_rss_config_queue_region(struct i40e_pf *pf,
12765*2d9fd380Sjfb8856606 		struct i40e_rte_flow_rss_conf *conf)
12766*2d9fd380Sjfb8856606 {
12767*2d9fd380Sjfb8856606 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12768*2d9fd380Sjfb8856606 	uint32_t lut = 0;
12769*2d9fd380Sjfb8856606 	uint16_t j, num;
12770*2d9fd380Sjfb8856606 	uint32_t i;
12771d30ea906Sjfb8856606 
12772d30ea906Sjfb8856606 	/* If both VMDQ and RSS enabled, not all of PF queues are configured.
12773d30ea906Sjfb8856606 	 * It's necessary to calculate the actual PF queues that are configured.
12774d30ea906Sjfb8856606 	 */
12775d30ea906Sjfb8856606 	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
12776d30ea906Sjfb8856606 		num = i40e_pf_calc_configured_queues_num(pf);
12777d30ea906Sjfb8856606 	else
12778d30ea906Sjfb8856606 		num = pf->dev_data->nb_rx_queues;
12779d30ea906Sjfb8856606 
12780d30ea906Sjfb8856606 	num = RTE_MIN(num, conf->conf.queue_num);
12781d30ea906Sjfb8856606 	PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
12782d30ea906Sjfb8856606 			num);
12783d30ea906Sjfb8856606 
12784d30ea906Sjfb8856606 	if (num == 0) {
12785*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
12786*2d9fd380Sjfb8856606 			"No PF queues are configured to enable RSS for port %u",
12787*2d9fd380Sjfb8856606 			pf->dev_data->port_id);
12788d30ea906Sjfb8856606 		return -ENOTSUP;
12789d30ea906Sjfb8856606 	}
12790d30ea906Sjfb8856606 
12791d30ea906Sjfb8856606 	/* Fill in redirection table */
12792d30ea906Sjfb8856606 	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
12793d30ea906Sjfb8856606 		if (j == num)
12794d30ea906Sjfb8856606 			j = 0;
12795d30ea906Sjfb8856606 		lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
12796d30ea906Sjfb8856606 			hw->func_caps.rss_table_entry_width) - 1));
12797d30ea906Sjfb8856606 		if ((i & 3) == 3)
12798d30ea906Sjfb8856606 			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
12799d30ea906Sjfb8856606 	}
12800d30ea906Sjfb8856606 
12801*2d9fd380Sjfb8856606 	i40e_rss_mark_invalid_rule(pf, conf);
12802*2d9fd380Sjfb8856606 
12803d30ea906Sjfb8856606 	return 0;
12804d30ea906Sjfb8856606 }
12805d30ea906Sjfb8856606 
12806*2d9fd380Sjfb8856606 /* Configure RSS hash function to default */
12807*2d9fd380Sjfb8856606 static int
i40e_rss_clear_hash_function(struct i40e_pf * pf,struct i40e_rte_flow_rss_conf * conf)12808*2d9fd380Sjfb8856606 i40e_rss_clear_hash_function(struct i40e_pf *pf,
12809*2d9fd380Sjfb8856606 		struct i40e_rte_flow_rss_conf *conf)
12810*2d9fd380Sjfb8856606 {
12811*2d9fd380Sjfb8856606 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12812*2d9fd380Sjfb8856606 	uint32_t i, reg;
12813*2d9fd380Sjfb8856606 	uint64_t mask0;
12814*2d9fd380Sjfb8856606 	uint16_t j;
12815*2d9fd380Sjfb8856606 
12816*2d9fd380Sjfb8856606 	if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
12817*2d9fd380Sjfb8856606 		reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
12818*2d9fd380Sjfb8856606 		if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
12819*2d9fd380Sjfb8856606 			PMD_DRV_LOG(DEBUG,
12820*2d9fd380Sjfb8856606 				"Hash function already set to Toeplitz");
12821*2d9fd380Sjfb8856606 			I40E_WRITE_FLUSH(hw);
12822*2d9fd380Sjfb8856606 
12823*2d9fd380Sjfb8856606 			return 0;
12824*2d9fd380Sjfb8856606 		}
12825*2d9fd380Sjfb8856606 		reg |= I40E_GLQF_CTL_HTOEP_MASK;
12826*2d9fd380Sjfb8856606 
12827*2d9fd380Sjfb8856606 		i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
12828*2d9fd380Sjfb8856606 		I40E_WRITE_FLUSH(hw);
12829*2d9fd380Sjfb8856606 	} else if (conf->conf.func ==
12830*2d9fd380Sjfb8856606 		   RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
12831*2d9fd380Sjfb8856606 		mask0 = conf->conf.types & pf->adapter->flow_types_mask;
12832*2d9fd380Sjfb8856606 
12833*2d9fd380Sjfb8856606 		for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
12834*2d9fd380Sjfb8856606 			if (mask0 & (1UL << i))
12835*2d9fd380Sjfb8856606 				break;
12836d30ea906Sjfb8856606 		}
12837d30ea906Sjfb8856606 
12838*2d9fd380Sjfb8856606 		if (i == UINT64_BIT)
12839d30ea906Sjfb8856606 			return -EINVAL;
12840d30ea906Sjfb8856606 
12841*2d9fd380Sjfb8856606 		for (j = I40E_FILTER_PCTYPE_INVALID + 1;
12842*2d9fd380Sjfb8856606 		     j < I40E_FILTER_PCTYPE_MAX; j++) {
12843*2d9fd380Sjfb8856606 			if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
12844*2d9fd380Sjfb8856606 				i40e_write_global_rx_ctl(hw,
12845*2d9fd380Sjfb8856606 					I40E_GLQF_HSYM(j),
12846*2d9fd380Sjfb8856606 					0);
12847*2d9fd380Sjfb8856606 		}
12848*2d9fd380Sjfb8856606 	}
12849*2d9fd380Sjfb8856606 
12850d30ea906Sjfb8856606 	return 0;
12851d30ea906Sjfb8856606 }
12852d30ea906Sjfb8856606 
12853*2d9fd380Sjfb8856606 /* Disable RSS hash and configure default input set */
12854*2d9fd380Sjfb8856606 static int
i40e_rss_disable_hash(struct i40e_pf * pf,struct i40e_rte_flow_rss_conf * conf)12855*2d9fd380Sjfb8856606 i40e_rss_disable_hash(struct i40e_pf *pf,
12856*2d9fd380Sjfb8856606 		struct i40e_rte_flow_rss_conf *conf)
12857d30ea906Sjfb8856606 {
12858*2d9fd380Sjfb8856606 	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
12859*2d9fd380Sjfb8856606 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12860*2d9fd380Sjfb8856606 	struct i40e_rte_flow_rss_conf rss_conf;
12861*2d9fd380Sjfb8856606 	uint32_t i;
128624418919fSjohnjiang 
12863*2d9fd380Sjfb8856606 	memset(&rss_conf, 0, sizeof(rss_conf));
12864*2d9fd380Sjfb8856606 	rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
128654418919fSjohnjiang 
12866*2d9fd380Sjfb8856606 	/* Disable RSS hash */
12867*2d9fd380Sjfb8856606 	rss_conf.conf.types = rss_info->conf.types & ~(conf->conf.types);
12868*2d9fd380Sjfb8856606 	i40e_rss_hash_set(pf, &rss_conf);
128694418919fSjohnjiang 
12870*2d9fd380Sjfb8856606 	for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) {
12871*2d9fd380Sjfb8856606 		if (!(pf->adapter->flow_types_mask & (1ULL << i)) ||
12872*2d9fd380Sjfb8856606 		    !(conf->conf.types & (1ULL << i)))
12873*2d9fd380Sjfb8856606 			continue;
12874*2d9fd380Sjfb8856606 
12875*2d9fd380Sjfb8856606 		/* Configure default input set */
12876*2d9fd380Sjfb8856606 		struct rte_eth_input_set_conf input_conf = {
12877*2d9fd380Sjfb8856606 			.op = RTE_ETH_INPUT_SET_SELECT,
12878*2d9fd380Sjfb8856606 			.flow_type = i,
12879*2d9fd380Sjfb8856606 			.inset_size = 1,
12880*2d9fd380Sjfb8856606 		};
12881*2d9fd380Sjfb8856606 		input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT;
12882*2d9fd380Sjfb8856606 		i40e_hash_filter_inset_select(hw, &input_conf);
128832bfe3f2eSlogwang 	}
128842bfe3f2eSlogwang 
12885*2d9fd380Sjfb8856606 	rss_info->conf.types = rss_conf.conf.types;
12886*2d9fd380Sjfb8856606 
12887*2d9fd380Sjfb8856606 	i40e_rss_clear_hash_function(pf, conf);
12888*2d9fd380Sjfb8856606 
12889*2d9fd380Sjfb8856606 	return 0;
12890*2d9fd380Sjfb8856606 }
12891*2d9fd380Sjfb8856606 
12892*2d9fd380Sjfb8856606 /* Configure RSS queue region to default */
12893*2d9fd380Sjfb8856606 static int
i40e_rss_clear_queue_region(struct i40e_pf * pf)12894*2d9fd380Sjfb8856606 i40e_rss_clear_queue_region(struct i40e_pf *pf)
12895*2d9fd380Sjfb8856606 {
12896*2d9fd380Sjfb8856606 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12897*2d9fd380Sjfb8856606 	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
12898*2d9fd380Sjfb8856606 	uint16_t queue[I40E_MAX_Q_PER_TC];
12899*2d9fd380Sjfb8856606 	uint32_t num_rxq, i;
12900*2d9fd380Sjfb8856606 	uint32_t lut = 0;
12901*2d9fd380Sjfb8856606 	uint16_t j, num;
12902*2d9fd380Sjfb8856606 
12903*2d9fd380Sjfb8856606 	num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC);
12904*2d9fd380Sjfb8856606 
12905*2d9fd380Sjfb8856606 	for (j = 0; j < num_rxq; j++)
12906*2d9fd380Sjfb8856606 		queue[j] = j;
12907*2d9fd380Sjfb8856606 
12908*2d9fd380Sjfb8856606 	/* If both VMDQ and RSS enabled, not all of PF queues are configured.
12909*2d9fd380Sjfb8856606 	 * It's necessary to calculate the actual PF queues that are configured.
12910*2d9fd380Sjfb8856606 	 */
12911*2d9fd380Sjfb8856606 	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
12912*2d9fd380Sjfb8856606 		num = i40e_pf_calc_configured_queues_num(pf);
12913*2d9fd380Sjfb8856606 	else
12914*2d9fd380Sjfb8856606 		num = pf->dev_data->nb_rx_queues;
12915*2d9fd380Sjfb8856606 
12916*2d9fd380Sjfb8856606 	num = RTE_MIN(num, num_rxq);
12917*2d9fd380Sjfb8856606 	PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
12918*2d9fd380Sjfb8856606 			num);
12919*2d9fd380Sjfb8856606 
12920*2d9fd380Sjfb8856606 	if (num == 0) {
12921*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
12922*2d9fd380Sjfb8856606 			"No PF queues are configured to enable RSS for port %u",
12923*2d9fd380Sjfb8856606 			pf->dev_data->port_id);
12924*2d9fd380Sjfb8856606 		return -ENOTSUP;
12925*2d9fd380Sjfb8856606 	}
12926*2d9fd380Sjfb8856606 
12927*2d9fd380Sjfb8856606 	/* Fill in redirection table */
12928*2d9fd380Sjfb8856606 	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
12929*2d9fd380Sjfb8856606 		if (j == num)
12930*2d9fd380Sjfb8856606 			j = 0;
12931*2d9fd380Sjfb8856606 		lut = (lut << 8) | (queue[j] & ((0x1 <<
12932*2d9fd380Sjfb8856606 			hw->func_caps.rss_table_entry_width) - 1));
12933*2d9fd380Sjfb8856606 		if ((i & 3) == 3)
12934*2d9fd380Sjfb8856606 			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
12935*2d9fd380Sjfb8856606 	}
12936*2d9fd380Sjfb8856606 
12937*2d9fd380Sjfb8856606 	rss_info->conf.queue_num = 0;
12938*2d9fd380Sjfb8856606 	memset(&rss_info->conf.queue, 0, sizeof(uint16_t));
12939*2d9fd380Sjfb8856606 
12940*2d9fd380Sjfb8856606 	return 0;
12941*2d9fd380Sjfb8856606 }
12942*2d9fd380Sjfb8856606 
12943*2d9fd380Sjfb8856606 int
i40e_config_rss_filter(struct i40e_pf * pf,struct i40e_rte_flow_rss_conf * conf,bool add)12944*2d9fd380Sjfb8856606 i40e_config_rss_filter(struct i40e_pf *pf,
12945*2d9fd380Sjfb8856606 		struct i40e_rte_flow_rss_conf *conf, bool add)
12946*2d9fd380Sjfb8856606 {
12947*2d9fd380Sjfb8856606 	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
12948*2d9fd380Sjfb8856606 	struct rte_flow_action_rss update_conf = rss_info->conf;
12949*2d9fd380Sjfb8856606 	int ret = 0;
12950*2d9fd380Sjfb8856606 
12951*2d9fd380Sjfb8856606 	if (add) {
12952*2d9fd380Sjfb8856606 		if (conf->conf.queue_num) {
12953*2d9fd380Sjfb8856606 			/* Configure RSS queue region */
12954*2d9fd380Sjfb8856606 			ret = i40e_rss_config_queue_region(pf, conf);
12955*2d9fd380Sjfb8856606 			if (ret)
12956*2d9fd380Sjfb8856606 				return ret;
12957*2d9fd380Sjfb8856606 
12958*2d9fd380Sjfb8856606 			update_conf.queue_num = conf->conf.queue_num;
12959*2d9fd380Sjfb8856606 			update_conf.queue = conf->conf.queue;
12960*2d9fd380Sjfb8856606 		} else if (conf->conf.func ==
12961*2d9fd380Sjfb8856606 			   RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
12962*2d9fd380Sjfb8856606 			/* Configure hash function */
12963*2d9fd380Sjfb8856606 			ret = i40e_rss_config_hash_function(pf, conf);
12964*2d9fd380Sjfb8856606 			if (ret)
12965*2d9fd380Sjfb8856606 				return ret;
12966*2d9fd380Sjfb8856606 
12967*2d9fd380Sjfb8856606 			update_conf.func = conf->conf.func;
12968*2d9fd380Sjfb8856606 		} else {
12969*2d9fd380Sjfb8856606 			/* Configure hash enable and input set */
12970*2d9fd380Sjfb8856606 			ret = i40e_rss_enable_hash(pf, conf);
12971*2d9fd380Sjfb8856606 			if (ret)
12972*2d9fd380Sjfb8856606 				return ret;
12973*2d9fd380Sjfb8856606 
12974*2d9fd380Sjfb8856606 			update_conf.types |= conf->conf.types;
12975*2d9fd380Sjfb8856606 			update_conf.key = conf->conf.key;
12976*2d9fd380Sjfb8856606 			update_conf.key_len = conf->conf.key_len;
12977*2d9fd380Sjfb8856606 		}
12978*2d9fd380Sjfb8856606 
12979*2d9fd380Sjfb8856606 		/* Update RSS info in pf */
12980*2d9fd380Sjfb8856606 		if (i40e_rss_conf_init(rss_info, &update_conf))
12981*2d9fd380Sjfb8856606 			return -EINVAL;
12982*2d9fd380Sjfb8856606 	} else {
12983*2d9fd380Sjfb8856606 		if (!conf->valid)
12984*2d9fd380Sjfb8856606 			return 0;
12985*2d9fd380Sjfb8856606 
12986*2d9fd380Sjfb8856606 		if (conf->conf.queue_num)
12987*2d9fd380Sjfb8856606 			i40e_rss_clear_queue_region(pf);
12988*2d9fd380Sjfb8856606 		else if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
12989*2d9fd380Sjfb8856606 			i40e_rss_clear_hash_function(pf, conf);
12990*2d9fd380Sjfb8856606 		else
12991*2d9fd380Sjfb8856606 			i40e_rss_disable_hash(pf, conf);
12992*2d9fd380Sjfb8856606 	}
12993*2d9fd380Sjfb8856606 
12994*2d9fd380Sjfb8856606 	return 0;
12995*2d9fd380Sjfb8856606 }
12996*2d9fd380Sjfb8856606 
12997*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(i40e_logtype_init, pmd.net.i40e.init, NOTICE);
12998*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(i40e_logtype_driver, pmd.net.i40e.driver, NOTICE);
12999*2d9fd380Sjfb8856606 #ifdef RTE_LIBRTE_I40E_DEBUG_RX
13000*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(i40e_logtype_rx, pmd.net.i40e.rx, DEBUG);
13001*2d9fd380Sjfb8856606 #endif
13002*2d9fd380Sjfb8856606 #ifdef RTE_LIBRTE_I40E_DEBUG_TX
13003*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(i40e_logtype_tx, pmd.net.i40e.tx, DEBUG);
13004*2d9fd380Sjfb8856606 #endif
13005*2d9fd380Sjfb8856606 #ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
13006*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(i40e_logtype_tx_free, pmd.net.i40e.tx_free, DEBUG);
13007*2d9fd380Sjfb8856606 #endif
13008*2d9fd380Sjfb8856606 
130092bfe3f2eSlogwang RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
13010d30ea906Sjfb8856606 			      ETH_I40E_FLOATING_VEB_ARG "=1"
13011d30ea906Sjfb8856606 			      ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
13012d30ea906Sjfb8856606 			      ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
13013d30ea906Sjfb8856606 			      ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
13014d30ea906Sjfb8856606 			      ETH_I40E_USE_LATEST_VEC "=0|1");
13015