/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>
#include <rte_bitmap.h>
#include <rte_os_shim.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"
#include "i40e_hash.h"

#define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
#define ETH_I40E_SUPPORT_MULTI_DRIVER	"support-multi-driver"
#define ETH_I40E_QUEUE_NUM_PER_VF_ARG	"queue-num-per-vf"
#define ETH_I40E_VF_MSG_CFG		"vf_msg_cfg"
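/*
 * Illustrative devargs usage for the keys above (the BDF and values are
 * hypothetical); each key is matched against the valid_keys[] table
 * defined later in this file:
 *   dpdk-testpmd -a 0000:02:00.0,enable_floating_veb=1,queue-num-per-vf=8
 */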

#define I40E_CLEAR_PXE_WAIT_MS     200
#define I40E_VSI_TSR_QINQ_STRIP		0x4010
#define I40E_VSI_TSR(_i)	(0x00050800 + ((_i) * 4))

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
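/*
 * Worked example: 0xF2000 >> I40E_KILOSHIFT = 991232 >> 10 = 968 KB,
 * i.e. both the default high and low water marks evaluate to the same
 * value, the full receive packet buffer size (I40E_RXPBSIZE / 1024).
 */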

/* Receive Average Packet Size in bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL

/**
 * Below are values for writing un-exposed registers suggested
 * by silicon experts
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE)*/
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0000FF00UL
#define I40E_INSET_IPV4_TTL_MASK        0x000000FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x0000FF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0000F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x0000FF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000000FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
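/*
 * Minimal sketch (not the driver's actual routine) of a read-modify-write
 * with the offsets/masks above to enable Extended Tag, assuming a PCI
 * config accessor such as rte_pci_read_config()/rte_pci_write_config():
 *
 *   uint32_t ctrl;
 *   rte_pci_read_config(pci_dev, &ctrl, sizeof(ctrl), PCI_DEV_CTRL_REG);
 *   ctrl |= PCI_DEV_CTRL_EXT_TAG_MASK;
 *   rte_pci_write_config(pci_dev, &ctrl, sizeof(ctrl), PCI_DEV_CTRL_REG);
 */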

#define I40E_GLQF_PIT_IPV4_START	2
#define I40E_GLQF_PIT_IPV4_COUNT	2
#define I40E_GLQF_PIT_IPV6_START	4
#define I40E_GLQF_PIT_IPV6_COUNT	2

#define I40E_GLQF_PIT_SOURCE_OFF_GET(a)	\
				(((a) & I40E_GLQF_PIT_SOURCE_OFF_MASK) >> \
				 I40E_GLQF_PIT_SOURCE_OFF_SHIFT)

#define I40E_GLQF_PIT_DEST_OFF_GET(a) \
				(((a) & I40E_GLQF_PIT_DEST_OFF_MASK) >> \
				 I40E_GLQF_PIT_DEST_OFF_SHIFT)

#define I40E_GLQF_PIT_FSIZE_GET(a)	(((a) & I40E_GLQF_PIT_FSIZE_MASK) >> \
					 I40E_GLQF_PIT_FSIZE_SHIFT)

#define I40E_GLQF_PIT_BUILD(off, mask)	(((off) << 16) | (mask))
#define I40E_FDIR_FIELD_OFFSET(a)	((a) >> 1)
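/*
 * Worked examples for the two helper macros above (values illustrative):
 *   I40E_GLQF_PIT_BUILD(2, 0x9420) == (2 << 16) | 0x9420 == 0x00029420
 *   I40E_FDIR_FIELD_OFFSET(4)      == 4 >> 1 == 2
 */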

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static int i40e_dev_stop(struct rte_eth_dev *dev);
static int i40e_dev_close(struct rte_eth_dev *dev);
static int i40e_dev_reset(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
			       struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     unsigned limit);
static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
				char *fw_version, size_t fw_size);
static int i40e_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id,
				int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
			      enum rte_vlan_type vlan_type,
			      uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue,
				      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr,
			    uint32_t index,
			    uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
			       uint32_t hireg,
			       uint32_t loreg,
			       bool offset_loaded,
			       uint64_t *offset,
			       uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static void i40e_dev_alarm_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
				uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
						struct i40e_vsi *vsi);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
				 const struct rte_flow_ops **ops);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
				  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp,
					   uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
				   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
				    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					  uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
			 struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *eeprom);

static int i40e_get_module_info(struct rte_eth_dev *dev,
				struct rte_eth_dev_module_info *modinfo);
static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
				  struct rte_dev_eeprom_info *info);

static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size);

static int i40e_ethertype_filter_convert(
	const struct rte_eth_ethertype_filter *input,
	struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
	struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
				struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

static const char *const valid_keys[] = {
	ETH_I40E_FLOATING_VEB_ARG,
	ETH_I40E_FLOATING_VEB_LIST_ARG,
	ETH_I40E_SUPPORT_MULTI_DRIVER,
	ETH_I40E_QUEUE_NUM_PER_VF_ARG,
	ETH_I40E_VF_MSG_CFG,
	NULL};

static const struct rte_pci_id pci_id_i40e_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure                = i40e_dev_configure,
	.dev_start                    = i40e_dev_start,
	.dev_stop                     = i40e_dev_stop,
	.dev_close                    = i40e_dev_close,
	.dev_reset		      = i40e_dev_reset,
	.promiscuous_enable           = i40e_dev_promiscuous_enable,
	.promiscuous_disable          = i40e_dev_promiscuous_disable,
	.allmulticast_enable          = i40e_dev_allmulticast_enable,
	.allmulticast_disable         = i40e_dev_allmulticast_disable,
	.dev_set_link_up              = i40e_dev_set_link_up,
	.dev_set_link_down            = i40e_dev_set_link_down,
	.link_update                  = i40e_dev_link_update,
	.stats_get                    = i40e_dev_stats_get,
	.xstats_get                   = i40e_dev_xstats_get,
	.xstats_get_names             = i40e_dev_xstats_get_names,
	.stats_reset                  = i40e_dev_stats_reset,
	.xstats_reset                 = i40e_dev_stats_reset,
	.fw_version_get               = i40e_fw_version_get,
	.dev_infos_get                = i40e_dev_info_get,
	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
	.vlan_filter_set              = i40e_vlan_filter_set,
	.vlan_tpid_set                = i40e_vlan_tpid_set,
	.vlan_offload_set             = i40e_vlan_offload_set,
	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
	.vlan_pvid_set                = i40e_vlan_pvid_set,
	.rx_queue_start               = i40e_dev_rx_queue_start,
	.rx_queue_stop                = i40e_dev_rx_queue_stop,
	.tx_queue_start               = i40e_dev_tx_queue_start,
	.tx_queue_stop                = i40e_dev_tx_queue_stop,
	.rx_queue_setup               = i40e_dev_rx_queue_setup,
	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
	.rx_queue_release             = i40e_dev_rx_queue_release,
	.tx_queue_setup               = i40e_dev_tx_queue_setup,
	.tx_queue_release             = i40e_dev_tx_queue_release,
	.dev_led_on                   = i40e_dev_led_on,
	.dev_led_off                  = i40e_dev_led_off,
	.flow_ctrl_get                = i40e_flow_ctrl_get,
	.flow_ctrl_set                = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
	.mac_addr_add                 = i40e_macaddr_add,
	.mac_addr_remove              = i40e_macaddr_remove,
	.reta_update                  = i40e_dev_rss_reta_update,
	.reta_query                   = i40e_dev_rss_reta_query,
	.rss_hash_update              = i40e_dev_rss_hash_update,
	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
	.flow_ops_get                 = i40e_dev_flow_ops_get,
	.rxq_info_get                 = i40e_rxq_info_get,
	.txq_info_get                 = i40e_txq_info_get,
	.rx_burst_mode_get            = i40e_rx_burst_mode_get,
	.tx_burst_mode_get            = i40e_tx_burst_mode_get,
	.timesync_enable              = i40e_timesync_enable,
	.timesync_disable             = i40e_timesync_disable,
	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
	.get_dcb_info                 = i40e_dev_get_dcb_info,
	.timesync_adjust_time         = i40e_timesync_adjust_time,
	.timesync_read_time           = i40e_timesync_read_time,
	.timesync_write_time          = i40e_timesync_write_time,
	.get_reg                      = i40e_get_regs,
	.get_eeprom_length            = i40e_get_eeprom_length,
	.get_eeprom                   = i40e_get_eeprom,
	.get_module_info              = i40e_get_module_info,
	.get_module_eeprom            = i40e_get_module_eeprom,
	.mac_addr_set                 = i40e_set_default_mac_addr,
	.mtu_set                      = i40e_dev_mtu_set,
	.tm_ops_get                   = i40e_tm_ops_get,
	.tx_done_cleanup              = i40e_tx_done_cleanup,
	.get_monitor_addr             = i40e_get_monitor_addr,
};

/* Store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	{"rx_size_error_packets", offsetof(struct i40e_pf, rx_err1) -
				  offsetof(struct i40e_pf, stats)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
		sizeof(rte_i40e_stats_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
		mac_remote_faults)},
	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
		rx_length_errors)},
	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
		mac_short_packet_dropped)},
	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_big)},
	{"rx_flow_director_atr_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
	{"rx_flow_director_sb_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		tx_lpi_status)},
	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		rx_lpi_status)},
	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		tx_lpi_count)},
	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
		sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_rx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
		sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_tx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_tx)},
	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
		sizeof(rte_i40e_txq_prio_strings[0]))
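/*
 * Sketch of how these name/offset tables are typically consumed when
 * filling xstats (illustrative only, not the driver's exact loop):
 *
 *   for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++)
 *       xstats[count++].value =
 *           *(uint64_t *)((char *)&pf->stats +
 *                         rte_i40e_hw_port_strings[i].offset);
 */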

static int
eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	int i, retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
				&eth_da);
		if (retval)
			return retval;
	}

	if (eth_da.nb_representor_ports > 0 &&
	    eth_da.type != RTE_ETH_REPRESENTOR_VF) {
		PMD_DRV_LOG(ERR, "unsupported representor type: %s\n",
			    pci_dev->device.devargs->args);
		return -ENOTSUP;
	}

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
		sizeof(struct i40e_adapter),
		eth_dev_pci_specific_init, pci_dev,
		eth_i40e_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	/* probe VF representor ports */
	struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
		pci_dev->device.name);

	if (pf_ethdev == NULL)
		return -ENODEV;

	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		struct i40e_vf_representor representor = {
			.vf_id = eth_da.representor_ports[i],
			.switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
				pf_ethdev->data->dev_private)->switch_domain_id,
			.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
				pf_ethdev->data->dev_private)
		};

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			pci_dev->device.name, eth_da.representor_ports[i]);

		retval = rte_eth_dev_create(&pci_dev->device, name,
			sizeof(struct i40e_vf_representor), NULL, NULL,
			i40e_vf_representor_init, &representor);

		if (retval)
			PMD_DRV_LOG(ERR, "failed to create i40e vf "
				"representor %s.", name);
	}

	return 0;
}
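/*
 * Example devargs for the representor path above (hypothetical BDF):
 *   -a 0000:02:00.0,representor=[0-3]
 * rte_eth_devargs_parse() fills eth_da from the "representor" key, and one
 * "net_<pci bdf>_representor_<vf id>" port is created per listed VF.
 */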

static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return 0;

	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
		return rte_eth_dev_pci_generic_remove(pci_dev,
					i40e_vf_representor_uninit);
	else
		return rte_eth_dev_pci_generic_remove(pci_dev,
						eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
	.id_table = pci_id_i40e_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_i40e_pci_probe,
	.remove = eth_i40e_pci_remove,
};

static inline void
i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
			 uint32_t reg_val)
{
	uint32_t ori_reg_val;
	struct rte_eth_dev_data *dev_data =
		((struct i40e_adapter *)hw->back)->pf.dev_data;
	struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];

	ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
	i40e_write_rx_ctl(hw, reg_addr, reg_val);
	if (ori_reg_val != reg_val)
		PMD_DRV_LOG(WARNING,
			    "i40e device %s changed global register [0x%08x]."
			    " original: 0x%08x, new: 0x%08x",
			    dev->device->name, reg_addr, ori_reg_val, reg_val);
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
	/*
	 * Initialize registers for parsing packet type of QinQ
	 * This should be removed from code once proper
	 * configuration API is added to avoid configuration conflicts
	 * between ports of the same device.
	 */
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}

static inline void i40e_config_automask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	/* INTENA flag is not auto-cleared for interrupt */
	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;

	/* If support multi-driver, PF will use INT0. */
	if (!pf->support_multi_driver)
		val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

static inline void i40e_clear_automask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
	val &= ~(I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
		 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK);

	if (!pf->support_multi_driver)
		val &= ~I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
	int ret;

	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
				I40E_FLOW_CONTROL_ETHERTYPE, flags,
				pf->main_vsi_seid, 0,
				TRUE, NULL, NULL);
	if (ret)
		PMD_INIT_LOG(ERR,
			"Failed to add filter to drop flow control frames from VSIs.");
}

static int
floating_veb_list_handler(__rte_unused const char *key,
			  const char *floating_veb_value,
			  void *opaque)
{
	int idx = 0;
	unsigned int count = 0;
	char *end = NULL;
	int min, max;
	bool *vf_floating_veb = opaque;

	while (isblank(*floating_veb_value))
		floating_veb_value++;

	/* Reset floating VEB configuration for VFs */
	for (idx = 0; idx < I40E_MAX_VF; idx++)
		vf_floating_veb[idx] = false;

	min = I40E_MAX_VF;
	do {
		while (isblank(*floating_veb_value))
			floating_veb_value++;
		if (*floating_veb_value == '\0')
			return -1;
		errno = 0;
		idx = strtoul(floating_veb_value, &end, 10);
		if (errno || end == NULL)
			return -1;
		if (idx < 0)
			return -1;
		while (isblank(*end))
			end++;
		if (*end == '-') {
			min = idx;
		} else if ((*end == ';') || (*end == '\0')) {
			max = idx;
			if (min == I40E_MAX_VF)
				min = idx;
			if (max >= I40E_MAX_VF)
				max = I40E_MAX_VF - 1;
			for (idx = min; idx <= max; idx++) {
				vf_floating_veb[idx] = true;
				count++;
			}
			min = I40E_MAX_VF;
		} else {
			return -1;
		}
		floating_veb_value = end + 1;
	} while (*end != '\0');

	if (count == 0)
		return -1;

	return 0;
}
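/*
 * Example of the list syntax accepted by the handler above (VF ids are
 * hypothetical): floating_veb_list=0;3-5 marks VFs 0, 3, 4 and 5 for the
 * floating VEB; ';' separates entries and '-' denotes an inclusive range.
 */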

static void
config_vf_floating_veb(struct rte_devargs *devargs,
		       uint16_t floating_veb,
		       bool *vf_floating_veb)
{
	struct rte_kvargs *kvlist;
	int i;
	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

	if (!floating_veb)
		return;
	/* All the VFs attach to the floating VEB by default
	 * when the floating VEB is enabled.
	 */
	for (i = 0; i < I40E_MAX_VF; i++)
		vf_floating_veb[i] = true;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
	if (kvlist == NULL)
		return;

	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
		rte_kvargs_free(kvlist);
		return;
	}
	/* When the floating_veb_list parameter exists, all the VFs
	 * will attach to the legacy VEB firstly, then configure VFs
	 * to the floating VEB according to the floating_veb_list.
	 */
	if (rte_kvargs_process(kvlist, floating_veb_list,
			       floating_veb_list_handler,
			       vf_floating_veb) < 0) {
		rte_kvargs_free(kvlist);
		return;
	}
	rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
			    const char *value,
			    __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* Floating VEB is enabled when there's key-value:
	 * enable_floating_veb=1
	 */
	if (rte_kvargs_process(kvlist, floating_veb_key,
			       i40e_check_floating_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}

static void
config_floating_veb(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
		pf->floating_veb =
			is_floating_veb_supported(pci_dev->device.devargs);
		config_vf_floating_veb(pci_dev->device.devargs,
				       pf->floating_veb,
				       pf->floating_veb_list);
	} else {
		pf->floating_veb = false;
	}
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	char ethertype_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters ethertype_hash_params = {
		.name = ethertype_hash_name,
		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
		.key_len = sizeof(struct i40e_ethertype_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize ethertype filter rule list and hash */
	TAILQ_INIT(&ethertype_rule->ethertype_list);
	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
		 "ethertype_%s", dev->device->name);
	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
	if (!ethertype_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
		return -EINVAL;
	}
	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
				       sizeof(struct i40e_ethertype_filter *) *
				       I40E_MAX_ETHERTYPE_FILTER_NUM,
				       0);
	if (!ethertype_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for ethertype hash map!");
		ret = -ENOMEM;
		goto err_ethertype_hash_map_alloc;
	}

	return 0;

err_ethertype_hash_map_alloc:
	rte_hash_free(ethertype_rule->hash_table);

	return ret;
}

static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	char tunnel_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters tunnel_hash_params = {
		.name = tunnel_hash_name,
		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
		.key_len = sizeof(struct i40e_tunnel_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize tunnel filter rule list and hash */
	TAILQ_INIT(&tunnel_rule->tunnel_list);
	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
		 "tunnel_%s", dev->device->name);
	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
	if (!tunnel_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
		return -EINVAL;
	}
	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
				    sizeof(struct i40e_tunnel_filter *) *
				    I40E_MAX_TUNNEL_FILTER_NUM,
				    0);
	if (!tunnel_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for tunnel hash map!");
		ret = -ENOMEM;
		goto err_tunnel_hash_map_alloc;
	}

	return 0;

err_tunnel_hash_map_alloc:
	rte_hash_free(tunnel_rule->hash_table);

	return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
	uint32_t best = hw->func_caps.fd_filters_best_effort;
	enum i40e_filter_pctype pctype;
	struct rte_bitmap *bmp = NULL;
	uint32_t bmp_size;
	void *mem = NULL;
	uint32_t i = 0;
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = I40E_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct i40e_fdir_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize flow director filter rule list and hash */
	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}

	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
					  sizeof(struct i40e_fdir_filter *) *
					  I40E_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}

	fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
			sizeof(struct i40e_fdir_filter) *
			I40E_MAX_FDIR_FILTER_NUM,
			0);

	if (!fdir_info->fdir_filter_array) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir filter array!");
		ret = -ENOMEM;
		goto err_fdir_filter_array_alloc;
	}

	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
		pf->fdir.flow_count[pctype] = 0;

	fdir_info->fdir_space_size = alloc + best;
	fdir_info->fdir_actual_cnt = 0;
	fdir_info->fdir_guarantee_total_space = alloc;
	fdir_info->fdir_guarantee_free_space =
		fdir_info->fdir_guarantee_total_space;

	PMD_DRV_LOG(INFO, "FDIR guarantee space: %u, best_effort space %u.", alloc, best);

	fdir_info->fdir_flow_pool.pool =
			rte_zmalloc("i40e_fdir_entry",
				sizeof(struct i40e_fdir_entry) *
				fdir_info->fdir_space_size,
				0);

	if (!fdir_info->fdir_flow_pool.pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for bitmap flow!");
		ret = -ENOMEM;
		goto err_fdir_bitmap_flow_alloc;
	}

	for (i = 0; i < fdir_info->fdir_space_size; i++)
		fdir_info->fdir_flow_pool.pool[i].idx = i;

	bmp_size =
		rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
	mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
	if (mem == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir bitmap!");
		ret = -ENOMEM;
		goto err_fdir_mem_alloc;
	}
	bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
	if (bmp == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to initialize fdir bitmap!");
		ret = -ENOMEM;
		goto err_fdir_bmp_alloc;
	}
	for (i = 0; i < fdir_info->fdir_space_size; i++)
		rte_bitmap_set(bmp, i);

	fdir_info->fdir_flow_pool.bitmap = bmp;

	return 0;

err_fdir_bmp_alloc:
	rte_free(mem);
err_fdir_mem_alloc:
	rte_free(fdir_info->fdir_flow_pool.pool);
err_fdir_bitmap_flow_alloc:
	rte_free(fdir_info->fdir_filter_array);
err_fdir_filter_array_alloc:
	rte_free(fdir_info->hash_map);
err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
i40e_init_customized_info(struct i40e_pf *pf)
{
	int i;

	/* Initialize customized pctype */
	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
		pf->customized_pctype[i].index = i;
		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
		pf->customized_pctype[i].valid = false;
	}

	pf->gtp_support = false;
	pf->esp_support = false;
}

static void
i40e_init_filter_invalidation(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	uint32_t glqf_ctl_reg = 0;

	glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
	if (!pf->support_multi_driver) {
		fdir_info->fdir_invalprio = 1;
		glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
		PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
		i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
	} else {
		if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
			fdir_info->fdir_invalprio = 1;
			PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
		} else {
			fdir_info->fdir_invalprio = 0;
			PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
		}
	}
}

void
i40e_init_queue_region_conf(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_queue_regions *info = &pf->queue_region;
	uint16_t i;

	for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
		i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);

	memset(info, 0, sizeof(struct i40e_queue_regions));
}

static int
i40e_parse_multi_drv_handler(__rte_unused const char *key,
			       const char *value,
			       void *opaque)
{
	struct i40e_pf *pf;
	unsigned long support_multi_driver;
	char *end;

	pf = (struct i40e_pf *)opaque;

	errno = 0;
	support_multi_driver = strtoul(value, &end, 10);
	if (errno != 0 || end == value || *end != 0) {
		PMD_DRV_LOG(WARNING, "Wrong global configuration");
		return -(EINVAL);
	}

	if (support_multi_driver == 1 || support_multi_driver == 0)
		pf->support_multi_driver = (bool)support_multi_driver;
	else
		PMD_DRV_LOG(WARNING,
			    "%s must be 1 or 0, enabling global configuration by default",
			    ETH_I40E_SUPPORT_MULTI_DRIVER);
	return 0;
}

static int
i40e_support_multi_driver(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_kvargs *kvlist;
	int kvargs_count;

	/* Enable global configuration by default */
	pf->support_multi_driver = false;

	if (!dev->device->devargs)
		return 0;

	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;

	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
	if (!kvargs_count) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	if (kvargs_count > 1)
		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
			    "the first invalid or last valid one is used!",
			    ETH_I40E_SUPPORT_MULTI_DRIVER);

	if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
			       i40e_parse_multi_drv_handler, pf) < 0) {
		rte_kvargs_free(kvlist);
		return -EINVAL;
	}

	rte_kvargs_free(kvlist);
	return 0;
}

static int
i40e_aq_debug_write_global_register(struct i40e_hw *hw,
				    uint32_t reg_addr, uint64_t reg_val,
				    struct i40e_asq_cmd_details *cmd_details)
{
	uint64_t ori_reg_val;
	struct rte_eth_dev_data *dev_data =
		((struct i40e_adapter *)hw->back)->pf.dev_data;
	struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
	int ret;

	ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR,
			    "Fail to debug read from 0x%08x",
			    reg_addr);
		return -EIO;
	}

	if (ori_reg_val != reg_val)
		PMD_DRV_LOG(WARNING,
			    "i40e device %s changed global register [0x%08x]."
			    " original: 0x%"PRIx64", after: 0x%"PRIx64,
			    dev->device->name, reg_addr, ori_reg_val, reg_val);

	return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
}

static int
read_vf_msg_config(__rte_unused const char *key,
			       const char *value,
			       void *opaque)
{
	struct i40e_vf_msg_cfg *cfg = opaque;

	if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
			&cfg->ignore_second) != 3) {
		memset(cfg, 0, sizeof(*cfg));
		PMD_DRV_LOG(ERR, "format error! example: "
				"%s=60@120:180", ETH_I40E_VF_MSG_CFG);
		return -EINVAL;
	}

	/*
	 * If the message validation function has been enabled, both 'period'
	 * and 'ignore_second' must be greater than 0.
	 */
	if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
		memset(cfg, 0, sizeof(*cfg));
		PMD_DRV_LOG(ERR, "%s error! the second and third"
				" numbers must be greater than 0!",
				ETH_I40E_VF_MSG_CFG);
		return -EINVAL;
	}

	return 0;
}
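/*
 * Example of the format parsed above (values illustrative):
 *   vf_msg_cfg=60@120:180
 * yields max_msg=60, period=120, ignore_second=180; per the checks above,
 * a non-zero max_msg requires both period and ignore_second to be non-zero.
 */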
1372 
1373 static int
i40e_parse_vf_msg_config(struct rte_eth_dev * dev,struct i40e_vf_msg_cfg * msg_cfg)1374 i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
1375 		struct i40e_vf_msg_cfg *msg_cfg)
1376 {
1377 	struct rte_kvargs *kvlist;
1378 	int kvargs_count;
1379 	int ret = 0;
1380 
1381 	memset(msg_cfg, 0, sizeof(*msg_cfg));
1382 
1383 	if (!dev->device->devargs)
1384 		return ret;
1385 
1386 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1387 	if (!kvlist)
1388 		return -EINVAL;
1389 
1390 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
1391 	if (!kvargs_count)
1392 		goto free_end;
1393 
1394 	if (kvargs_count > 1) {
1395 		PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
1396 				ETH_I40E_VF_MSG_CFG);
1397 		ret = -EINVAL;
1398 		goto free_end;
1399 	}
1400 
1401 	if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
1402 			read_vf_msg_config, msg_cfg) < 0)
1403 		ret = -EINVAL;
1404 
1405 free_end:
1406 	rte_kvargs_free(kvlist);
1407 	return ret;
1408 }
1409 
1410 #define I40E_ALARM_INTERVAL 50000 /* us */
1411 
1412 static int
eth_i40e_dev_init(struct rte_eth_dev * dev,void * init_params __rte_unused)1413 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1414 {
1415 	struct rte_pci_device *pci_dev;
1416 	struct rte_intr_handle *intr_handle;
1417 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1418 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1419 	struct i40e_vsi *vsi;
1420 	int ret;
1421 	uint32_t len, val;
1422 	uint8_t aq_fail = 0;
1423 
1424 	PMD_INIT_FUNC_TRACE();
1425 
1426 	dev->dev_ops = &i40e_eth_dev_ops;
1427 	dev->rx_queue_count = i40e_dev_rx_queue_count;
1428 	dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
1429 	dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
1430 	dev->rx_pkt_burst = i40e_recv_pkts;
1431 	dev->tx_pkt_burst = i40e_xmit_pkts;
1432 	dev->tx_pkt_prepare = i40e_prep_pkts;
1433 
1434 	/* For secondary processes, we don't initialise any further as the
1435 	 * primary process has already done this work. Only check whether we
1436 	 * need a different RX function. */
1437 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1438 		i40e_set_rx_function(dev);
1439 		i40e_set_tx_function(dev);
1440 		return 0;
1441 	}
1442 	i40e_set_default_ptype_table(dev);
1443 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1444 	intr_handle = pci_dev->intr_handle;
1445 
1446 	rte_eth_copy_pci_info(dev, pci_dev);
1447 
1448 	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1449 	pf->dev_data = dev->data;
1450 
1451 	hw->back = I40E_PF_TO_ADAPTER(pf);
1452 	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1453 	if (!hw->hw_addr) {
1454 		PMD_INIT_LOG(ERR,
1455 			"Hardware is not available, as address is NULL");
1456 		return -ENODEV;
1457 	}
1458 
1459 	hw->vendor_id = pci_dev->id.vendor_id;
1460 	hw->device_id = pci_dev->id.device_id;
1461 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1462 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1463 	hw->bus.device = pci_dev->addr.devid;
1464 	hw->bus.func = pci_dev->addr.function;
1465 	hw->adapter_stopped = 0;
1466 	hw->adapter_closed = 0;
1467 
1468 	/* Init switch device pointer */
1469 	hw->switch_dev = NULL;
1470 
1471 	/*
1472 	 * Switch Tag value should not be identical to either the First Tag
1473 	 * or Second Tag values. So set something other than common Ethertype
1474 	 * for internal switching.
1475 	 */
1476 	hw->switch_tag = 0xffff;
1477 
1478 	val = I40E_READ_REG(hw, I40E_GL_FWSTS);
1479 	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
1480 		PMD_INIT_LOG(ERR, "\nERROR: "
1481 			"Firmware recovery mode detected. Limiting functionality.\n"
1482 			"Refer to the Intel(R) Ethernet Adapters and Devices "
1483 			"User Guide for details on firmware recovery mode.");
1484 		return -EIO;
1485 	}
1486 
1487 	i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
1488 	/* Check if need to support multi-driver */
1489 	i40e_support_multi_driver(dev);
1490 
1491 	/* Make sure all is clean before doing PF reset */
1492 	i40e_clear_hw(hw);
1493 
1494 	/* Reset here to make sure all is clean for each PF */
1495 	ret = i40e_pf_reset(hw);
1496 	if (ret) {
1497 		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1498 		return ret;
1499 	}
1500 
1501 	/* Initialize the shared code (base driver) */
1502 	ret = i40e_init_shared_code(hw);
1503 	if (ret) {
1504 		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1505 		return ret;
1506 	}
1507 
1508 	/* Initialize the parameters for adminq */
1509 	i40e_init_adminq_parameter(hw);
1510 	ret = i40e_init_adminq(hw);
1511 	if (ret != I40E_SUCCESS) {
1512 		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1513 		return -EIO;
1514 	}
1515 	/* Firmware of SFP X722 does not support 802.1ad frames */
1516 	if (hw->device_id == I40E_DEV_ID_SFP_X722 ||
1517 		hw->device_id == I40E_DEV_ID_SFP_I_X722)
1518 		hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
1519 
1520 	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1521 		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1522 		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
1523 		     ((hw->nvm.version >> 12) & 0xf),
1524 		     ((hw->nvm.version >> 4) & 0xff),
1525 		     (hw->nvm.version & 0xf), hw->nvm.eetrack);
1526 
1527 	/* Initialize the hardware */
1528 	i40e_hw_init(dev);
1529 
1530 	i40e_config_automask(pf);
1531 
1532 	i40e_set_default_pctype_table(dev);
1533 
1534 	/*
1535 	 * To work around an NVM issue, initialize the QinQ packet-type
1536 	 * registers in software.
1537 	 * This should be removed once the issue is fixed in the NVM.
1538 	 */
1539 	if (!pf->support_multi_driver)
1540 		i40e_GLQF_reg_init(hw);
1541 
1542 	/* Initialize the input set for filters (hash and fd) to default value */
1543 	i40e_filter_input_set_init(pf);
1544 
1545 	/* initialise the L3_MAP register */
1546 	if (!pf->support_multi_driver) {
1547 		ret = i40e_aq_debug_write_global_register(hw,
1548 						   I40E_GLQF_L3_MAP(40),
1549 						   0x00000028,	NULL);
1550 		if (ret)
1551 			PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1552 				     ret);
1553 		PMD_INIT_LOG(DEBUG,
1554 			     "Global register 0x%08x is changed to 0x28",
1555 			     I40E_GLQF_L3_MAP(40));
1556 	}
1557 
1558 	/* Need the special FW version to support floating VEB */
1559 	config_floating_veb(dev);
1560 	/* Clear PXE mode */
1561 	i40e_clear_pxe_mode(hw);
1562 	i40e_dev_sync_phy_type(hw);
1563 
1564 	/*
1565 	 * On X710, performance numbers are far from expectations on recent
1566 	 * firmware versions. The fix for this issue may not be integrated
1567 	 * in the following firmware version, so a workaround in the
1568 	 * software driver is needed. It modifies the initial values of 3
1569 	 * internal-only registers. Note that the workaround can be removed
1570 	 * when the issue is fixed in a future firmware version.
1571 	 */
1572 	i40e_configure_registers(hw);
1573 
1574 	/* Get hw capabilities */
1575 	ret = i40e_get_cap(hw);
1576 	if (ret != I40E_SUCCESS) {
1577 		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1578 		goto err_get_capabilities;
1579 	}
1580 
1581 	/* Initialize parameters for PF */
1582 	ret = i40e_pf_parameter_init(dev);
1583 	if (ret != 0) {
1584 		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1585 		goto err_parameter_init;
1586 	}
1587 
1588 	/* Initialize the queue management */
1589 	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1590 	if (ret < 0) {
1591 		PMD_INIT_LOG(ERR, "Failed to init queue pool");
1592 		goto err_qp_pool_init;
1593 	}
1594 	ret = i40e_res_pool_init(&pf->msix_pool, 1,
1595 				hw->func_caps.num_msix_vectors - 1);
1596 	if (ret < 0) {
1597 		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1598 		goto err_msix_pool_init;
1599 	}
1600 
1601 	/* Initialize lan hmc */
1602 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1603 				hw->func_caps.num_rx_qp, 0, 0);
1604 	if (ret != I40E_SUCCESS) {
1605 		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1606 		goto err_init_lan_hmc;
1607 	}
1608 
1609 	/* Configure lan hmc */
1610 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1611 	if (ret != I40E_SUCCESS) {
1612 		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1613 		goto err_configure_lan_hmc;
1614 	}
1615 
1616 	/* Get and check the mac address */
1617 	i40e_get_mac_addr(hw, hw->mac.addr);
1618 	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1619 		PMD_INIT_LOG(ERR, "mac address is not valid");
1620 		ret = -EIO;
1621 		goto err_get_mac_addr;
1622 	}
1623 	/* Copy the permanent MAC address */
1624 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1625 			(struct rte_ether_addr *)hw->mac.perm_addr);
1626 
1627 	/* Disable flow control */
1628 	hw->fc.requested_mode = I40E_FC_NONE;
1629 	i40e_set_fc(hw, &aq_fail, TRUE);
1630 
1631 	/* Set the global registers with default ether type value */
1632 	if (!pf->support_multi_driver) {
1633 		ret = i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
1634 					 RTE_ETHER_TYPE_VLAN);
1635 		if (ret != I40E_SUCCESS) {
1636 			PMD_INIT_LOG(ERR,
1637 				     "Failed to set the default outer "
1638 				     "VLAN ether type");
1639 			goto err_setup_pf_switch;
1640 		}
1641 	}
1642 
1643 	/* PF setup, which includes VSI setup */
1644 	ret = i40e_pf_setup(pf);
1645 	if (ret) {
1646 		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1647 		goto err_setup_pf_switch;
1648 	}
1649 
1650 	vsi = pf->main_vsi;
1651 
1652 	/* Disable double vlan by default */
1653 	i40e_vsi_config_double_vlan(vsi, FALSE);
1654 
1655 	/* Disable S-TAG identification when floating_veb is disabled */
1656 	if (!pf->floating_veb) {
1657 		ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1658 		if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1659 			ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1660 			I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1661 		}
1662 	}
1663 
1664 	if (!vsi->max_macaddrs)
1665 		len = RTE_ETHER_ADDR_LEN;
1666 	else
1667 		len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
1668 
1669 	/* Should be after VSI initialized */
1670 	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1671 	if (!dev->data->mac_addrs) {
1672 		PMD_INIT_LOG(ERR,
1673 			"Failed to allocate memory for storing MAC addresses");
1674 		goto err_mac_alloc;
1675 	}
1676 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1677 					&dev->data->mac_addrs[0]);
1678 
1679 	/* Init dcb to sw mode by default */
1680 	ret = i40e_dcb_init_configure(dev, TRUE);
1681 	if (ret != I40E_SUCCESS) {
1682 		PMD_INIT_LOG(INFO, "Failed to init dcb.");
1683 		pf->flags &= ~I40E_FLAG_DCB;
1684 	}
1685 	/* Update HW struct after DCB configuration */
1686 	i40e_get_cap(hw);
1687 
1688 	/* initialize pf host driver to setup SRIOV resource if applicable */
1689 	i40e_pf_host_init(dev);
1690 
1691 	/* register callback func to eal lib */
1692 	rte_intr_callback_register(intr_handle,
1693 				   i40e_dev_interrupt_handler, dev);
1694 
1695 	/* configure and enable device interrupt */
1696 	i40e_pf_config_irq0(hw, TRUE);
1697 	i40e_pf_enable_irq0(hw);
1698 
1699 	/* enable uio intr after callback register */
1700 	rte_intr_enable(intr_handle);
1701 
1702 	/* By default disable flexible payload in global configuration */
1703 	if (!pf->support_multi_driver)
1704 		i40e_flex_payload_reg_set_default(hw);
1705 
1706 	/*
1707 	 * Add an ethertype filter to drop all flow control frames transmitted
1708 	 * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1709 	 * from VSIs. By doing so, we stop VFs from sending out PAUSE or PFC
1710 	 * frames to the wire.
1711 	i40e_add_tx_flow_control_drop_filter(pf);
1712 
1713 	/* initialize RSS rule list */
1714 	TAILQ_INIT(&pf->rss_config_list);
1715 
1716 	/* initialize Traffic Manager configuration */
1717 	i40e_tm_conf_init(dev);
1718 
1719 	/* Initialize customized information */
1720 	i40e_init_customized_info(pf);
1721 
1722 	/* Initialize the filter invalidation configuration */
1723 	i40e_init_filter_invalidation(pf);
1724 
1725 	ret = i40e_init_ethtype_filter_list(dev);
1726 	if (ret < 0)
1727 		goto err_init_ethtype_filter_list;
1728 	ret = i40e_init_tunnel_filter_list(dev);
1729 	if (ret < 0)
1730 		goto err_init_tunnel_filter_list;
1731 	ret = i40e_init_fdir_filter_list(dev);
1732 	if (ret < 0)
1733 		goto err_init_fdir_filter_list;
1734 
1735 	/* initialize queue region configuration */
1736 	i40e_init_queue_region_conf(dev);
1737 
1738 	/* reset all stats of the device, including pf and main vsi */
1739 	i40e_dev_stats_reset(dev);
1740 
1741 	return 0;
1742 
1743 err_init_fdir_filter_list:
1744 	rte_hash_free(pf->tunnel.hash_table);
1745 	rte_free(pf->tunnel.hash_map);
1746 err_init_tunnel_filter_list:
1747 	rte_hash_free(pf->ethertype.hash_table);
1748 	rte_free(pf->ethertype.hash_map);
1749 err_init_ethtype_filter_list:
1750 	rte_intr_callback_unregister(intr_handle,
1751 		i40e_dev_interrupt_handler, dev);
1752 	rte_free(dev->data->mac_addrs);
1753 	dev->data->mac_addrs = NULL;
1754 err_mac_alloc:
1755 	i40e_vsi_release(pf->main_vsi);
1756 err_setup_pf_switch:
1757 err_get_mac_addr:
1758 err_configure_lan_hmc:
1759 	(void)i40e_shutdown_lan_hmc(hw);
1760 err_init_lan_hmc:
1761 	i40e_res_pool_destroy(&pf->msix_pool);
1762 err_msix_pool_init:
1763 	i40e_res_pool_destroy(&pf->qp_pool);
1764 err_qp_pool_init:
1765 err_parameter_init:
1766 err_get_capabilities:
1767 	(void)i40e_shutdown_adminq(hw);
1768 
1769 	return ret;
1770 }
1771 
1772 static void
1773 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1774 {
1775 	struct i40e_ethertype_filter *p_ethertype;
1776 	struct i40e_ethertype_rule *ethertype_rule;
1777 
1778 	ethertype_rule = &pf->ethertype;
1779 	/* Remove all ethertype filter rules and hash */
1780 	rte_free(ethertype_rule->hash_map);
1781 	rte_hash_free(ethertype_rule->hash_table);
1782 
1783 	while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1784 		TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1785 			     p_ethertype, rules);
1786 		rte_free(p_ethertype);
1787 	}
1788 }
1789 
1790 static void
1791 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1792 {
1793 	struct i40e_tunnel_filter *p_tunnel;
1794 	struct i40e_tunnel_rule *tunnel_rule;
1795 
1796 	tunnel_rule = &pf->tunnel;
1797 	/* Remove all tunnel director rules and hash */
1798 	rte_free(tunnel_rule->hash_map);
1799 	rte_hash_free(tunnel_rule->hash_table);
1800 
1801 	while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1802 		TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1803 		rte_free(p_tunnel);
1804 	}
1805 }
1806 
1807 static void
1808 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1809 {
1810 	struct i40e_fdir_filter *p_fdir;
1811 	struct i40e_fdir_info *fdir_info;
1812 
1813 	fdir_info = &pf->fdir;
1814 
1815 	/* Remove all flow director rules */
1816 	while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
1817 		TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1818 }
1819 
1820 static void
1821 i40e_fdir_memory_cleanup(struct i40e_pf *pf)
1822 {
1823 	struct i40e_fdir_info *fdir_info;
1824 
1825 	fdir_info = &pf->fdir;
1826 
1827 	/* flow director memory cleanup */
1828 	rte_free(fdir_info->hash_map);
1829 	rte_hash_free(fdir_info->hash_table);
1830 	rte_free(fdir_info->fdir_flow_pool.bitmap);
1831 	rte_free(fdir_info->fdir_flow_pool.pool);
1832 	rte_free(fdir_info->fdir_filter_array);
1833 }
1834 
1835 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1836 {
1837 	/*
1838 	 * By default, disable flexible payload
1839 	 * for the corresponding L2/L3/L4 layers.
1840 	 */
1841 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1842 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1843 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1844 }
1845 
1846 static int
1847 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1848 {
1849 	struct i40e_hw *hw;
1850 
1851 	PMD_INIT_FUNC_TRACE();
1852 
1853 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1854 		return 0;
1855 
1856 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1857 
1858 	if (hw->adapter_closed == 0)
1859 		i40e_dev_close(dev);
1860 
1861 	return 0;
1862 }
1863 
1864 static int
1865 i40e_dev_configure(struct rte_eth_dev *dev)
1866 {
1867 	struct i40e_adapter *ad =
1868 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1869 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1870 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1871 	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1872 	int i, ret;
1873 
1874 	ret = i40e_dev_sync_phy_type(hw);
1875 	if (ret)
1876 		return ret;
1877 
1878 	/* Initialize to TRUE. If any Rx queue doesn't meet the bulk
1879 	 * allocation or vector Rx preconditions, we will reset it.
1880 	 */
1881 	ad->rx_bulk_alloc_allowed = true;
1882 	ad->rx_vec_allowed = true;
1883 	ad->tx_simple_allowed = true;
1884 	ad->tx_vec_allowed = true;
1885 
1886 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
1887 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
1888 
1889 	/* Only the legacy filter API needs the following fdir config. So when
1890 	 * the legacy filter API is deprecated, the following code should also
1891 	 * be removed.
1892 	 */
1893 	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1894 		ret = i40e_fdir_setup(pf);
1895 		if (ret != I40E_SUCCESS) {
1896 			PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1897 			return -ENOTSUP;
1898 		}
1899 		ret = i40e_fdir_configure(dev);
1900 		if (ret < 0) {
1901 			PMD_DRV_LOG(ERR, "failed to configure fdir.");
1902 			goto err;
1903 		}
1904 	} else
1905 		i40e_fdir_teardown(pf);
1906 
1907 	ret = i40e_dev_init_vlan(dev);
1908 	if (ret < 0)
1909 		goto err;
1910 
1911 	/* VMDQ setup.
1912 	 *  The general PMD call sequence is NIC init, configure,
1913 	 *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() will try to
1914 	 *  look up the VSI that a specific queue belongs to when VMDQ is
1915 	 *  applicable, so VMDQ setup has to be done before
1916 	 *  rx/tx_queue_setup(); this function is a good place for vmdq_setup.
1917 	 *  RSS setup needs the actual configured RX queue number, which is
1918 	 *  only available after rx_queue_setup(), so dev_start() is a good
1919 	 *  place for RSS setup.
1920 	 */
1921 	if (mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) {
1922 		ret = i40e_vmdq_setup(dev);
1923 		if (ret)
1924 			goto err;
1925 	}
1926 
1927 	if (mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) {
1928 		ret = i40e_dcb_setup(dev);
1929 		if (ret) {
1930 			PMD_DRV_LOG(ERR, "failed to configure DCB.");
1931 			goto err_dcb;
1932 		}
1933 	}
1934 
1935 	TAILQ_INIT(&pf->flow_list);
1936 
1937 	return 0;
1938 
1939 err_dcb:
1940 	/* need to release vmdq resource if exists */
1941 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1942 		i40e_vsi_release(pf->vmdq[i].vsi);
1943 		pf->vmdq[i].vsi = NULL;
1944 	}
1945 	rte_free(pf->vmdq);
1946 	pf->vmdq = NULL;
1947 err:
1948 	/* Need to release fdir resource if exists.
1949 	 * Only legacy filter API needs the following fdir config. So when the
1950 	 * legacy filter API is deprecated, the following code should also be
1951 	 * removed.
1952 	 */
1953 	i40e_fdir_teardown(pf);
1954 	return ret;
1955 }
1956 
1957 void
1958 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1959 {
1960 	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
1961 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1962 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1963 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1964 	uint16_t msix_vect = vsi->msix_intr;
1965 	uint16_t i;
1966 
1967 	for (i = 0; i < vsi->nb_qps; i++) {
1968 		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1969 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1970 		rte_wmb();
1971 	}
1972 
1973 	if (vsi->type != I40E_VSI_SRIOV) {
1974 		if (!rte_intr_allow_others(intr_handle)) {
1975 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1976 				       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1977 			I40E_WRITE_REG(hw,
1978 				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1979 				       0);
1980 		} else {
1981 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1982 				       I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1983 			I40E_WRITE_REG(hw,
1984 				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1985 						       msix_vect - 1), 0);
1986 		}
1987 	} else {
1988 		uint32_t reg;
1989 		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1990 			vsi->user_param + (msix_vect - 1);
1991 
1992 		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1993 			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1994 	}
1995 	I40E_WRITE_FLUSH(hw);
1996 }
1997 
1998 static void
1999 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
2000 		       int base_queue, int nb_queue,
2001 		       uint16_t itr_idx)
2002 {
2003 	int i;
2004 	uint32_t val;
2005 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2006 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2007 
2008 	/* Bind all RX queues to allocated MSIX interrupt */
2009 	for (i = 0; i < nb_queue; i++) {
2010 		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2011 			itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
2012 			((base_queue + i + 1) <<
2013 			 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2014 			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2015 			I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2016 
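		/*
		 * Queues bound to the same vector are chained through the
		 * NEXTQ index; the last queue terminates the chain by
		 * setting all NEXTQ_INDX bits.
		 */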
2017 		if (i == nb_queue - 1)
2018 			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
2019 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
2020 	}
2021 
2022 	/* Write first RX queue to Link list register as the head element */
2023 	if (vsi->type != I40E_VSI_SRIOV) {
2024 		uint16_t interval =
2025 			i40e_calc_itr_interval(1, pf->support_multi_driver);
2026 
2027 		if (msix_vect == I40E_MISC_VEC_ID) {
2028 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2029 				       (base_queue <<
2030 					I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2031 				       (0x0 <<
2032 					I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2033 			I40E_WRITE_REG(hw,
2034 				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2035 				       interval);
2036 		} else {
2037 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2038 				       (base_queue <<
2039 					I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2040 				       (0x0 <<
2041 					I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2042 			I40E_WRITE_REG(hw,
2043 				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2044 						       msix_vect - 1),
2045 				       interval);
2046 		}
2047 	} else {
2048 		uint32_t reg;
2049 
2050 		if (msix_vect == I40E_MISC_VEC_ID) {
2051 			I40E_WRITE_REG(hw,
2052 				       I40E_VPINT_LNKLST0(vsi->user_param),
2053 				       (base_queue <<
2054 					I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2055 				       (0x0 <<
2056 					I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2057 		} else {
2058 			/* num_msix_vectors_vf needs to exclude irq0 */
2059 			reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2060 				vsi->user_param + (msix_vect - 1);
2061 
2062 			I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2063 				       (base_queue <<
2064 					I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2065 				       (0x0 <<
2066 					I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2067 		}
2068 	}
2069 
2070 	I40E_WRITE_FLUSH(hw);
2071 }
2072 
2073 int
2074 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
2075 {
2076 	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2077 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2078 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2079 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2080 	uint16_t msix_vect = vsi->msix_intr;
2081 	uint16_t nb_msix = RTE_MIN(vsi->nb_msix,
2082 				   rte_intr_nb_efd_get(intr_handle));
2083 	uint16_t queue_idx = 0;
2084 	int record = 0;
2085 	int i;
2086 
2087 	for (i = 0; i < vsi->nb_qps; i++) {
2088 		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2089 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2090 	}
2091 
2092 	/* VF bind interrupt */
2093 	if (vsi->type == I40E_VSI_SRIOV) {
2094 		if (vsi->nb_msix == 0) {
2095 			PMD_DRV_LOG(ERR, "No msix resource");
2096 			return -EINVAL;
2097 		}
2098 		__vsi_queues_bind_intr(vsi, msix_vect,
2099 				       vsi->base_queue, vsi->nb_qps,
2100 				       itr_idx);
2101 		return 0;
2102 	}
2103 
2104 	/* PF & VMDq bind interrupt */
2105 	if (rte_intr_dp_is_en(intr_handle)) {
2106 		if (vsi->type == I40E_VSI_MAIN) {
2107 			queue_idx = 0;
2108 			record = 1;
2109 		} else if (vsi->type == I40E_VSI_VMDQ2) {
2110 			struct i40e_vsi *main_vsi =
2111 				I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
2112 			queue_idx = vsi->base_queue - main_vsi->nb_qps;
2113 			record = 1;
2114 		}
2115 	}
2116 
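	/*
	 * Map queues to vectors 1:1 while vectors remain; once they run
	 * out, map all remaining queues onto a single shared vector.
	 */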
2117 	for (i = 0; i < vsi->nb_used_qps; i++) {
2118 		if (vsi->nb_msix == 0) {
2119 			PMD_DRV_LOG(ERR, "No msix resource");
2120 			return -EINVAL;
2121 		} else if (nb_msix <= 1) {
2122 			if (!rte_intr_allow_others(intr_handle))
2123 				/* allow sharing MISC_VEC_ID */
2124 				msix_vect = I40E_MISC_VEC_ID;
2125 
2126 			/* not enough msix vectors, map all queues to one */
2127 			__vsi_queues_bind_intr(vsi, msix_vect,
2128 					       vsi->base_queue + i,
2129 					       vsi->nb_used_qps - i,
2130 					       itr_idx);
2131 			for (; !!record && i < vsi->nb_used_qps; i++)
2132 				rte_intr_vec_list_index_set(intr_handle,
2133 						queue_idx + i, msix_vect);
2134 			break;
2135 		}
2136 		/* 1:1 queue/msix_vect mapping */
2137 		__vsi_queues_bind_intr(vsi, msix_vect,
2138 				       vsi->base_queue + i, 1,
2139 				       itr_idx);
2140 		if (!!record)
2141 			if (rte_intr_vec_list_index_set(intr_handle,
2142 						queue_idx + i, msix_vect))
2143 				return -rte_errno;
2144 
2145 		msix_vect++;
2146 		nb_msix--;
2147 	}
2148 
2149 	return 0;
2150 }
2151 
2152 void
2153 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2154 {
2155 	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2156 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2157 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2158 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2159 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2160 	uint16_t msix_intr, i;
2161 
2162 	if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2163 		for (i = 0; i < vsi->nb_msix; i++) {
2164 			msix_intr = vsi->msix_intr + i;
2165 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2166 				I40E_PFINT_DYN_CTLN_INTENA_MASK |
2167 				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2168 				I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2169 		}
2170 	else
2171 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2172 			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
2173 			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2174 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2175 
2176 	I40E_WRITE_FLUSH(hw);
2177 }
2178 
2179 void
2180 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2181 {
2182 	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2183 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2184 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2185 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2186 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2187 	uint16_t msix_intr, i;
2188 
2189 	if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2190 		for (i = 0; i < vsi->nb_msix; i++) {
2191 			msix_intr = vsi->msix_intr + i;
2192 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2193 				       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2194 		}
2195 	else
2196 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2197 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2198 
2199 	I40E_WRITE_FLUSH(hw);
2200 }
2201 
2202 static inline uint8_t
2203 i40e_parse_link_speeds(uint16_t link_speeds)
2204 {
2205 	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2206 
2207 	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
2208 		link_speed |= I40E_LINK_SPEED_40GB;
2209 	if (link_speeds & RTE_ETH_LINK_SPEED_25G)
2210 		link_speed |= I40E_LINK_SPEED_25GB;
2211 	if (link_speeds & RTE_ETH_LINK_SPEED_20G)
2212 		link_speed |= I40E_LINK_SPEED_20GB;
2213 	if (link_speeds & RTE_ETH_LINK_SPEED_10G)
2214 		link_speed |= I40E_LINK_SPEED_10GB;
2215 	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
2216 		link_speed |= I40E_LINK_SPEED_1GB;
2217 	if (link_speeds & RTE_ETH_LINK_SPEED_100M)
2218 		link_speed |= I40E_LINK_SPEED_100MB;
2219 
2220 	return link_speed;
2221 }
2222 
2223 static int
2224 i40e_phy_conf_link(struct i40e_hw *hw,
2225 		   uint8_t abilities,
2226 		   uint8_t force_speed,
2227 		   bool is_up)
2228 {
2229 	enum i40e_status_code status;
2230 	struct i40e_aq_get_phy_abilities_resp phy_ab;
2231 	struct i40e_aq_set_phy_config phy_conf;
2232 	enum i40e_aq_phy_type cnt;
2233 	uint8_t avail_speed;
2234 	uint32_t phy_type_mask = 0;
2235 
2236 	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2237 			I40E_AQ_PHY_FLAG_PAUSE_RX |
2239 			I40E_AQ_PHY_FLAG_LOW_POWER;
2240 	int ret = -ENOTSUP;
2241 
2242 	/* Get the PHY capabilities to learn the available speeds. */
2243 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2244 					      NULL);
2245 	if (status) {
2246 		PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
2247 				status);
2248 		return ret;
2249 	}
2250 	avail_speed = phy_ab.link_speed;
2251 
2252 	/* Get the current PHY config. */
2253 	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2254 					      NULL);
2255 	if (status) {
2256 		PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
2257 				status);
2258 		return ret;
2259 	}
2260 
2261 	/* If the link needs to go up, autoneg is enabled and a speed is
2262 	 * already configured, there is no need to set it up again.
2263 	 */
2264 	if (is_up && phy_ab.phy_type != 0 &&
2265 		     abilities & I40E_AQ_PHY_AN_ENABLED &&
2266 		     phy_ab.link_speed != 0)
2267 		return I40E_SUCCESS;
2268 
2269 	memset(&phy_conf, 0, sizeof(phy_conf));
2270 
2271 	/* bits 0-2 use the values from get_phy_abilities_resp */
2272 	abilities &= ~mask;
2273 	abilities |= phy_ab.abilities & mask;
2274 
2275 	phy_conf.abilities = abilities;
2276 
2277 	/* If the link needs to go up but the forced speed is not supported,
2278 	 * warn the user and configure the default available speeds.
2279 	 */
2280 	if (is_up && !(force_speed & avail_speed)) {
2281 		PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
2282 		phy_conf.link_speed = avail_speed;
2283 	} else {
2284 		phy_conf.link_speed = is_up ? force_speed : avail_speed;
2285 	}
2286 
2287 	/* PHY type mask needs to include each type except PHY type extension */
2288 	for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2289 		phy_type_mask |= 1 << cnt;
2290 
2291 	/* use get_phy_abilities_resp value for the rest */
2292 	phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2293 	phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2294 		I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2295 		I40E_AQ_PHY_TYPE_EXT_25G_LR | I40E_AQ_PHY_TYPE_EXT_25G_AOC |
2296 		I40E_AQ_PHY_TYPE_EXT_25G_ACC) : 0;
2297 	phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2298 	phy_conf.eee_capability = phy_ab.eee_capability;
2299 	phy_conf.eeer = phy_ab.eeer_val;
2300 	phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2301 
2302 	PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2303 		    phy_ab.abilities, phy_ab.link_speed);
2304 	PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2305 		    phy_conf.abilities, phy_conf.link_speed);
2306 
2307 	status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2308 	if (status)
2309 		return ret;
2310 
2311 	return I40E_SUCCESS;
2312 }
2313 
2314 static int
2315 i40e_apply_link_speed(struct rte_eth_dev *dev)
2316 {
2317 	uint8_t speed;
2318 	uint8_t abilities = 0;
2319 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2320 	struct rte_eth_conf *conf = &dev->data->dev_conf;
2321 
2322 	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2323 		     I40E_AQ_PHY_LINK_ENABLED;
2324 
2325 	if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
2326 		conf->link_speeds = RTE_ETH_LINK_SPEED_40G |
2327 				    RTE_ETH_LINK_SPEED_25G |
2328 				    RTE_ETH_LINK_SPEED_20G |
2329 				    RTE_ETH_LINK_SPEED_10G |
2330 				    RTE_ETH_LINK_SPEED_1G |
2331 				    RTE_ETH_LINK_SPEED_100M;
2332 
2333 		abilities |= I40E_AQ_PHY_AN_ENABLED;
2334 	} else {
2335 		abilities &= ~I40E_AQ_PHY_AN_ENABLED;
2336 	}
2337 	speed = i40e_parse_link_speeds(conf->link_speeds);
2338 
2339 	return i40e_phy_conf_link(hw, abilities, speed, true);
2340 }
2341 
2342 static int
2343 i40e_dev_start(struct rte_eth_dev *dev)
2344 {
2345 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2346 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2347 	struct i40e_vsi *main_vsi = pf->main_vsi;
2348 	int ret, i;
2349 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2350 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2351 	uint32_t intr_vector = 0;
2352 	struct i40e_vsi *vsi;
2353 	uint16_t nb_rxq, nb_txq;
2354 	uint16_t max_frame_size;
2355 
2356 	hw->adapter_stopped = 0;
2357 
2358 	rte_intr_disable(intr_handle);
2359 
2360 	if ((rte_intr_cap_multiple(intr_handle) ||
2361 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
2362 	    dev->data->dev_conf.intr_conf.rxq != 0) {
2363 		intr_vector = dev->data->nb_rx_queues;
2364 		ret = rte_intr_efd_enable(intr_handle, intr_vector);
2365 		if (ret)
2366 			return ret;
2367 	}
2368 
2369 	if (rte_intr_dp_is_en(intr_handle)) {
2370 		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
2371 						   dev->data->nb_rx_queues)) {
2372 			PMD_INIT_LOG(ERR,
2373 				"Failed to allocate %d rx_queues intr_vec",
2374 				dev->data->nb_rx_queues);
2375 			return -ENOMEM;
2376 		}
2377 	}
2378 
2379 	/* Initialize VSI */
2380 	ret = i40e_dev_rxtx_init(pf);
2381 	if (ret != I40E_SUCCESS) {
2382 		PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2383 		return ret;
2384 	}
2385 
2386 	/* Map queues with MSIX interrupt */
2387 	main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2388 		pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2389 	ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2390 	if (ret < 0)
2391 		return ret;
2392 	i40e_vsi_enable_queues_intr(main_vsi);
2393 
2394 	/* Map VMDQ VSI queues with MSIX interrupt */
2395 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2396 		pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2397 		ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2398 						I40E_ITR_INDEX_DEFAULT);
2399 		if (ret < 0)
2400 			return ret;
2401 		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2402 	}
2403 
2404 	/* Enable all queues which have been configured */
2405 	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
2406 		ret = i40e_dev_rx_queue_start(dev, nb_rxq);
2407 		if (ret)
2408 			goto rx_err;
2409 	}
2410 
2411 	for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
2412 		ret = i40e_dev_tx_queue_start(dev, nb_txq);
2413 		if (ret)
2414 			goto tx_err;
2415 	}
2416 
2417 	/* Enable receiving broadcast packets */
2418 	ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2419 	if (ret != I40E_SUCCESS)
2420 		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2421 
2422 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2423 		ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2424 						true, NULL);
2425 		if (ret != I40E_SUCCESS)
2426 			PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2427 	}
2428 
2429 	/* Enable the VLAN promiscuous mode. */
2430 	if (pf->vfs) {
2431 		for (i = 0; i < pf->vf_num; i++) {
2432 			vsi = pf->vfs[i].vsi;
2433 			i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2434 						     true, NULL);
2435 		}
2436 	}
2437 
2438 	/* Enable mac loopback mode */
2439 	if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2440 	    dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2441 		ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2442 		if (ret != I40E_SUCCESS) {
2443 			PMD_DRV_LOG(ERR, "fail to set loopback link");
2444 			goto tx_err;
2445 		}
2446 	}
2447 
2448 	/* Apply link configure */
2449 	ret = i40e_apply_link_speed(dev);
2450 	if (I40E_SUCCESS != ret) {
2451 		PMD_DRV_LOG(ERR, "Fail to apply link setting");
2452 		goto tx_err;
2453 	}
2454 
2455 	if (!rte_intr_allow_others(intr_handle)) {
2456 		rte_intr_callback_unregister(intr_handle,
2457 					     i40e_dev_interrupt_handler,
2458 					     (void *)dev);
2459 		/* configure and enable device interrupt */
2460 		i40e_pf_config_irq0(hw, FALSE);
2461 		i40e_pf_enable_irq0(hw);
2462 
2463 		if (dev->data->dev_conf.intr_conf.lsc != 0)
2464 			PMD_INIT_LOG(INFO,
2465 				"lsc won't enable because of no intr multiplex");
2466 	} else {
2467 		ret = i40e_aq_set_phy_int_mask(hw,
2468 					       ~(I40E_AQ_EVENT_LINK_UPDOWN |
2469 					       I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2470 					       I40E_AQ_EVENT_MEDIA_NA), NULL);
2471 		if (ret != I40E_SUCCESS)
2472 			PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2473 
2474 		/* Call get_link_info aq command to enable/disable LSE */
2475 		i40e_dev_link_update(dev, 0);
2476 	}
2477 
2478 	if (dev->data->dev_conf.intr_conf.rxq == 0) {
2479 		rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2480 				  i40e_dev_alarm_handler, dev);
2481 	} else {
2482 		/* enable uio intr after callback register */
2483 		rte_intr_enable(intr_handle);
2484 	}
2485 
2486 	i40e_filter_restore(pf);
2487 
2488 	if (pf->tm_conf.root && !pf->tm_conf.committed)
2489 		PMD_DRV_LOG(WARNING,
2490 			    "please call hierarchy_commit() "
2491 			    "before starting the port");
2492 
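	/* Program the MAC's maximum frame size as the MTU plus L2 overhead
	 * (I40E_ETH_OVERHEAD covers the Ethernet header, CRC and VLAN tags).
	 */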
2493 	max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD;
2494 	i40e_set_mac_max_frame(dev, max_frame_size);
2495 
2496 	return I40E_SUCCESS;
2497 
2498 tx_err:
2499 	for (i = 0; i < nb_txq; i++)
2500 		i40e_dev_tx_queue_stop(dev, i);
2501 rx_err:
2502 	for (i = 0; i < nb_rxq; i++)
2503 		i40e_dev_rx_queue_stop(dev, i);
2504 
2505 	return ret;
2506 }
2507 
2508 static int
2509 i40e_dev_stop(struct rte_eth_dev *dev)
2510 {
2511 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2512 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2513 	struct i40e_vsi *main_vsi = pf->main_vsi;
2514 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2515 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2516 	int i;
2517 
2518 	if (hw->adapter_stopped == 1)
2519 		return 0;
2520 
2521 	if (dev->data->dev_conf.intr_conf.rxq == 0) {
2522 		rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
2523 		rte_intr_enable(intr_handle);
2524 	}
2525 
2526 	/* Disable all queues */
2527 	for (i = 0; i < dev->data->nb_tx_queues; i++)
2528 		i40e_dev_tx_queue_stop(dev, i);
2529 
2530 	for (i = 0; i < dev->data->nb_rx_queues; i++)
2531 		i40e_dev_rx_queue_stop(dev, i);
2532 
2533 	/* un-map queues with interrupt registers */
2534 	i40e_vsi_disable_queues_intr(main_vsi);
2535 	i40e_vsi_queues_unbind_intr(main_vsi);
2536 
2537 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2538 		i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2539 		i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2540 	}
2541 
2542 	/* Clear all queues and release memory */
2543 	i40e_dev_clear_queues(dev);
2544 
2545 	/* Set link down */
2546 	i40e_dev_set_link_down(dev);
2547 
2548 	if (!rte_intr_allow_others(intr_handle))
2549 		/* resume to the default handler */
2550 		rte_intr_callback_register(intr_handle,
2551 					   i40e_dev_interrupt_handler,
2552 					   (void *)dev);
2553 
2554 	/* Clean datapath event and queue/vec mapping */
2555 	rte_intr_efd_disable(intr_handle);
2556 
2557 	/* Cleanup vector list */
2558 	rte_intr_vec_list_free(intr_handle);
2559 
2560 	/* reset hierarchy commit */
2561 	pf->tm_conf.committed = false;
2562 
2563 	hw->adapter_stopped = 1;
2564 	dev->data->dev_started = 0;
2565 
2566 	pf->adapter->rss_reta_updated = 0;
2567 
2568 	return 0;
2569 }
2570 
2571 static int
2572 i40e_dev_close(struct rte_eth_dev *dev)
2573 {
2574 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2575 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2576 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2577 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2578 	struct i40e_filter_control_settings settings;
2579 	struct rte_flow *p_flow;
2580 	uint32_t reg;
2581 	int i;
2582 	int ret;
2583 	uint8_t aq_fail = 0;
2584 	int retries = 0;
2585 
2586 	PMD_INIT_FUNC_TRACE();
2587 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2588 		return 0;
2589 
2590 	ret = rte_eth_switch_domain_free(pf->switch_domain_id);
2591 	if (ret)
2592 		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
2593 
2594 
2595 	ret = i40e_dev_stop(dev);
2596 
2597 	i40e_dev_free_queues(dev);
2598 
2599 	/* Disable interrupt */
2600 	i40e_pf_disable_irq0(hw);
2601 	rte_intr_disable(intr_handle);
2602 
2603 	/*
2604 	 * Only legacy filter API needs the following fdir config. So when the
2605 	 * legacy filter API is deprecated, the following code should also be
2606 	 * removed.
2607 	 */
2608 	i40e_fdir_teardown(pf);
2609 
2610 	/* shutdown and destroy the HMC */
2611 	i40e_shutdown_lan_hmc(hw);
2612 
2613 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2614 		i40e_vsi_release(pf->vmdq[i].vsi);
2615 		pf->vmdq[i].vsi = NULL;
2616 	}
2617 	rte_free(pf->vmdq);
2618 	pf->vmdq = NULL;
2619 
2620 	/* release all the existing VSIs and VEBs */
2621 	i40e_vsi_release(pf->main_vsi);
2622 
2623 	/* shutdown the adminq */
2624 	i40e_aq_queue_shutdown(hw, true);
2625 	i40e_shutdown_adminq(hw);
2626 
2627 	i40e_res_pool_destroy(&pf->qp_pool);
2628 	i40e_res_pool_destroy(&pf->msix_pool);
2629 
2630 	/* Disable flexible payload in global configuration */
2631 	if (!pf->support_multi_driver)
2632 		i40e_flex_payload_reg_set_default(hw);
2633 
2634 	/* force a PF reset to clean anything leftover */
2635 	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2636 	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2637 			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2638 	I40E_WRITE_FLUSH(hw);
2639 
2640 	/* Clear PXE mode */
2641 	i40e_clear_pxe_mode(hw);
2642 
2643 	/* Unconfigure filter control */
2644 	memset(&settings, 0, sizeof(settings));
2645 	ret = i40e_set_filter_control(hw, &settings);
2646 	if (ret)
2647 		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2648 					ret);
2649 
2650 	/* Disable flow control */
2651 	hw->fc.requested_mode = I40E_FC_NONE;
2652 	i40e_set_fc(hw, &aq_fail, TRUE);
2653 
2654 	/* uninitialize pf host driver */
2655 	i40e_pf_host_uninit(dev);
2656 
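	/*
	 * The callback may still be in flight (-EAGAIN); retry unregistering
	 * up to 5 times with a 500 ms delay, treating -ENOENT (never
	 * registered) as success.
	 */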
2657 	do {
2658 		ret = rte_intr_callback_unregister(intr_handle,
2659 				i40e_dev_interrupt_handler, dev);
2660 		if (ret >= 0 || ret == -ENOENT) {
2661 			break;
2662 		} else if (ret != -EAGAIN) {
2663 			PMD_INIT_LOG(ERR,
2664 				 "intr callback unregister failed: %d",
2665 				 ret);
2666 		}
2667 		i40e_msec_delay(500);
2668 	} while (retries++ < 5);
2669 
2670 	i40e_rm_ethtype_filter_list(pf);
2671 	i40e_rm_tunnel_filter_list(pf);
2672 	i40e_rm_fdir_filter_list(pf);
2673 
2674 	/* Remove all flows */
2675 	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
2676 		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
2677 		/* Do not free FDIR flows since they are static allocated */
2678 		if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
2679 			rte_free(p_flow);
2680 	}
2681 
2682 	/* release the fdir static allocated memory */
2683 	i40e_fdir_memory_cleanup(pf);
2684 
2685 	/* Remove all Traffic Manager configuration */
2686 	i40e_tm_conf_uninit(dev);
2687 
2688 	i40e_clear_automask(pf);
2689 
2690 	hw->adapter_closed = 1;
2691 	return ret;
2692 }
2693 
2694 /*
2695  * Reset PF device only to re-initialize resources in PMD layer
2696  */
2697 static int
2698 i40e_dev_reset(struct rte_eth_dev *dev)
2699 {
2700 	int ret;
2701 
2702 	/* When a DPDK PMD PF begins to reset the PF port, it should notify
2703 	 * all its VFs so they stay aligned with it. The notification
2704 	 * mechanism is PMD specific; for the i40e PF it is rather complex.
2705 	 * To avoid unexpected behavior in the VFs, resetting a PF with
2706 	 * SR-IOV active is currently not supported. This may change later.
2707 	 */
2708 	if (dev->data->sriov.active)
2709 		return -ENOTSUP;
2710 
2711 	ret = eth_i40e_dev_uninit(dev);
2712 	if (ret)
2713 		return ret;
2714 
2715 	ret = eth_i40e_dev_init(dev, NULL);
2716 
2717 	return ret;
2718 }
2719 
2720 static int
2721 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2722 {
2723 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2724 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2725 	struct i40e_vsi *vsi = pf->main_vsi;
2726 	int status;
2727 
2728 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2729 						     true, NULL, true);
2730 	if (status != I40E_SUCCESS) {
2731 		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2732 		return -EAGAIN;
2733 	}
2734 
2735 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2736 							TRUE, NULL);
2737 	if (status != I40E_SUCCESS) {
2738 		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2739 		/* Rollback unicast promiscuous mode */
2740 		i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2741 						    false, NULL, true);
2742 		return -EAGAIN;
2743 	}
2744 
2745 	return 0;
2746 }
2747 
2748 static int
2749 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2750 {
2751 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2752 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2753 	struct i40e_vsi *vsi = pf->main_vsi;
2754 	int status;
2755 
2756 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2757 						     false, NULL, true);
2758 	if (status != I40E_SUCCESS) {
2759 		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2760 		return -EAGAIN;
2761 	}
2762 
2763 	/* must remain in all_multicast mode */
2764 	if (dev->data->all_multicast == 1)
2765 		return 0;
2766 
2767 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2768 							false, NULL);
2769 	if (status != I40E_SUCCESS) {
2770 		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2771 		/* Rollback unicast promiscuous mode */
2772 		i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2773 						    true, NULL, true);
2774 		return -EAGAIN;
2775 	}
2776 
2777 	return 0;
2778 }
2779 
2780 static int
2781 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2782 {
2783 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2784 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2785 	struct i40e_vsi *vsi = pf->main_vsi;
2786 	int ret;
2787 
2788 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2789 	if (ret != I40E_SUCCESS) {
2790 		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2791 		return -EAGAIN;
2792 	}
2793 
2794 	return 0;
2795 }
2796 
2797 static int
2798 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2799 {
2800 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2801 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2802 	struct i40e_vsi *vsi = pf->main_vsi;
2803 	int ret;
2804 
2805 	if (dev->data->promiscuous == 1)
2806 		return 0; /* must remain in all_multicast mode */
2807 
2808 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2809 				vsi->seid, FALSE, NULL);
2810 	if (ret != I40E_SUCCESS) {
2811 		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2812 		return -EAGAIN;
2813 	}
2814 
2815 	return 0;
2816 }
2817 
2818 /*
2819  * Set device link up.
2820  */
2821 static int
2822 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2823 {
2824 	/* re-apply link speed setting */
2825 	return i40e_apply_link_speed(dev);
2826 }
2827 
2828 /*
2829  * Set device link down.
2830  */
2831 static int
2832 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2833 {
2834 	uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2835 	uint8_t abilities = 0;
2836 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2837 
2838 	abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2839 	return i40e_phy_conf_link(hw, abilities, speed, false);
2840 }
2841 
2842 #define CHECK_INTERVAL             100  /* 100ms */
2843 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2844 
2845 static __rte_always_inline void
2846 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2847 {
2848 /* Link status registers and values */
2849 #define I40E_REG_LINK_UP		0x40000080
2850 #define I40E_PRTMAC_MACC		0x001E24E0
2851 #define I40E_REG_MACC_25GB		0x00020000
2852 #define I40E_REG_SPEED_MASK		0x38000000
2853 #define I40E_REG_SPEED_0		0x00000000
2854 #define I40E_REG_SPEED_1		0x08000000
2855 #define I40E_REG_SPEED_2		0x10000000
2856 #define I40E_REG_SPEED_3		0x18000000
2857 #define I40E_REG_SPEED_4		0x20000000
2858 	uint32_t link_speed;
2859 	uint32_t reg_val;
2860 
2861 	reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA(0));
2862 	link_speed = reg_val & I40E_REG_SPEED_MASK;
2863 	reg_val &= I40E_REG_LINK_UP;
2864 	link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2865 
2866 	if (unlikely(link->link_status == 0))
2867 		return;
2868 
2869 	/* Parse the link status */
2870 	switch (link_speed) {
2871 	case I40E_REG_SPEED_0:
2872 		link->link_speed = RTE_ETH_SPEED_NUM_100M;
2873 		break;
2874 	case I40E_REG_SPEED_1:
2875 		link->link_speed = RTE_ETH_SPEED_NUM_1G;
2876 		break;
2877 	case I40E_REG_SPEED_2:
2878 		if (hw->mac.type == I40E_MAC_X722)
2879 			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
2880 		else
2881 			link->link_speed = RTE_ETH_SPEED_NUM_10G;
2882 		break;
2883 	case I40E_REG_SPEED_3:
2884 		if (hw->mac.type == I40E_MAC_X722) {
2885 			link->link_speed = RTE_ETH_SPEED_NUM_5G;
2886 		} else {
2887 			reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2888 
2889 			if (reg_val & I40E_REG_MACC_25GB)
2890 				link->link_speed = RTE_ETH_SPEED_NUM_25G;
2891 			else
2892 				link->link_speed = RTE_ETH_SPEED_NUM_40G;
2893 		}
2894 		break;
2895 	case I40E_REG_SPEED_4:
2896 		if (hw->mac.type == I40E_MAC_X722)
2897 			link->link_speed = RTE_ETH_SPEED_NUM_10G;
2898 		else
2899 			link->link_speed = RTE_ETH_SPEED_NUM_20G;
2900 		break;
2901 	default:
2902 		PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2903 		break;
2904 	}
2905 }
2906 
2907 static __rte_always_inline void
2908 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2909 	bool enable_lse, int wait_to_complete)
2910 {
2911 	uint32_t rep_cnt = MAX_REPEAT_TIME;
2912 	struct i40e_link_status link_status;
2913 	int status;
2914 
2915 	memset(&link_status, 0, sizeof(link_status));
2916 
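	/* When asked to wait for completion, poll the firmware for link-up
	 * for at most MAX_REPEAT_TIME * CHECK_INTERVAL milliseconds.
	 */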
2917 	do {
2918 		memset(&link_status, 0, sizeof(link_status));
2919 
2920 		/* Get link status information from hardware */
2921 		status = i40e_aq_get_link_info(hw, enable_lse,
2922 						&link_status, NULL);
2923 		if (unlikely(status != I40E_SUCCESS)) {
2924 			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
2925 			link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
2926 			PMD_DRV_LOG(ERR, "Failed to get link info");
2927 			return;
2928 		}
2929 
2930 		link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2931 		if (!wait_to_complete || link->link_status)
2932 			break;
2933 
2934 		rte_delay_ms(CHECK_INTERVAL);
2935 	} while (--rep_cnt);
2936 
2937 	/* Parse the link status */
2938 	switch (link_status.link_speed) {
2939 	case I40E_LINK_SPEED_100MB:
2940 		link->link_speed = RTE_ETH_SPEED_NUM_100M;
2941 		break;
2942 	case I40E_LINK_SPEED_1GB:
2943 		link->link_speed = RTE_ETH_SPEED_NUM_1G;
2944 		break;
2945 	case I40E_LINK_SPEED_10GB:
2946 		link->link_speed = RTE_ETH_SPEED_NUM_10G;
2947 		break;
2948 	case I40E_LINK_SPEED_20GB:
2949 		link->link_speed = RTE_ETH_SPEED_NUM_20G;
2950 		break;
2951 	case I40E_LINK_SPEED_25GB:
2952 		link->link_speed = RTE_ETH_SPEED_NUM_25G;
2953 		break;
2954 	case I40E_LINK_SPEED_40GB:
2955 		link->link_speed = RTE_ETH_SPEED_NUM_40G;
2956 		break;
2957 	default:
2958 		if (link->link_status)
2959 			link->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
2960 		else
2961 			link->link_speed = RTE_ETH_SPEED_NUM_NONE;
2962 		break;
2963 	}
2964 }
2965 
2966 int
2967 i40e_dev_link_update(struct rte_eth_dev *dev,
2968 		     int wait_to_complete)
2969 {
2970 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2971 	struct rte_eth_link link;
2972 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2973 	int ret;
2974 
2975 	memset(&link, 0, sizeof(link));
2976 
2977 	/* i40e uses full duplex only */
2978 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
2979 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2980 			RTE_ETH_LINK_SPEED_FIXED);
2981 
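	/*
	 * Fast path: read the link status straight from the MAC register.
	 * Use the slower AdminQ query when the caller wants to wait for
	 * completion or link-state-change events must be enabled.
	 */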
2982 	if (!wait_to_complete && !enable_lse)
2983 		update_link_reg(hw, &link);
2984 	else
2985 		update_link_aq(hw, &link, enable_lse, wait_to_complete);
2986 
2987 	if (hw->switch_dev)
2988 		rte_eth_linkstatus_get(hw->switch_dev, &link);
2989 
2990 	ret = rte_eth_linkstatus_set(dev, &link);
2991 	i40e_notify_all_vfs_link_status(dev);
2992 
2993 	return ret;
2994 }
2995 
2996 static void
2997 i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
2998 			  uint32_t loreg, bool offset_loaded, uint64_t *offset,
2999 			  uint64_t *stat, uint64_t *prev_stat)
3000 {
3001 	i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
3002 	/* extend the 48-bit hardware counter to 64 bits in software */
3003 	if (offset_loaded) {
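		/*
		 * If the low 48 bits of the previous value exceed the new
		 * reading, the 48-bit hardware counter wrapped: add 2^48,
		 * then carry over the upper 16 bits kept in software.
		 */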
3004 		if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
3005 			*stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
3006 		*stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
3007 	}
3008 	*prev_stat = *stat;
3009 }
3010 
3011 /* Get all the statistics of a VSI */
3012 void
3013 i40e_update_vsi_stats(struct i40e_vsi *vsi)
3014 {
3015 	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
3016 	struct i40e_eth_stats *nes = &vsi->eth_stats;
3017 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3018 	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
3019 
3020 	i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
3021 				  vsi->offset_loaded, &oes->rx_bytes,
3022 				  &nes->rx_bytes, &vsi->prev_rx_bytes);
3023 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
3024 			    vsi->offset_loaded, &oes->rx_unicast,
3025 			    &nes->rx_unicast);
3026 	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
3027 			    vsi->offset_loaded, &oes->rx_multicast,
3028 			    &nes->rx_multicast);
3029 	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
3030 			    vsi->offset_loaded, &oes->rx_broadcast,
3031 			    &nes->rx_broadcast);
3032 	/* exclude CRC bytes */
3033 	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
3034 		nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
3035 
3036 	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
3037 			    &oes->rx_discards, &nes->rx_discards);
3038 	/* GLV_REPC not supported */
3039 	/* GLV_RMPC not supported */
3040 	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
3041 			    &oes->rx_unknown_protocol,
3042 			    &nes->rx_unknown_protocol);
3043 	i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
3044 				  vsi->offset_loaded, &oes->tx_bytes,
3045 				  &nes->tx_bytes, &vsi->prev_tx_bytes);
3046 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
3047 			    vsi->offset_loaded, &oes->tx_unicast,
3048 			    &nes->tx_unicast);
3049 	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
3050 			    vsi->offset_loaded, &oes->tx_multicast,
3051 			    &nes->tx_multicast);
3052 	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
3053 			    vsi->offset_loaded,  &oes->tx_broadcast,
3054 			    &nes->tx_broadcast);
3055 	/* GLV_TDPC not supported */
3056 	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
3057 			    &oes->tx_errors, &nes->tx_errors);
3058 	vsi->offset_loaded = true;
3059 
3060 	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
3061 		    vsi->vsi_id);
3062 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
3063 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
3064 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
3065 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
3066 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
3067 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3068 		    nes->rx_unknown_protocol);
3069 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
3070 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
3071 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
3072 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
3073 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
3074 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
3075 	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
3076 		    vsi->vsi_id);
3077 }
3078 
3079 static void
3080 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
3081 {
3082 	unsigned int i;
3083 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3084 	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
3085 
3086 	/* Get rx/tx bytes of internal transfer packets */
3087 	i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
3088 				  I40E_GLV_GORCL(hw->port),
3089 				  pf->offset_loaded,
3090 				  &pf->internal_stats_offset.rx_bytes,
3091 				  &pf->internal_stats.rx_bytes,
3092 				  &pf->internal_prev_rx_bytes);
3093 	i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
3094 				  I40E_GLV_GOTCL(hw->port),
3095 				  pf->offset_loaded,
3096 				  &pf->internal_stats_offset.tx_bytes,
3097 				  &pf->internal_stats.tx_bytes,
3098 				  &pf->internal_prev_tx_bytes);
3099 	/* Get total internal rx packet count */
3100 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
3101 			    I40E_GLV_UPRCL(hw->port),
3102 			    pf->offset_loaded,
3103 			    &pf->internal_stats_offset.rx_unicast,
3104 			    &pf->internal_stats.rx_unicast);
3105 	i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
3106 			    I40E_GLV_MPRCL(hw->port),
3107 			    pf->offset_loaded,
3108 			    &pf->internal_stats_offset.rx_multicast,
3109 			    &pf->internal_stats.rx_multicast);
3110 	i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
3111 			    I40E_GLV_BPRCL(hw->port),
3112 			    pf->offset_loaded,
3113 			    &pf->internal_stats_offset.rx_broadcast,
3114 			    &pf->internal_stats.rx_broadcast);
3115 	/* Get total internal tx packet count */
3116 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
3117 			    I40E_GLV_UPTCL(hw->port),
3118 			    pf->offset_loaded,
3119 			    &pf->internal_stats_offset.tx_unicast,
3120 			    &pf->internal_stats.tx_unicast);
3121 	i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
3122 			    I40E_GLV_MPTCL(hw->port),
3123 			    pf->offset_loaded,
3124 			    &pf->internal_stats_offset.tx_multicast,
3125 			    &pf->internal_stats.tx_multicast);
3126 	i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
3127 			    I40E_GLV_BPTCL(hw->port),
3128 			    pf->offset_loaded,
3129 			    &pf->internal_stats_offset.tx_broadcast,
3130 			    &pf->internal_stats.tx_broadcast);
3131 
3132 	/* exclude CRC size */
3133 	pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
3134 		pf->internal_stats.rx_multicast +
3135 		pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
3136 
3137 	/* Get statistics of struct i40e_eth_stats */
3138 	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
3139 				  I40E_GLPRT_GORCL(hw->port),
3140 				  pf->offset_loaded, &os->eth.rx_bytes,
3141 				  &ns->eth.rx_bytes, &pf->prev_rx_bytes);
3142 	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
3143 			    I40E_GLPRT_UPRCL(hw->port),
3144 			    pf->offset_loaded, &os->eth.rx_unicast,
3145 			    &ns->eth.rx_unicast);
3146 	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
3147 			    I40E_GLPRT_MPRCL(hw->port),
3148 			    pf->offset_loaded, &os->eth.rx_multicast,
3149 			    &ns->eth.rx_multicast);
3150 	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
3151 			    I40E_GLPRT_BPRCL(hw->port),
3152 			    pf->offset_loaded, &os->eth.rx_broadcast,
3153 			    &ns->eth.rx_broadcast);
3154 	/* Workaround: CRC size should not be included in byte statistics,
3155 	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
3156 	 * packet.
3157 	 */
3158 	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3159 		ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
3160 
3161 	/* exclude internal rx bytes
3162 	 * Workaround: it is possible that I40E_GLV_GORC[H/L] is updated before
3163 	 * I40E_GLPRT_GORC[H/L], so there is a small window that can cause a
3164 	 * negative value.
3165 	 * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
3166 	 */
3167 	if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
3168 		ns->eth.rx_bytes = 0;
3169 	else
3170 		ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
3171 
3172 	if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
3173 		ns->eth.rx_unicast = 0;
3174 	else
3175 		ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
3176 
3177 	if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
3178 		ns->eth.rx_multicast = 0;
3179 	else
3180 		ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
3181 
3182 	if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
3183 		ns->eth.rx_broadcast = 0;
3184 	else
3185 		ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
3186 
3187 	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
3188 			    pf->offset_loaded, &os->eth.rx_discards,
3189 			    &ns->eth.rx_discards);
3190 	/* GLPRT_REPC not supported */
3191 	/* GLPRT_RMPC not supported */
3192 	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
3193 			    pf->offset_loaded,
3194 			    &os->eth.rx_unknown_protocol,
3195 			    &ns->eth.rx_unknown_protocol);
3196 	i40e_stat_update_48(hw, I40E_GL_RXERR1_H(hw->pf_id + I40E_MAX_VF),
3197 			    I40E_GL_RXERR1_L(hw->pf_id + I40E_MAX_VF),
3198 			    pf->offset_loaded, &pf->rx_err1_offset,
3199 			    &pf->rx_err1);
3200 	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
3201 				  I40E_GLPRT_GOTCL(hw->port),
3202 				  pf->offset_loaded, &os->eth.tx_bytes,
3203 				  &ns->eth.tx_bytes, &pf->prev_tx_bytes);
3204 	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
3205 			    I40E_GLPRT_UPTCL(hw->port),
3206 			    pf->offset_loaded, &os->eth.tx_unicast,
3207 			    &ns->eth.tx_unicast);
3208 	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
3209 			    I40E_GLPRT_MPTCL(hw->port),
3210 			    pf->offset_loaded, &os->eth.tx_multicast,
3211 			    &ns->eth.tx_multicast);
3212 	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
3213 			    I40E_GLPRT_BPTCL(hw->port),
3214 			    pf->offset_loaded, &os->eth.tx_broadcast,
3215 			    &ns->eth.tx_broadcast);
3216 	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3217 		ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
3218 
3219 	/* exclude internal tx bytes
3220 	 * Workaround: it is possible that I40E_GLV_GOTC[H/L] is updated before
3221 	 * I40E_GLPRT_GOTC[H/L], so there is a small window that can cause a
3222 	 * negative value.
3223 	 * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
3224 	 */
3225 	if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
3226 		ns->eth.tx_bytes = 0;
3227 	else
3228 		ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
3229 
3230 	if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
3231 		ns->eth.tx_unicast = 0;
3232 	else
3233 		ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
3234 
3235 	if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
3236 		ns->eth.tx_multicast = 0;
3237 	else
3238 		ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
3239 
3240 	if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
3241 		ns->eth.tx_broadcast = 0;
3242 	else
3243 		ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
3244 
3245 	/* GLPRT_TEPC not supported */
3246 
3247 	/* additional port specific stats */
3248 	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
3249 			    pf->offset_loaded, &os->tx_dropped_link_down,
3250 			    &ns->tx_dropped_link_down);
3251 	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
3252 			    pf->offset_loaded, &os->crc_errors,
3253 			    &ns->crc_errors);
3254 	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
3255 			    pf->offset_loaded, &os->illegal_bytes,
3256 			    &ns->illegal_bytes);
3257 	/* GLPRT_ERRBC not supported */
3258 	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
3259 			    pf->offset_loaded, &os->mac_local_faults,
3260 			    &ns->mac_local_faults);
3261 	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
3262 			    pf->offset_loaded, &os->mac_remote_faults,
3263 			    &ns->mac_remote_faults);
3264 	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
3265 			    pf->offset_loaded, &os->rx_length_errors,
3266 			    &ns->rx_length_errors);
3267 	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
3268 			    pf->offset_loaded, &os->link_xon_rx,
3269 			    &ns->link_xon_rx);
3270 	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3271 			    pf->offset_loaded, &os->link_xoff_rx,
3272 			    &ns->link_xoff_rx);
3273 	for (i = 0; i < 8; i++) {
3274 		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3275 				    pf->offset_loaded,
3276 				    &os->priority_xon_rx[i],
3277 				    &ns->priority_xon_rx[i]);
3278 		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
3279 				    pf->offset_loaded,
3280 				    &os->priority_xoff_rx[i],
3281 				    &ns->priority_xoff_rx[i]);
3282 	}
3283 	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
3284 			    pf->offset_loaded, &os->link_xon_tx,
3285 			    &ns->link_xon_tx);
3286 	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3287 			    pf->offset_loaded, &os->link_xoff_tx,
3288 			    &ns->link_xoff_tx);
3289 	for (i = 0; i < 8; i++) {
3290 		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3291 				    pf->offset_loaded,
3292 				    &os->priority_xon_tx[i],
3293 				    &ns->priority_xon_tx[i]);
3294 		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3295 				    pf->offset_loaded,
3296 				    &os->priority_xoff_tx[i],
3297 				    &ns->priority_xoff_tx[i]);
3298 		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3299 				    pf->offset_loaded,
3300 				    &os->priority_xon_2_xoff[i],
3301 				    &ns->priority_xon_2_xoff[i]);
3302 	}
3303 	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
3304 			    I40E_GLPRT_PRC64L(hw->port),
3305 			    pf->offset_loaded, &os->rx_size_64,
3306 			    &ns->rx_size_64);
3307 	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
3308 			    I40E_GLPRT_PRC127L(hw->port),
3309 			    pf->offset_loaded, &os->rx_size_127,
3310 			    &ns->rx_size_127);
3311 	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
3312 			    I40E_GLPRT_PRC255L(hw->port),
3313 			    pf->offset_loaded, &os->rx_size_255,
3314 			    &ns->rx_size_255);
3315 	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
3316 			    I40E_GLPRT_PRC511L(hw->port),
3317 			    pf->offset_loaded, &os->rx_size_511,
3318 			    &ns->rx_size_511);
3319 	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3320 			    I40E_GLPRT_PRC1023L(hw->port),
3321 			    pf->offset_loaded, &os->rx_size_1023,
3322 			    &ns->rx_size_1023);
3323 	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3324 			    I40E_GLPRT_PRC1522L(hw->port),
3325 			    pf->offset_loaded, &os->rx_size_1522,
3326 			    &ns->rx_size_1522);
3327 	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3328 			    I40E_GLPRT_PRC9522L(hw->port),
3329 			    pf->offset_loaded, &os->rx_size_big,
3330 			    &ns->rx_size_big);
3331 	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3332 			    pf->offset_loaded, &os->rx_undersize,
3333 			    &ns->rx_undersize);
3334 	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3335 			    pf->offset_loaded, &os->rx_fragments,
3336 			    &ns->rx_fragments);
3337 	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3338 			    pf->offset_loaded, &os->rx_oversize,
3339 			    &ns->rx_oversize);
3340 	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3341 			    pf->offset_loaded, &os->rx_jabber,
3342 			    &ns->rx_jabber);
3343 	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3344 			    I40E_GLPRT_PTC64L(hw->port),
3345 			    pf->offset_loaded, &os->tx_size_64,
3346 			    &ns->tx_size_64);
3347 	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3348 			    I40E_GLPRT_PTC127L(hw->port),
3349 			    pf->offset_loaded, &os->tx_size_127,
3350 			    &ns->tx_size_127);
3351 	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3352 			    I40E_GLPRT_PTC255L(hw->port),
3353 			    pf->offset_loaded, &os->tx_size_255,
3354 			    &ns->tx_size_255);
3355 	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3356 			    I40E_GLPRT_PTC511L(hw->port),
3357 			    pf->offset_loaded, &os->tx_size_511,
3358 			    &ns->tx_size_511);
3359 	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3360 			    I40E_GLPRT_PTC1023L(hw->port),
3361 			    pf->offset_loaded, &os->tx_size_1023,
3362 			    &ns->tx_size_1023);
3363 	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3364 			    I40E_GLPRT_PTC1522L(hw->port),
3365 			    pf->offset_loaded, &os->tx_size_1522,
3366 			    &ns->tx_size_1522);
3367 	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3368 			    I40E_GLPRT_PTC9522L(hw->port),
3369 			    pf->offset_loaded, &os->tx_size_big,
3370 			    &ns->tx_size_big);
3371 	i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3372 			   pf->offset_loaded,
3373 			   &os->fd_sb_match, &ns->fd_sb_match);
3374 	/* GLPRT_MSPDC not supported */
3375 	/* GLPRT_XEC not supported */
3376 
3377 	pf->offset_loaded = true;
3378 
3379 	if (pf->main_vsi)
3380 		i40e_update_vsi_stats(pf->main_vsi);
3381 }
3382 
3383 /* Get all statistics of a port */
3384 static int
3385 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3386 {
3387 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3388 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3389 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3390 	struct i40e_vsi *vsi;
3391 	unsigned i;
3392 
3393 	/* Read the stats registers to refresh the values, then fill the stats struct */
3394 	i40e_read_stats_registers(pf, hw);
3395 
3396 	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3397 			pf->main_vsi->eth_stats.rx_multicast +
3398 			pf->main_vsi->eth_stats.rx_broadcast -
3399 			pf->main_vsi->eth_stats.rx_discards -
3400 			pf->rx_err1;
3401 	stats->opackets = ns->eth.tx_unicast +
3402 			ns->eth.tx_multicast +
3403 			ns->eth.tx_broadcast;
3404 	stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
3405 	stats->obytes   = ns->eth.tx_bytes;
3406 	stats->oerrors  = ns->eth.tx_errors +
3407 			pf->main_vsi->eth_stats.tx_errors;
3408 
3409 	/* Rx Errors */
3410 	stats->imissed  = ns->eth.rx_discards +
3411 			pf->main_vsi->eth_stats.rx_discards;
3412 	stats->ierrors  = ns->crc_errors +
3413 			ns->rx_length_errors + ns->rx_undersize +
3414 			ns->rx_oversize + ns->rx_fragments + ns->rx_jabber +
3415 			pf->rx_err1;
3416 
3417 	if (pf->vfs) {
3418 		for (i = 0; i < pf->vf_num; i++) {
3419 			vsi = pf->vfs[i].vsi;
3420 			i40e_update_vsi_stats(vsi);
3421 
3422 			stats->ipackets += (vsi->eth_stats.rx_unicast +
3423 					vsi->eth_stats.rx_multicast +
3424 					vsi->eth_stats.rx_broadcast -
3425 					vsi->eth_stats.rx_discards);
3426 			stats->ibytes   += vsi->eth_stats.rx_bytes;
3427 			stats->oerrors  += vsi->eth_stats.tx_errors;
3428 			stats->imissed  += vsi->eth_stats.rx_discards;
3429 		}
3430 	}
3431 
3432 	PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3433 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3434 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3435 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3436 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3437 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3438 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3439 		    ns->eth.rx_unknown_protocol);
3440 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3441 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3442 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3443 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3444 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3445 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3446 
3447 	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3448 		    ns->tx_dropped_link_down);
3449 	PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3450 	PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3451 		    ns->illegal_bytes);
3452 	PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3453 	PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3454 		    ns->mac_local_faults);
3455 	PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3456 		    ns->mac_remote_faults);
3457 	PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3458 		    ns->rx_length_errors);
3459 	PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3460 	PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3461 	for (i = 0; i < 8; i++) {
3462 		PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3463 				i, ns->priority_xon_rx[i]);
3464 		PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3465 				i, ns->priority_xoff_rx[i]);
3466 	}
3467 	PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3468 	PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3469 	for (i = 0; i < 8; i++) {
3470 		PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3471 				i, ns->priority_xon_tx[i]);
3472 		PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3473 				i, ns->priority_xoff_tx[i]);
3474 		PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3475 				i, ns->priority_xon_2_xoff[i]);
3476 	}
3477 	PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3478 	PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3479 	PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3480 	PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3481 	PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3482 	PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3483 	PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3484 	PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3485 	PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3486 	PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3487 	PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3488 	PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3489 	PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3490 	PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3491 	PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3492 	PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3493 	PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3494 	PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3495 	PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3496 			ns->mac_short_packet_dropped);
3497 	PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3498 		    ns->checksum_error);
3499 	PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3500 	PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3501 	return 0;
3502 }
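/*
 * A hedged usage sketch from the application side: rte_eth_stats_get()
 * reaches i40e_dev_stats_get() through the PMD's eth_dev_ops table.
 * print_basic_stats() is a hypothetical helper; it assumes the port is
 * already configured and started.
 */
#if 0	/* illustrative only, not compiled */
#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
print_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;
	printf("rx %" PRIu64 " pkts / %" PRIu64 " bytes, "
	       "tx %" PRIu64 " pkts / %" PRIu64 " bytes, "
	       "imissed %" PRIu64 ", ierrors %" PRIu64 "\n",
	       stats.ipackets, stats.ibytes, stats.opackets,
	       stats.obytes, stats.imissed, stats.ierrors);
}
#endif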
3503 
3504 /* Reset the statistics */
3505 static int
3506 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3507 {
3508 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3509 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3510 
3511 	/* Mark PF and VSI stats to update the offset, aka "reset" */
3512 	pf->offset_loaded = false;
3513 	if (pf->main_vsi)
3514 		pf->main_vsi->offset_loaded = false;
3515 
3516 	/* read the stats, reading current register values into offset */
3517 	i40e_read_stats_registers(pf, hw);
3518 
3519 	return 0;
3520 }
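/*
 * The "reset" above never clears hardware registers: it only re-captures
 * the baselines, so subsequent deltas restart from zero. A sketch of the
 * pattern with a hypothetical helper, loosely mirroring what
 * i40e_stat_update_32() does per counter:
 */
#if 0	/* illustrative only, not compiled */
static uint32_t
stat_delta(uint32_t raw, uint32_t *offset, bool offset_loaded)
{
	if (!offset_loaded)
		*offset = raw;	/* capture the new baseline on "reset" */
	return raw - *offset;	/* unsigned wraparound keeps this correct */
}
#endif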
3521 
3522 static uint32_t
3523 i40e_xstats_calc_num(void)
3524 {
3525 	return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3526 		(I40E_NB_RXQ_PRIO_XSTATS * 8) +
3527 		(I40E_NB_TXQ_PRIO_XSTATS * 8);
3528 }
3529 
3530 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3531 				     struct rte_eth_xstat_name *xstats_names,
3532 				     __rte_unused unsigned limit)
3533 {
3534 	unsigned count = 0;
3535 	unsigned i, prio;
3536 
3537 	if (xstats_names == NULL)
3538 		return i40e_xstats_calc_num();
3539 
3540 	/* Note: limit checked in rte_eth_xstats_names() */
3541 
3542 	/* Get stats from i40e_eth_stats struct */
3543 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3544 		strlcpy(xstats_names[count].name,
3545 			rte_i40e_stats_strings[i].name,
3546 			sizeof(xstats_names[count].name));
3547 		count++;
3548 	}
3549 
3550 	/* Get individual stats from i40e_hw_port struct */
3551 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3552 		strlcpy(xstats_names[count].name,
3553 			rte_i40e_hw_port_strings[i].name,
3554 			sizeof(xstats_names[count].name));
3555 		count++;
3556 	}
3557 
3558 	for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3559 		for (prio = 0; prio < 8; prio++) {
3560 			snprintf(xstats_names[count].name,
3561 				 sizeof(xstats_names[count].name),
3562 				 "rx_priority%u_%s", prio,
3563 				 rte_i40e_rxq_prio_strings[i].name);
3564 			count++;
3565 		}
3566 	}
3567 
3568 	for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3569 		for (prio = 0; prio < 8; prio++) {
3570 			snprintf(xstats_names[count].name,
3571 				 sizeof(xstats_names[count].name),
3572 				 "tx_priority%u_%s", prio,
3573 				 rte_i40e_txq_prio_strings[i].name);
3574 			count++;
3575 		}
3576 	}
3577 	return count;
3578 }
3579 
3580 static int
3581 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3582 		    unsigned n)
3583 {
3584 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3585 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3586 	unsigned i, count, prio;
3587 	struct i40e_hw_port_stats *hw_stats = &pf->stats;
3588 
3589 	count = i40e_xstats_calc_num();
3590 	if (n < count)
3591 		return count;
3592 
3593 	i40e_read_stats_registers(pf, hw);
3594 
3595 	if (xstats == NULL)
3596 		return 0;
3597 
3598 	count = 0;
3599 
3600 	/* Get stats from i40e_eth_stats struct */
3601 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3602 		xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3603 			rte_i40e_stats_strings[i].offset);
3604 		xstats[count].id = count;
3605 		count++;
3606 	}
3607 
3608 	/* Get individual stats from i40e_hw_port struct */
3609 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3610 		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3611 			rte_i40e_hw_port_strings[i].offset);
3612 		xstats[count].id = count;
3613 		count++;
3614 	}
3615 
3616 	for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3617 		for (prio = 0; prio < 8; prio++) {
3618 			xstats[count].value =
3619 				*(uint64_t *)(((char *)hw_stats) +
3620 				rte_i40e_rxq_prio_strings[i].offset +
3621 				(sizeof(uint64_t) * prio));
3622 			xstats[count].id = count;
3623 			count++;
3624 		}
3625 	}
3626 
3627 	for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3628 		for (prio = 0; prio < 8; prio++) {
3629 			xstats[count].value =
3630 				*(uint64_t *)(((char *)hw_stats) +
3631 				rte_i40e_txq_prio_strings[i].offset +
3632 				(sizeof(uint64_t) * prio));
3633 			xstats[count].id = count;
3634 			count++;
3635 		}
3636 	}
3637 
3638 	return count;
3639 }
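/*
 * A hedged sketch of the standard two-call xstats pattern an application
 * uses against this callback: first ask for the required count, then
 * fetch names and values. dump_xstats() is a hypothetical helper.
 */
#if 0	/* illustrative only, not compiled */
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
dump_xstats(uint16_t port_id)
{
	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
	struct rte_eth_xstat *xs;
	struct rte_eth_xstat_name *names;

	if (n <= 0)
		return;
	xs = calloc(n, sizeof(*xs));
	names = calloc(n, sizeof(*names));
	if (xs != NULL && names != NULL &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, xs, n) == n) {
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, xs[i].value);
	}
	free(xs);
	free(names);
}
#endif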
3640 
3641 static int
3642 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3643 {
3644 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3645 	u32 full_ver;
3646 	u8 ver, patch;
3647 	u16 build;
3648 	int ret;
3649 
3650 	full_ver = hw->nvm.oem_ver;
3651 	ver = (u8)(full_ver >> 24);
3652 	build = (u16)((full_ver >> 8) & 0xffff);
3653 	patch = (u8)(full_ver & 0xff);
3654 
3655 	ret = snprintf(fw_version, fw_size,
3656 		 "%d.%d%d 0x%08x %d.%d.%d",
3657 		 ((hw->nvm.version >> 12) & 0xf),
3658 		 ((hw->nvm.version >> 4) & 0xff),
3659 		 (hw->nvm.version & 0xf), hw->nvm.eetrack,
3660 		 ver, build, patch);
3661 	if (ret < 0)
3662 		return -EINVAL;
3663 
3664 	ret += 1; /* add the size of '\0' */
3665 	if (fw_size < (size_t)ret)
3666 		return ret;
3667 	else
3668 		return 0;
3669 }
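/*
 * Usage sketch for the ethdev wrapper of the callback above, assuming
 * <stdio.h> and <rte_ethdev.h> are included; the version string shown is
 * illustrative only. show_fw_version() is a hypothetical helper.
 */
#if 0	/* illustrative only, not compiled */
static void
show_fw_version(uint16_t port_id)
{
	char fw_ver[32];
	int ret = rte_eth_dev_fw_version_get(port_id, fw_ver, sizeof(fw_ver));

	if (ret == 0)
		printf("FW: %s\n", fw_ver);	/* e.g. "6.01 0x80003554 1.1747.0" */
	else if (ret > 0)	/* buffer too small; ret includes the '\0' */
		printf("need a %d-byte buffer\n", ret);
}
#endif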
3670 
3671 /*
3672  * When using NVM 6.01 (for X710/XL710/XXV710) or 3.33 (for X722) or later,
3673  * the Rx data path does not hang if FW LLDP is stopped.
3674  * Return true if LLDP needs to be stopped;
3675  * return false if LLDP must not be disabled, to avoid blocking the Rx data path.
3676  */
3677 static bool
3678 i40e_need_stop_lldp(struct rte_eth_dev *dev)
3679 {
3680 	double nvm_ver;
3681 	char ver_str[64] = {0};
3682 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3683 
3684 	i40e_fw_version_get(dev, ver_str, 64);
3685 	nvm_ver = atof(ver_str);
3686 	if ((hw->mac.type == I40E_MAC_X722 ||
3687 	     hw->mac.type == I40E_MAC_X722_VF) &&
3688 	     ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
3689 		return true;
3690 	else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
3691 		return true;
3692 
3693 	return false;
3694 }
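/*
 * Note on the comparison above: both sides are scaled by 1000 and
 * truncated to uint32_t the same way, so an NVM version string starting
 * "6.01" compares >= the 6.01 threshold, while e.g. "4.10" falls below
 * it on non-X722 parts.
 */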
3695 
3696 static int
3697 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3698 {
3699 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3700 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3701 	struct i40e_vsi *vsi = pf->main_vsi;
3702 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3703 
3704 	dev_info->max_rx_queues = vsi->nb_qps;
3705 	dev_info->max_tx_queues = vsi->nb_qps;
3706 	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3707 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3708 	dev_info->max_mac_addrs = vsi->max_macaddrs;
3709 	dev_info->max_vfs = pci_dev->max_vfs;
3710 	dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
3711 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3712 	dev_info->rx_queue_offload_capa = 0;
3713 	dev_info->rx_offload_capa =
3714 		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
3715 		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
3716 		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
3717 		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
3718 		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
3719 		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3720 		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
3721 		RTE_ETH_RX_OFFLOAD_SCATTER |
3722 		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
3723 		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
3724 		RTE_ETH_RX_OFFLOAD_RSS_HASH;
3725 
3726 	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
3727 	dev_info->tx_offload_capa =
3728 		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
3729 		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
3730 		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
3731 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
3732 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
3733 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
3734 		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3735 		RTE_ETH_TX_OFFLOAD_TCP_TSO |
3736 		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
3737 		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
3738 		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
3739 		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
3740 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
3741 		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
3742 		dev_info->tx_queue_offload_capa;
3743 	dev_info->dev_capa =
3744 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3745 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3746 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
3747 
3748 	dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3749 						sizeof(uint32_t);
3750 	dev_info->reta_size = pf->hash_lut_size;
3751 	dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3752 
3753 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3754 		.rx_thresh = {
3755 			.pthresh = I40E_DEFAULT_RX_PTHRESH,
3756 			.hthresh = I40E_DEFAULT_RX_HTHRESH,
3757 			.wthresh = I40E_DEFAULT_RX_WTHRESH,
3758 		},
3759 		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3760 		.rx_drop_en = 0,
3761 		.offloads = 0,
3762 	};
3763 
3764 	dev_info->default_txconf = (struct rte_eth_txconf) {
3765 		.tx_thresh = {
3766 			.pthresh = I40E_DEFAULT_TX_PTHRESH,
3767 			.hthresh = I40E_DEFAULT_TX_HTHRESH,
3768 			.wthresh = I40E_DEFAULT_TX_WTHRESH,
3769 		},
3770 		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3771 		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3772 		.offloads = 0,
3773 	};
3774 
3775 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3776 		.nb_max = I40E_MAX_RING_DESC,
3777 		.nb_min = I40E_MIN_RING_DESC,
3778 		.nb_align = I40E_ALIGN_RING_DESC,
3779 	};
3780 
3781 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3782 		.nb_max = I40E_MAX_RING_DESC,
3783 		.nb_min = I40E_MIN_RING_DESC,
3784 		.nb_align = I40E_ALIGN_RING_DESC,
3785 		.nb_seg_max = I40E_TX_MAX_SEG,
3786 		.nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3787 	};
3788 
3789 	if (pf->flags & I40E_FLAG_VMDQ) {
3790 		dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3791 		dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3792 		dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3793 						pf->max_nb_vmdq_vsi;
3794 		dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3795 		dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3796 		dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3797 	}
3798 
3799 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3800 		/* For XL710 */
3801 		dev_info->speed_capa = RTE_ETH_LINK_SPEED_40G;
3802 		dev_info->default_rxportconf.nb_queues = 2;
3803 		dev_info->default_txportconf.nb_queues = 2;
3804 		if (dev->data->nb_rx_queues == 1)
3805 			dev_info->default_rxportconf.ring_size = 2048;
3806 		else
3807 			dev_info->default_rxportconf.ring_size = 1024;
3808 		if (dev->data->nb_tx_queues == 1)
3809 			dev_info->default_txportconf.ring_size = 1024;
3810 		else
3811 			dev_info->default_txportconf.ring_size = 512;
3812 
3813 	} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3814 		/* For XXV710 */
3815 		dev_info->speed_capa = RTE_ETH_LINK_SPEED_25G;
3816 		dev_info->default_rxportconf.nb_queues = 1;
3817 		dev_info->default_txportconf.nb_queues = 1;
3818 		dev_info->default_rxportconf.ring_size = 256;
3819 		dev_info->default_txportconf.ring_size = 256;
3820 	} else {
3821 		/* For X710 */
3822 		dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
3823 		dev_info->default_rxportconf.nb_queues = 1;
3824 		dev_info->default_txportconf.nb_queues = 1;
3825 		if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_10G) {
3826 			dev_info->default_rxportconf.ring_size = 512;
3827 			dev_info->default_txportconf.ring_size = 256;
3828 		} else {
3829 			dev_info->default_rxportconf.ring_size = 256;
3830 			dev_info->default_txportconf.ring_size = 256;
3831 		}
3832 	}
3833 	dev_info->default_rxportconf.burst_size = 32;
3834 	dev_info->default_txportconf.burst_size = 32;
3835 
3836 	return 0;
3837 }
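/*
 * Application-side sketch: rte_eth_dev_info_get() lands here via the
 * ->dev_infos_get callback. show_port_limits() is a hypothetical helper
 * and assumes <stdio.h> and <rte_ethdev.h> are included.
 */
#if 0	/* illustrative only, not compiled */
static void
show_port_limits(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return;
	printf("max rxq %u, RETA size %u, speed capa 0x%x, "
	       "suggested rx burst %u\n",
	       info.max_rx_queues, info.reta_size, info.speed_capa,
	       info.default_rxportconf.burst_size);
}
#endif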
3838 
3839 static int
3840 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3841 {
3842 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3843 	struct i40e_vsi *vsi = pf->main_vsi;
3844 	PMD_INIT_FUNC_TRACE();
3845 
3846 	if (on)
3847 		return i40e_vsi_add_vlan(vsi, vlan_id);
3848 	else
3849 		return i40e_vsi_delete_vlan(vsi, vlan_id);
3850 }
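/*
 * A minimal usage sketch, assuming RTE_ETH_RX_OFFLOAD_VLAN_FILTER has
 * been enabled on the port (otherwise there is no filter table to
 * update). allow_vlan_100() is a hypothetical helper.
 */
#if 0	/* illustrative only, not compiled */
static int
allow_vlan_100(uint16_t port_id)
{
	return rte_eth_dev_vlan_filter(port_id, 100, 1);	/* accept VLAN 100 */
}
#endif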
3851 
3852 static int
3853 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3854 				enum rte_vlan_type vlan_type,
3855 				uint16_t tpid, int qinq)
3856 {
3857 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3858 	uint64_t reg_r = 0;
3859 	uint64_t reg_w = 0;
3860 	uint16_t reg_id = 3;
3861 	int ret;
3862 
3863 	if (qinq) {
3864 		if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
3865 			reg_id = 2;
3866 	}
3867 
3868 	ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3869 					  &reg_r, NULL);
3870 	if (ret != I40E_SUCCESS) {
3871 		PMD_DRV_LOG(ERR,
3872 			   "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3873 			   reg_id);
3874 		return -EIO;
3875 	}
3876 	PMD_DRV_LOG(DEBUG,
3877 		    "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3878 		    reg_id, reg_r);
3879 
3880 	reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3881 	reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3882 	if (reg_r == reg_w) {
3883 		PMD_DRV_LOG(DEBUG, "No need to write");
3884 		return 0;
3885 	}
3886 
3887 	ret = i40e_aq_debug_write_global_register(hw,
3888 					   I40E_GL_SWT_L2TAGCTRL(reg_id),
3889 					   reg_w, NULL);
3890 	if (ret != I40E_SUCCESS) {
3891 		PMD_DRV_LOG(ERR,
3892 			    "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3893 			    reg_id);
3894 		return -EIO;
3895 	}
3896 	PMD_DRV_LOG(DEBUG,
3897 		    "Global register 0x%08x is changed to value 0x%08x",
3898 		    I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3899 
3900 	return 0;
3901 }
3902 
3903 static int
3904 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3905 		   enum rte_vlan_type vlan_type,
3906 		   uint16_t tpid)
3907 {
3908 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3909 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3910 	int qinq = dev->data->dev_conf.rxmode.offloads &
3911 		   RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3912 	int ret = 0;
3913 
3914 	if ((vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
3915 	     vlan_type != RTE_ETH_VLAN_TYPE_OUTER) ||
3916 	    (!qinq && vlan_type == RTE_ETH_VLAN_TYPE_INNER)) {
3917 		PMD_DRV_LOG(ERR,
3918 			    "Unsupported vlan type.");
3919 		return -EINVAL;
3920 	}
3921 
3922 	if (pf->support_multi_driver) {
3923 		PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3924 		return -ENOTSUP;
3925 	}
3926 
3927 	/* The ability to handle 802.1ad frames was added in NVM API 1.7 */
3928 	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3929 		if (qinq) {
3930 			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
3931 				hw->first_tag = rte_cpu_to_le_16(tpid);
3932 			else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER)
3933 				hw->second_tag = rte_cpu_to_le_16(tpid);
3934 		} else {
3935 			if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER)
3936 				hw->second_tag = rte_cpu_to_le_16(tpid);
3937 		}
3938 		ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3939 		if (ret != I40E_SUCCESS) {
3940 			PMD_DRV_LOG(ERR,
3941 				    "Set switch config failed aq_err: %d",
3942 				    hw->aq.asq_last_status);
3943 			ret = -EIO;
3944 		}
3945 	} else
3946 		/* If NVM API < 1.7, keep the register setting */
3947 		ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3948 						      tpid, qinq);
3949 
3950 	return ret;
3951 }
3952 
3953 /* Configure outer vlan stripping on or off in QinQ mode */
3954 static int
3955 i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
3956 {
3957 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3958 	int ret = I40E_SUCCESS;
3959 	uint32_t reg;
3960 
3961 	if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
3962 		PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
3963 		return -EINVAL;
3964 	}
3965 
3966 	/* Configure for outer VLAN RX stripping */
3967 	reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
3968 
3969 	if (on)
3970 		reg |= I40E_VSI_TSR_QINQ_STRIP;
3971 	else
3972 		reg &= ~I40E_VSI_TSR_QINQ_STRIP;
3973 
3974 	ret = i40e_aq_debug_write_register(hw,
3975 						   I40E_VSI_TSR(vsi->vsi_id),
3976 						   reg, NULL);
3977 	if (ret < 0) {
3978 		PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
3979 				    vsi->vsi_id);
3980 		return I40E_ERR_CONFIG;
3981 	}
3982 
3983 	return ret;
3984 }
3985 
3986 static int
3987 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3988 {
3989 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3990 	struct i40e_vsi *vsi = pf->main_vsi;
3991 	struct rte_eth_rxmode *rxmode;
3992 
3993 	rxmode = &dev->data->dev_conf.rxmode;
3994 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
3995 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
3996 			i40e_vsi_config_vlan_filter(vsi, TRUE);
3997 		else
3998 			i40e_vsi_config_vlan_filter(vsi, FALSE);
3999 	}
4000 
4001 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
4002 		/* Enable or disable VLAN stripping */
4003 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
4004 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
4005 		else
4006 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
4007 	}
4008 
4009 	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
4010 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
4011 			i40e_vsi_config_double_vlan(vsi, TRUE);
4012 			/* Set global registers with default ethertype. */
4013 			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
4014 					   RTE_ETHER_TYPE_VLAN);
4015 			i40e_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
4016 					   RTE_ETHER_TYPE_VLAN);
4017 		}
4018 		else
4019 			i40e_vsi_config_double_vlan(vsi, FALSE);
4020 	}
4021 
4022 	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
4023 		/* Enable or disable outer VLAN stripping */
4024 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
4025 			i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
4026 		else
4027 			i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
4028 	}
4029 
4030 	return 0;
4031 }
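/*
 * Runtime toggling sketch: ethdev derives the change mask passed to the
 * callback above from which offload bits differ from the current config.
 * enable_rx_vlan_offloads() is a hypothetical helper.
 */
#if 0	/* illustrative only, not compiled */
static int
enable_rx_vlan_offloads(uint16_t port_id)
{
	int offloads = rte_eth_dev_get_vlan_offload(port_id);

	if (offloads < 0)
		return offloads;
	offloads |= RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, offloads);
}
#endif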
4032 
4033 static void
4034 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
4035 			  __rte_unused uint16_t queue,
4036 			  __rte_unused int on)
4037 {
4038 	PMD_INIT_FUNC_TRACE();
4039 }
4040 
4041 static int
4042 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4043 {
4044 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4045 	struct i40e_vsi *vsi = pf->main_vsi;
4046 	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
4047 	struct i40e_vsi_vlan_pvid_info info;
4048 
4049 	memset(&info, 0, sizeof(info));
4050 	info.on = on;
4051 	if (info.on)
4052 		info.config.pvid = pvid;
4053 	else {
4054 		info.config.reject.tagged =
4055 				data->dev_conf.txmode.hw_vlan_reject_tagged;
4056 		info.config.reject.untagged =
4057 				data->dev_conf.txmode.hw_vlan_reject_untagged;
4058 	}
4059 
4060 	return i40e_vsi_vlan_pvid_set(vsi, &info);
4061 }
4062 
4063 static int
4064 i40e_dev_led_on(struct rte_eth_dev *dev)
4065 {
4066 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4067 	uint32_t mode = i40e_led_get(hw);
4068 
4069 	if (mode == 0)
4070 		i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
4071 
4072 	return 0;
4073 }
4074 
4075 static int
4076 i40e_dev_led_off(struct rte_eth_dev *dev)
4077 {
4078 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4079 	uint32_t mode = i40e_led_get(hw);
4080 
4081 	if (mode != 0)
4082 		i40e_led_set(hw, 0, false);
4083 
4084 	return 0;
4085 }
4086 
4087 static int
4088 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4089 {
4090 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4091 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4092 
4093 	fc_conf->pause_time = pf->fc_conf.pause_time;
4094 
4095 	/* Read out from the registers, in case they were modified by another port */
4096 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
4097 		I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
4098 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
4099 		I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
4100 
4101 	fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
4102 	fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
4103 
4104 	/* Return the current mode according to the actual setting */
4105 	switch (hw->fc.current_mode) {
4106 	case I40E_FC_FULL:
4107 		fc_conf->mode = RTE_ETH_FC_FULL;
4108 		break;
4109 	case I40E_FC_TX_PAUSE:
4110 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
4111 		break;
4112 	case I40E_FC_RX_PAUSE:
4113 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
4114 		break;
4115 	case I40E_FC_NONE:
4116 	default:
4117 		fc_conf->mode = RTE_ETH_FC_NONE;
4118 	}
4119 
4120 	return 0;
4121 }
4122 
4123 static int
4124 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4125 {
4126 	uint32_t mflcn_reg, fctrl_reg, reg;
4127 	uint32_t max_high_water;
4128 	uint8_t i, aq_failure;
4129 	int err;
4130 	struct i40e_hw *hw;
4131 	struct i40e_pf *pf;
4132 	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
4133 		[RTE_ETH_FC_NONE] = I40E_FC_NONE,
4134 		[RTE_ETH_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
4135 		[RTE_ETH_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
4136 		[RTE_ETH_FC_FULL] = I40E_FC_FULL
4137 	};
4138 
4139 	/* The high_water field in rte_eth_fc_conf uses kilobyte units */
4140 
4141 	max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
4142 	if ((fc_conf->high_water > max_high_water) ||
4143 			(fc_conf->high_water < fc_conf->low_water)) {
4144 		PMD_INIT_LOG(ERR,
4145 			"Invalid high/low water setup value in KB, High_water must be <= %d.",
4146 			max_high_water);
4147 		return -EINVAL;
4148 	}
4149 
4150 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4151 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4152 	hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
4153 
4154 	pf->fc_conf.pause_time = fc_conf->pause_time;
4155 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
4156 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
4157 
4158 	PMD_INIT_FUNC_TRACE();
4159 
4160 	/* All the link flow control related enable/disable register
4161 	 * configuration is handled by the F/W
4162 	 */
4163 	err = i40e_set_fc(hw, &aq_failure, true);
4164 	if (err < 0)
4165 		return -ENOSYS;
4166 
4167 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
4168 		/* Configure flow control refresh threshold,
4169 		 * the value for stat_tx_pause_refresh_timer[8]
4170 		 * is used for global pause operation.
4171 		 */
4172 
4173 		I40E_WRITE_REG(hw,
4174 			       I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
4175 			       pf->fc_conf.pause_time);
4176 
4177 		/* configure the timer value included in transmitted pause
4178 		 * frame,
4179 		 * the value for stat_tx_pause_quanta[8] is used for global
4180 		 * pause operation
4181 		 */
4182 		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
4183 			       pf->fc_conf.pause_time);
4184 
4185 		fctrl_reg = I40E_READ_REG(hw,
4186 					  I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
4187 
4188 		if (fc_conf->mac_ctrl_frame_fwd != 0)
4189 			fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
4190 		else
4191 			fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
4192 
4193 		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
4194 			       fctrl_reg);
4195 	} else {
4196 		/* Configure pause time (2 TCs per register) */
4197 		reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
4198 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
4199 			I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
4200 
4201 		/* Configure flow control refresh threshold value */
4202 		I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
4203 			       pf->fc_conf.pause_time / 2);
4204 
4205 		mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4206 
4207 		/* set or clear MFLCN.PMCF & MFLCN.DPF bits
4208 		 * depending on configuration
4209 		 */
4210 		if (fc_conf->mac_ctrl_frame_fwd != 0) {
4211 			mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
4212 			mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
4213 		} else {
4214 			mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
4215 			mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
4216 		}
4217 
4218 		I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
4219 	}
4220 
4221 	if (!pf->support_multi_driver) {
4222 		/* Configure watermarks based on both packets and bytes */
4223 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
4224 				 (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4225 				 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4226 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
4227 				  (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4228 				 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4229 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
4230 				  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4231 				  << I40E_KILOSHIFT);
4232 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
4233 				   pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4234 				   << I40E_KILOSHIFT);
4235 	} else {
4236 		PMD_DRV_LOG(ERR,
4237 			    "Water marker configuration is not supported.");
4238 	}
4239 
4240 	I40E_WRITE_FLUSH(hw);
4241 
4242 	return 0;
4243 }
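/*
 * A hedged sketch of configuring link flow control through the ethdev
 * API; the water marks are example values in KB and must respect the
 * bounds validated above (high <= 968 KB here, and high >= low).
 * enable_link_pause() is a hypothetical helper.
 */
#if 0	/* illustrative only, not compiled */
static int
enable_link_pause(uint16_t port_id)
{
	struct rte_eth_fc_conf fc;
	int ret = rte_eth_dev_flow_ctrl_get(port_id, &fc);

	if (ret != 0)
		return ret;
	fc.mode = RTE_ETH_FC_FULL;
	fc.pause_time = 0xffff;
	fc.high_water = 576;	/* KB */
	fc.low_water = 192;	/* KB */
	return rte_eth_dev_flow_ctrl_set(port_id, &fc);
}
#endif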
4244 
4245 static int
4246 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
4247 			    __rte_unused struct rte_eth_pfc_conf *pfc_conf)
4248 {
4249 	PMD_INIT_FUNC_TRACE();
4250 
4251 	return -ENOSYS;
4252 }
4253 
4254 /* Add a MAC address, and update filters */
4255 static int
4256 i40e_macaddr_add(struct rte_eth_dev *dev,
4257 		 struct rte_ether_addr *mac_addr,
4258 		 __rte_unused uint32_t index,
4259 		 uint32_t pool)
4260 {
4261 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4262 	struct i40e_mac_filter_info mac_filter;
4263 	struct i40e_vsi *vsi;
4264 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4265 	int ret;
4266 
4267 	/* If VMDQ not enabled or configured, return */
4268 	if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4269 			  !pf->nb_cfg_vmdq_vsi)) {
4270 		PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4271 			pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4272 			pool);
4273 		return -ENOTSUP;
4274 	}
4275 
4276 	if (pool > pf->nb_cfg_vmdq_vsi) {
4277 		PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4278 				pool, pf->nb_cfg_vmdq_vsi);
4279 		return -EINVAL;
4280 	}
4281 
4282 	rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
4283 	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
4284 		mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
4285 	else
4286 		mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
4287 
4288 	if (pool == 0)
4289 		vsi = pf->main_vsi;
4290 	else
4291 		vsi = pf->vmdq[pool - 1].vsi;
4292 
4293 	ret = i40e_vsi_add_mac(vsi, &mac_filter);
4294 	if (ret != I40E_SUCCESS) {
4295 		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4296 		return -ENODEV;
4297 	}
4298 	return 0;
4299 }
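/*
 * Usage sketch with a locally administered example address.
 * add_secondary_mac() is a hypothetical helper.
 */
#if 0	/* illustrative only, not compiled */
static int
add_secondary_mac(uint16_t port_id)
{
	struct rte_ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	/* pool 0 targets the main VSI; pools >= 1 need VMDQ configured,
	 * per the checks above */
	return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
}
#endif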
4300 
4301 /* Remove a MAC address, and update filters */
4302 static void
4303 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4304 {
4305 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4306 	struct i40e_vsi *vsi;
4307 	struct rte_eth_dev_data *data = dev->data;
4308 	struct rte_ether_addr *macaddr;
4309 	int ret;
4310 	uint32_t i;
4311 	uint64_t pool_sel;
4312 
4313 	macaddr = &(data->mac_addrs[index]);
4314 
4315 	pool_sel = dev->data->mac_pool_sel[index];
4316 
4317 	for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4318 		if (pool_sel & (1ULL << i)) {
4319 			if (i == 0)
4320 				vsi = pf->main_vsi;
4321 			else {
4322 				/* No VMDQ pool enabled or configured */
4323 				if (!(pf->flags & I40E_FLAG_VMDQ) ||
4324 					(i > pf->nb_cfg_vmdq_vsi)) {
4325 					PMD_DRV_LOG(ERR,
4326 						"No VMDQ pool enabled/configured");
4327 					return;
4328 				}
4329 				vsi = pf->vmdq[i - 1].vsi;
4330 			}
4331 			ret = i40e_vsi_delete_mac(vsi, macaddr);
4332 
4333 			if (ret) {
4334 				PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4335 				return;
4336 			}
4337 		}
4338 	}
4339 }
4340 
4341 static int
4342 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4343 {
4344 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4345 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4346 	uint32_t reg;
4347 	int ret;
4348 
4349 	if (!lut)
4350 		return -EINVAL;
4351 
4352 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4353 		ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
4354 					  vsi->type != I40E_VSI_SRIOV,
4355 					  lut, lut_size);
4356 		if (ret) {
4357 			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4358 			return ret;
4359 		}
4360 	} else {
4361 		uint32_t *lut_dw = (uint32_t *)lut;
4362 		uint16_t i, lut_size_dw = lut_size / 4;
4363 
4364 		if (vsi->type == I40E_VSI_SRIOV) {
4365 			for (i = 0; i < lut_size_dw; i++) {	/* '<' avoids reading one dword past lut */
4366 				reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4367 				lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4368 			}
4369 		} else {
4370 			for (i = 0; i < lut_size_dw; i++)
4371 				lut_dw[i] = I40E_READ_REG(hw,
4372 							  I40E_PFQF_HLUT(i));
4373 		}
4374 	}
4375 
4376 	return 0;
4377 }
4378 
4379 int
4380 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4381 {
4382 	struct i40e_pf *pf;
4383 	struct i40e_hw *hw;
4384 
4385 	if (!vsi || !lut)
4386 		return -EINVAL;
4387 
4388 	pf = I40E_VSI_TO_PF(vsi);
4389 	hw = I40E_VSI_TO_HW(vsi);
4390 
4391 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4392 		enum i40e_status_code status;
4393 
4394 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
4395 					     vsi->type != I40E_VSI_SRIOV,
4396 					     lut, lut_size);
4397 		if (status) {
4398 			PMD_DRV_LOG(ERR,
4399 				    "Failed to update RSS lookup table, error status: %d",
4400 				    status);
4401 			return -EIO;
4402 		}
4403 	} else {
4404 		uint32_t *lut_dw = (uint32_t *)lut;
4405 		uint16_t i, lut_size_dw = lut_size / 4;
4406 
4407 		if (vsi->type == I40E_VSI_SRIOV) {
4408 			for (i = 0; i < lut_size_dw; i++)
4409 				I40E_WRITE_REG(
4410 					hw,
4411 					I40E_VFQF_HLUT1(i, vsi->user_param),
4412 					lut_dw[i]);
4413 		} else {
4414 			for (i = 0; i < lut_size_dw; i++)
4415 				I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4416 					       lut_dw[i]);
4417 		}
4418 		I40E_WRITE_FLUSH(hw);
4419 	}
4420 
4421 	return 0;
4422 }
4423 
4424 static int
4425 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4426 			 struct rte_eth_rss_reta_entry64 *reta_conf,
4427 			 uint16_t reta_size)
4428 {
4429 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4430 	uint16_t i, lut_size = pf->hash_lut_size;
4431 	uint16_t idx, shift;
4432 	uint8_t *lut;
4433 	int ret;
4434 
4435 	if (reta_size != lut_size ||
4436 		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
4437 		PMD_DRV_LOG(ERR,
4438 			"The size of the configured hash lookup table (%d) doesn't match the size the hardware supports (%d)",
4439 			reta_size, lut_size);
4440 		return -EINVAL;
4441 	}
4442 
4443 	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4444 	if (!lut) {
4445 		PMD_DRV_LOG(ERR, "No memory can be allocated");
4446 		return -ENOMEM;
4447 	}
4448 	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4449 	if (ret)
4450 		goto out;
4451 	for (i = 0; i < reta_size; i++) {
4452 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
4453 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
4454 		if (reta_conf[idx].mask & (1ULL << shift))
4455 			lut[i] = reta_conf[idx].reta[shift];
4456 	}
4457 	ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4458 
4459 	pf->adapter->rss_reta_updated = 1;
4460 
4461 out:
4462 	rte_free(lut);
4463 
4464 	return ret;
4465 }
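/*
 * A hedged sketch of driving the update path above through the ethdev
 * API, spreading all RSS buckets across queues 0 and 1; it assumes
 * reta_size matches dev_info.reta_size (the PF hash LUT size here).
 * spread_over_two_queues() is a hypothetical helper.
 */
#if 0	/* illustrative only, not compiled */
static int
spread_over_two_queues(uint16_t port_id, uint16_t reta_size)
{
	struct rte_eth_rss_reta_entry64 conf[RTE_ETH_RSS_RETA_SIZE_512 /
					     RTE_ETH_RETA_GROUP_SIZE] = {0};
	uint16_t i;

	for (i = 0; i < reta_size; i++) {
		conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
		conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i %
			RTE_ETH_RETA_GROUP_SIZE] = i % 2;
	}
	return rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
}
#endif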
4466 
4467 static int
4468 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4469 			struct rte_eth_rss_reta_entry64 *reta_conf,
4470 			uint16_t reta_size)
4471 {
4472 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4473 	uint16_t i, lut_size = pf->hash_lut_size;
4474 	uint16_t idx, shift;
4475 	uint8_t *lut;
4476 	int ret;
4477 
4478 	if (reta_size != lut_size ||
4479 		reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
4480 		PMD_DRV_LOG(ERR,
4481 			"The size of the configured hash lookup table (%d) doesn't match the size the hardware supports (%d)",
4482 			reta_size, lut_size);
4483 		return -EINVAL;
4484 	}
4485 
4486 	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4487 	if (!lut) {
4488 		PMD_DRV_LOG(ERR, "No memory can be allocated");
4489 		return -ENOMEM;
4490 	}
4491 
4492 	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4493 	if (ret)
4494 		goto out;
4495 	for (i = 0; i < reta_size; i++) {
4496 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
4497 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
4498 		if (reta_conf[idx].mask & (1ULL << shift))
4499 			reta_conf[idx].reta[shift] = lut[i];
4500 	}
4501 
4502 out:
4503 	rte_free(lut);
4504 
4505 	return ret;
4506 }
4507 
4508 /**
4509  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4510  * @hw:   pointer to the HW structure
4511  * @mem:  pointer to mem struct to fill out
4512  * @size: size of memory requested
4513  * @alignment: what to align the allocation to
4514  **/
4515 enum i40e_status_code
4516 i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
4517 			struct i40e_dma_mem *mem,
4518 			u64 size,
4519 			u32 alignment)
4520 {
4521 	static uint64_t i40e_dma_memzone_id;
4522 	const struct rte_memzone *mz = NULL;
4523 	char z_name[RTE_MEMZONE_NAMESIZE];
4524 
4525 	if (!mem)
4526 		return I40E_ERR_PARAM;
4527 
4528 	snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
4529 		__atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
4530 	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4531 			RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4532 	if (!mz)
4533 		return I40E_ERR_NO_MEMORY;
4534 
4535 	mem->size = size;
4536 	mem->va = mz->addr;
4537 	mem->pa = mz->iova;
4538 	mem->zone = (const void *)mz;
4539 	PMD_DRV_LOG(DEBUG,
4540 		"memzone %s allocated with physical address: %"PRIu64,
4541 		mz->name, mem->pa);
4542 
4543 	return I40E_SUCCESS;
4544 }
4545 
4546 /**
4547  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4548  * @hw:   pointer to the HW structure
4549  * @mem:  ptr to mem struct to free
4550  **/
4551 enum i40e_status_code
4552 i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
4553 		    struct i40e_dma_mem *mem)
4554 {
4555 	if (!mem)
4556 		return I40E_ERR_PARAM;
4557 
4558 	PMD_DRV_LOG(DEBUG,
4559 		"memzone %s to be freed with physical address: %"PRIu64,
4560 		((const struct rte_memzone *)mem->zone)->name, mem->pa);
4561 	rte_memzone_free((const struct rte_memzone *)mem->zone);
4562 	mem->zone = NULL;
4563 	mem->va = NULL;
4564 	mem->pa = (u64)0;
4565 
4566 	return I40E_SUCCESS;
4567 }
4568 
4569 /**
4570  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4571  * @hw:   pointer to the HW structure
4572  * @mem:  pointer to mem struct to fill out
4573  * @size: size of memory requested
4574  **/
4575 enum i40e_status_code
4576 i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
4577 			 struct i40e_virt_mem *mem,
4578 			 u32 size)
4579 {
4580 	if (!mem)
4581 		return I40E_ERR_PARAM;
4582 
4583 	mem->size = size;
4584 	mem->va = rte_zmalloc("i40e", size, 0);
4585 
4586 	if (mem->va)
4587 		return I40E_SUCCESS;
4588 	else
4589 		return I40E_ERR_NO_MEMORY;
4590 }
4591 
4592 /**
4593  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4594  * @hw:   pointer to the HW structure
4595  * @mem:  pointer to mem struct to free
4596  **/
4597 enum i40e_status_code
4598 i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
4599 		     struct i40e_virt_mem *mem)
4600 {
4601 	if (!mem)
4602 		return I40E_ERR_PARAM;
4603 
4604 	rte_free(mem->va);
4605 	mem->va = NULL;
4606 
4607 	return I40E_SUCCESS;
4608 }
4609 
4610 void
4611 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4612 {
4613 	rte_spinlock_init(&sp->spinlock);
4614 }
4615 
4616 void
4617 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4618 {
4619 	rte_spinlock_lock(&sp->spinlock);
4620 }
4621 
4622 void
4623 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4624 {
4625 	rte_spinlock_unlock(&sp->spinlock);
4626 }
4627 
4628 void
4629 i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
4630 {
4631 	return;
4632 }
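/*
 * Editor's note: the four wrappers above are the spinlock shims that the
 * shared base code reaches through its OS-abstraction layer. A sketch of
 * the expected call pattern (illustrative only):
 *
 *	struct i40e_spinlock lock;
 *
 *	i40e_init_spinlock_d(&lock);
 *	i40e_acquire_spinlock_d(&lock);
 *	// ... critical section, e.g. admin queue access ...
 *	i40e_release_spinlock_d(&lock);
 *	i40e_destroy_spinlock_d(&lock);	// no-op for rte_spinlock
 */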
4633 
4634 /**
4635  * Get the hardware capabilities, which will be parsed
4636  * and saved into struct i40e_hw.
4637  */
4638 static int
4639 i40e_get_cap(struct i40e_hw *hw)
4640 {
4641 	struct i40e_aqc_list_capabilities_element_resp *buf;
4642 	uint16_t len, size = 0;
4643 	int ret;
4644 
4645 	/* Calculate a buffer size large enough to hold the response data temporarily */
4646 	len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4647 						I40E_MAX_CAP_ELE_NUM;
4648 	buf = rte_zmalloc("i40e", len, 0);
4649 	if (!buf) {
4650 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
4651 		return I40E_ERR_NO_MEMORY;
4652 	}
4653 
4654 	/* Get and parse the capabilities, then save them to hw */
4655 	ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4656 			i40e_aqc_opc_list_func_capabilities, NULL);
4657 	if (ret != I40E_SUCCESS)
4658 		PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4659 
4660 	/* Free the temporary buffer after use */
4661 	rte_free(buf);
4662 
4663 	return ret;
4664 }
4665 
4666 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF	4
4667 
4668 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4669 		const char *value,
4670 		void *opaque)
4671 {
4672 	struct i40e_pf *pf;
4673 	unsigned long num;
4674 	char *end;
4675 
4676 	pf = (struct i40e_pf *)opaque;
4677 	RTE_SET_USED(key);
4678 
4679 	errno = 0;
4680 	num = strtoul(value, &end, 0);
4681 	if (errno != 0 || end == value || *end != 0) {
4682 		PMD_DRV_LOG(WARNING, "Invalid VF queue number = %s, keeping "
4683 			    "the current value = %hu", value, pf->vf_nb_qp_max);
4684 		return -(EINVAL);
4685 	}
4686 
4687 	if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4688 		pf->vf_nb_qp_max = (uint16_t)num;
4689 	else
4690 		/* here return 0 to make next valid same argument work */
4691 		PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu, it must be "
4692 			    "a power of 2 and no greater than 16; keeping "
4693 			    "the current value = %hu", num, pf->vf_nb_qp_max);
4694 
4695 	return 0;
4696 }
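/*
 * Editor's note: a hedged example of how the "queue-num-per-vf" devarg
 * parsed above is typically supplied; the PCI address and application are
 * placeholders:
 *
 *	dpdk-testpmd -a 0000:02:00.0,queue-num-per-vf=8 -- -i
 *
 * The value must be a power of 2 and no greater than I40E_MAX_QP_NUM_PER_VF;
 * an invalid value keeps the current default (4, see below).
 */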
4697 
4698 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4699 {
4700 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4701 	struct rte_kvargs *kvlist;
4702 	int kvargs_count;
4703 
4704 	/* Set the default number of queues per VF to 4 */
4705 	pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4706 
4707 	if (dev->device->devargs == NULL)
4708 		return 0;
4709 
4710 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4711 	if (kvlist == NULL)
4712 		return -(EINVAL);
4713 
4714 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4715 	if (!kvargs_count) {
4716 		rte_kvargs_free(kvlist);
4717 		return 0;
4718 	}
4719 
4720 	if (kvargs_count > 1)
4721 		PMD_DRV_LOG(WARNING, "Argument \"%s\" given more than once; only "
4722 			    "the first invalid or the last valid value takes effect",
4723 			    ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4724 
4725 	rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4726 			   i40e_pf_parse_vf_queue_number_handler, pf);
4727 
4728 	rte_kvargs_free(kvlist);
4729 
4730 	return 0;
4731 }
4732 
4733 static int
4734 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4735 {
4736 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4737 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4738 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4739 	uint16_t qp_count = 0, vsi_count = 0;
4740 
4741 	if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4742 		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4743 		return -EINVAL;
4744 	}
4745 
4746 	i40e_pf_config_vf_rxq_number(dev);
4747 
4748 	/* Add the parameter init for LFC */
4749 	pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4750 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4751 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4752 
4753 	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4754 	pf->max_num_vsi = hw->func_caps.num_vsis;
4755 	pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4756 	pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4757 
4758 	/* FDir queue/VSI allocation */
4759 	pf->fdir_qp_offset = 0;
4760 	if (hw->func_caps.fd) {
4761 		pf->flags |= I40E_FLAG_FDIR;
4762 		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4763 	} else {
4764 		pf->fdir_nb_qps = 0;
4765 	}
4766 	qp_count += pf->fdir_nb_qps;
4767 	vsi_count += 1;
4768 
4769 	/* LAN queue/VSI allocation */
4770 	pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4771 	if (!hw->func_caps.rss) {
4772 		pf->lan_nb_qps = 1;
4773 	} else {
4774 		pf->flags |= I40E_FLAG_RSS;
4775 		if (hw->mac.type == I40E_MAC_X722)
4776 			pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4777 		pf->lan_nb_qps = pf->lan_nb_qp_max;
4778 	}
4779 	qp_count += pf->lan_nb_qps;
4780 	vsi_count += 1;
4781 
4782 	/* VF queue/VSI allocation */
4783 	pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4784 	if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4785 		pf->flags |= I40E_FLAG_SRIOV;
4786 		pf->vf_nb_qps = pf->vf_nb_qp_max;
4787 		pf->vf_num = pci_dev->max_vfs;
4788 		PMD_DRV_LOG(DEBUG,
4789 			"%u VF VSIs, %u queues per VF VSI, in total %u queues",
4790 			pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4791 	} else {
4792 		pf->vf_nb_qps = 0;
4793 		pf->vf_num = 0;
4794 	}
4795 	qp_count += pf->vf_nb_qps * pf->vf_num;
4796 	vsi_count += pf->vf_num;
4797 
4798 	/* VMDq queue/VSI allocation */
4799 	pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4800 	pf->vmdq_nb_qps = 0;
4801 	pf->max_nb_vmdq_vsi = 0;
4802 	if (hw->func_caps.vmdq) {
4803 		if (qp_count < hw->func_caps.num_tx_qp &&
4804 			vsi_count < hw->func_caps.num_vsis) {
4805 			pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4806 				qp_count) / pf->vmdq_nb_qp_max;
4807 
4808 			/* Limit the maximum number of VMDq vsi to the maximum
4809 			 * ethdev can support
4810 			 */
4811 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4812 				hw->func_caps.num_vsis - vsi_count);
4813 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4814 				RTE_ETH_64_POOLS);
4815 			if (pf->max_nb_vmdq_vsi) {
4816 				pf->flags |= I40E_FLAG_VMDQ;
4817 				pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4818 				PMD_DRV_LOG(DEBUG,
4819 					"%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4820 					pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4821 					pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4822 			} else {
4823 				PMD_DRV_LOG(INFO,
4824 					"Not enough queues left for VMDq");
4825 			}
4826 		} else {
4827 			PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4828 		}
4829 	}
4830 	qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4831 	vsi_count += pf->max_nb_vmdq_vsi;
4832 
4833 	if (hw->func_caps.dcb)
4834 		pf->flags |= I40E_FLAG_DCB;
4835 
4836 	if (qp_count > hw->func_caps.num_tx_qp) {
4837 		PMD_DRV_LOG(ERR,
4838 			"Failed to allocate %u queues, which exceeds the hardware maximum %u",
4839 			qp_count, hw->func_caps.num_tx_qp);
4840 		return -EINVAL;
4841 	}
4842 	if (vsi_count > hw->func_caps.num_vsis) {
4843 		PMD_DRV_LOG(ERR,
4844 			"Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4845 			vsi_count, hw->func_caps.num_vsis);
4846 		return -EINVAL;
4847 	}
4848 
4849 	return 0;
4850 }
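/*
 * Editor's note: a worked layout example for the allocation above, under
 * assumed capabilities (FDIR on with 1 queue, RSS on, 2 VFs at the default
 * 4 queues each, no VMDq). The offsets follow directly from the code:
 *
 *	fdir_qp_offset = 0                    // 1 FDIR queue at index 0
 *	lan_qp_offset  = 0 + 1                // lan_nb_qps LAN queues
 *	vf_qp_offset   = 1 + lan_nb_qps       // then 2 * 4 VF queues
 *	vmdq_qp_offset = vf_qp_offset + 8     // VMDq pools, if any, follow
 */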
4851 
4852 static int
4853 i40e_pf_get_switch_config(struct i40e_pf *pf)
4854 {
4855 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4856 	struct i40e_aqc_get_switch_config_resp *switch_config;
4857 	struct i40e_aqc_switch_config_element_resp *element;
4858 	uint16_t start_seid = 0, num_reported;
4859 	int ret;
4860 
4861 	switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4862 			rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4863 	if (!switch_config) {
4864 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
4865 		return -ENOMEM;
4866 	}
4867 
4868 	/* Get the switch configurations */
4869 	ret = i40e_aq_get_switch_config(hw, switch_config,
4870 		I40E_AQ_LARGE_BUF, &start_seid, NULL);
4871 	if (ret != I40E_SUCCESS) {
4872 		PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4873 		goto fail;
4874 	}
4875 	num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4876 	if (num_reported != 1) { /* The number should be 1 */
4877 		PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4878 		goto fail;
4879 	}
4880 
4881 	/* Parse the switch configuration elements */
4882 	element = &(switch_config->element[0]);
4883 	if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4884 		pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4885 		pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4886 	} else
4887 		PMD_DRV_LOG(INFO, "Unknown element type");
4888 
4889 fail:
4890 	rte_free(switch_config);
4891 
4892 	return ret;
4893 }
4894 
4895 static int
4896 i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
4897 			uint32_t num)
4898 {
4899 	struct pool_entry *entry;
4900 
4901 	if (pool == NULL || num == 0)
4902 		return -EINVAL;
4903 
4904 	entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4905 	if (entry == NULL) {
4906 		PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4907 		return -ENOMEM;
4908 	}
4909 
4910 	/* Initialize the queue heap */
4911 	pool->num_free = num;
4912 	pool->num_alloc = 0;
4913 	pool->base = base;
4914 	LIST_INIT(&pool->alloc_list);
4915 	LIST_INIT(&pool->free_list);
4916 
4917 	/* Initialize the element */
4918 	entry->base = 0;
4919 	entry->len = num;
4920 
4921 	LIST_INSERT_HEAD(&pool->free_list, entry, next);
4922 	return 0;
4923 }
4924 
4925 static void
4926 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4927 {
4928 	struct pool_entry *entry, *next_entry;
4929 
4930 	if (pool == NULL)
4931 		return;
4932 
4933 	for (entry = LIST_FIRST(&pool->alloc_list);
4934 			entry && (next_entry = LIST_NEXT(entry, next), 1);
4935 			entry = next_entry) {
4936 		LIST_REMOVE(entry, next);
4937 		rte_free(entry);
4938 	}
4939 
4940 	for (entry = LIST_FIRST(&pool->free_list);
4941 			entry && (next_entry = LIST_NEXT(entry, next), 1);
4942 			entry = next_entry) {
4943 		LIST_REMOVE(entry, next);
4944 		rte_free(entry);
4945 	}
4946 
4947 	pool->num_free = 0;
4948 	pool->num_alloc = 0;
4949 	pool->base = 0;
4950 	LIST_INIT(&pool->alloc_list);
4951 	LIST_INIT(&pool->free_list);
4952 }
4953 
4954 static int
4955 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4956 		       uint32_t base)
4957 {
4958 	struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4959 	uint32_t pool_offset;
4960 	uint16_t len;
4961 	int insert;
4962 
4963 	if (pool == NULL) {
4964 		PMD_DRV_LOG(ERR, "Invalid parameter");
4965 		return -EINVAL;
4966 	}
4967 
4968 	pool_offset = base - pool->base;
4969 	/* Lookup in alloc list */
4970 	LIST_FOREACH(entry, &pool->alloc_list, next) {
4971 		if (entry->base == pool_offset) {
4972 			valid_entry = entry;
4973 			LIST_REMOVE(entry, next);
4974 			break;
4975 		}
4976 	}
4977 
4978 	/* Not found, return */
4979 	if (valid_entry == NULL) {
4980 		PMD_DRV_LOG(ERR, "Failed to find entry");
4981 		return -EINVAL;
4982 	}
4983 
4984 	/**
4985 	 * Found it; move it to the free list and try to merge.
4986 	 * To make merging easier, the list is always sorted by queue base.
4987 	 * Find the adjacent prev and next entries.
4988 	 */
4989 	prev = next = NULL;
4990 	LIST_FOREACH(entry, &pool->free_list, next) {
4991 		if (entry->base > valid_entry->base) {
4992 			next = entry;
4993 			break;
4994 		}
4995 		prev = entry;
4996 	}
4997 
4998 	insert = 0;
4999 	len = valid_entry->len;
5000 	/* Try to merge with the next one */
5001 	if (next != NULL) {
5002 		/* Merge with next one */
5003 		if (valid_entry->base + len == next->base) {
5004 			next->base = valid_entry->base;
5005 			next->len += len;
5006 			rte_free(valid_entry);
5007 			valid_entry = next;
5008 			insert = 1;
5009 		}
5010 	}
5011 
5012 	if (prev != NULL) {
5013 		/* Merge with previous one */
5014 		if (prev->base + prev->len == valid_entry->base) {
5015 			prev->len += len;
5016 			/* If it merged with the next one, remove the next node */
5017 			if (insert == 1) {
5018 				LIST_REMOVE(valid_entry, next);
5019 				rte_free(valid_entry);
5020 				valid_entry = NULL;
5021 			} else {
5022 				rte_free(valid_entry);
5023 				valid_entry = NULL;
5024 				insert = 1;
5025 			}
5026 		}
5027 	}
5028 
5029 	/* No entry was merged, insert it */
5030 	if (insert == 0) {
5031 		if (prev != NULL)
5032 			LIST_INSERT_AFTER(prev, valid_entry, next);
5033 		else if (next != NULL)
5034 			LIST_INSERT_BEFORE(next, valid_entry, next);
5035 		else /* It's empty list, insert to head */
5036 			LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
5037 	}
5038 
5039 	pool->num_free += len;
5040 	pool->num_alloc -= len;
5041 
5042 	return 0;
5043 }
5044 
5045 static int
5046 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
5047 		       uint16_t num)
5048 {
5049 	struct pool_entry *entry, *valid_entry;
5050 
5051 	if (pool == NULL || num == 0) {
5052 		PMD_DRV_LOG(ERR, "Invalid parameter");
5053 		return -EINVAL;
5054 	}
5055 
5056 	if (pool->num_free < num) {
5057 		PMD_DRV_LOG(ERR, "Insufficient resources: requested %u, available %u",
5058 			    num, pool->num_free);
5059 		return -ENOMEM;
5060 	}
5061 
5062 	valid_entry = NULL;
5063 	/* Look up in the free list and find the best-fit entry */
5064 	LIST_FOREACH(entry, &pool->free_list, next) {
5065 		if (entry->len >= num) {
5066 			/* An exact fit is best */
5067 			if (entry->len == num) {
5068 				valid_entry = entry;
5069 				break;
5070 			}
5071 			if (valid_entry == NULL || valid_entry->len > entry->len)
5072 				valid_entry = entry;
5073 		}
5074 	}
5075 
5076 	/* No entry satisfies the request, return */
5077 	if (valid_entry == NULL) {
5078 		PMD_DRV_LOG(ERR, "No valid entry found");
5079 		return -ENOMEM;
5080 	}
5081 	/**
5082 	 * The entry has exactly as many queues as requested;
5083 	 * remove it from the free_list.
5084 	 */
5085 	if (valid_entry->len == num) {
5086 		LIST_REMOVE(valid_entry, next);
5087 	} else {
5088 		/**
5089 		 * The entry has more queues than requested;
5090 		 * create a new entry for the alloc_list and adjust
5091 		 * the base and length of the free_list entry.
5092 		 */
5093 		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
5094 		if (entry == NULL) {
5095 			PMD_DRV_LOG(ERR,
5096 				"Failed to allocate memory for resource pool");
5097 			return -ENOMEM;
5098 		}
5099 		entry->base = valid_entry->base;
5100 		entry->len = num;
5101 		valid_entry->base += num;
5102 		valid_entry->len -= num;
5103 		valid_entry = entry;
5104 	}
5105 
5106 	/* Insert it into alloc list, not sorted */
5107 	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
5108 
5109 	pool->num_free -= valid_entry->len;
5110 	pool->num_alloc += valid_entry->len;
5111 
5112 	return valid_entry->base + pool->base;
5113 }
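/*
 * Editor's note: a minimal sketch of the resource pool API above
 * (init/alloc/free/destroy); the base and sizes are illustrative:
 *
 *	struct i40e_res_pool_info pool;
 *	int base;
 *
 *	i40e_res_pool_init(&pool, 0, 64);	// 64 resources starting at 0
 *	base = i40e_res_pool_alloc(&pool, 4);	// best fit, returns base index
 *	if (base >= 0)
 *		i40e_res_pool_free(&pool, base);// merged back into free_list
 *	i40e_res_pool_destroy(&pool);
 */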
5114 
5115 /**
5116  * bitmap_is_subset - Check whether src2 is subset of src1
5117  **/
5118 static inline int
5119 bitmap_is_subset(uint8_t src1, uint8_t src2)
5120 {
5121 	return !((src1 ^ src2) & src2);
5122 }
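/*
 * Editor's note: a quick truth check of the identity above;
 * (src1 ^ src2) & src2 keeps exactly the bits of src2 missing from src1:
 *
 *	bitmap_is_subset(0x0F, 0x05)	// -> 1, TC0|TC2 within TC0..TC3
 *	bitmap_is_subset(0x01, 0x03)	// -> 0, TC1 requested but not enabled
 */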
5123 
5124 static enum i40e_status_code
5125 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5126 {
5127 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5128 
5129 	/* If DCB is not supported, only default TC is supported */
5130 	if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
5131 		PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
5132 		return I40E_NOT_SUPPORTED;
5133 	}
5134 
5135 	if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
5136 		PMD_DRV_LOG(ERR,
5137 			"Enabled TC map 0x%x not applicable to HW support 0x%x",
5138 			hw->func_caps.enabled_tcmap, enabled_tcmap);
5139 		return I40E_NOT_SUPPORTED;
5140 	}
5141 	return I40E_SUCCESS;
5142 }
5143 
5144 int
5145 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
5146 				struct i40e_vsi_vlan_pvid_info *info)
5147 {
5148 	struct i40e_hw *hw;
5149 	struct i40e_vsi_context ctxt;
5150 	uint8_t vlan_flags = 0;
5151 	int ret;
5152 
5153 	if (vsi == NULL || info == NULL) {
5154 		PMD_DRV_LOG(ERR, "invalid parameters");
5155 		return I40E_ERR_PARAM;
5156 	}
5157 
5158 	if (info->on) {
5159 		vsi->info.pvid = info->config.pvid;
5160 		/**
5161 		 * If PVID insertion is enabled, only tagged packets
5162 		 * are allowed to be sent out.
5163 		 */
5164 		vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5165 				I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5166 	} else {
5167 		vsi->info.pvid = 0;
5168 		if (info->config.reject.tagged == 0)
5169 			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5170 
5171 		if (info->config.reject.untagged == 0)
5172 			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5173 	}
5174 	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5175 					I40E_AQ_VSI_PVLAN_MODE_MASK);
5176 	vsi->info.port_vlan_flags |= vlan_flags;
5177 	vsi->info.valid_sections =
5178 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5179 	memset(&ctxt, 0, sizeof(ctxt));
5180 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5181 	ctxt.seid = vsi->seid;
5182 
5183 	hw = I40E_VSI_TO_HW(vsi);
5184 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5185 	if (ret != I40E_SUCCESS)
5186 		PMD_DRV_LOG(ERR, "Failed to update VSI params");
5187 
5188 	return ret;
5189 }
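/*
 * Editor's note: a hedged sketch of driving i40e_vsi_vlan_pvid_set() to
 * insert PVID 100 on transmit; the field names follow the info struct
 * used above:
 *
 *	struct i40e_vsi_vlan_pvid_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.on = 1;			// enable insertion, tagged-only TX
 *	info.config.pvid = 100;
 *	ret = i40e_vsi_vlan_pvid_set(vsi, &info);
 */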
5190 
5191 static int
5192 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5193 {
5194 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5195 	int i, ret;
5196 	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5197 
5198 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5199 	if (ret != I40E_SUCCESS)
5200 		return ret;
5201 
5202 	if (!vsi->seid) {
5203 		PMD_DRV_LOG(ERR, "seid not valid");
5204 		return -EINVAL;
5205 	}
5206 
5207 	memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5208 	tc_bw_data.tc_valid_bits = enabled_tcmap;
5209 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5210 		tc_bw_data.tc_bw_credits[i] =
5211 			(enabled_tcmap & (1 << i)) ? 1 : 0;
5212 
5213 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5214 	if (ret != I40E_SUCCESS) {
5215 		PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5216 		return ret;
5217 	}
5218 
5219 	rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5220 					sizeof(vsi->info.qs_handle));
5221 	return I40E_SUCCESS;
5222 }
5223 
5224 static enum i40e_status_code
5225 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
5226 				 struct i40e_aqc_vsi_properties_data *info,
5227 				 uint8_t enabled_tcmap)
5228 {
5229 	enum i40e_status_code ret;
5230 	int i, total_tc = 0;
5231 	uint16_t qpnum_per_tc, bsf, qp_idx;
5232 
5233 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5234 	if (ret != I40E_SUCCESS)
5235 		return ret;
5236 
5237 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5238 		if (enabled_tcmap & (1 << i))
5239 			total_tc++;
5240 	if (total_tc == 0)
5241 		total_tc = 1;
5242 	vsi->enabled_tc = enabled_tcmap;
5243 
5244 	/* Number of queues per enabled TC */
5245 	qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
5246 	qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
5247 	bsf = rte_bsf32(qpnum_per_tc);
5248 
5249 	/* Adjust the queue number to actual queues that can be applied */
5250 	if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
5251 		vsi->nb_qps = qpnum_per_tc * total_tc;
5252 
5253 	/**
5254 	 * Configure TC and queue mapping parameters. Each enabled TC
5255 	 * is allocated qpnum_per_tc queues; a disabled TC is served
5256 	 * by the default queue.
5257 	 */
5258 	qp_idx = 0;
5259 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5260 		if (vsi->enabled_tc & (1 << i)) {
5261 			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
5262 					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5263 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5264 			qp_idx += qpnum_per_tc;
5265 		} else
5266 			info->tc_mapping[i] = 0;
5267 	}
5268 
5269 	/* Associate queue number with VSI */
5270 	if (vsi->type == I40E_VSI_SRIOV) {
5271 		info->mapping_flags |=
5272 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5273 		for (i = 0; i < vsi->nb_qps; i++)
5274 			info->queue_mapping[i] =
5275 				rte_cpu_to_le_16(vsi->base_queue + i);
5276 	} else {
5277 		info->mapping_flags |=
5278 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5279 		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
5280 	}
5281 	info->valid_sections |=
5282 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5283 
5284 	return I40E_SUCCESS;
5285 }
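/*
 * Editor's note: a worked example of the tc_mapping encoding above. With
 * nb_qps = 16 and two TCs enabled, qpnum_per_tc = 8 and
 * bsf = rte_bsf32(8) = 3, so each enabled TC carries a queue offset plus
 * a power-of-two queue count:
 *
 *	tc_mapping[0] = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *			(3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 *	tc_mapping[1] = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *			(3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 */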
5286 
5287 static int
5288 i40e_veb_release(struct i40e_veb *veb)
5289 {
5290 	struct i40e_vsi *vsi;
5291 	struct i40e_hw *hw;
5292 
5293 	if (veb == NULL)
5294 		return -EINVAL;
5295 
5296 	if (!TAILQ_EMPTY(&veb->head)) {
5297 		PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5298 		return -EACCES;
5299 	}
5300 	/* associate_vsi field is NULL for floating VEB */
5301 	if (veb->associate_vsi != NULL) {
5302 		vsi = veb->associate_vsi;
5303 		hw = I40E_VSI_TO_HW(vsi);
5304 
5305 		vsi->uplink_seid = veb->uplink_seid;
5306 		vsi->veb = NULL;
5307 	} else {
5308 		veb->associate_pf->main_vsi->floating_veb = NULL;
5309 		hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5310 	}
5311 
5312 	i40e_aq_delete_element(hw, veb->seid, NULL);
5313 	rte_free(veb);
5314 	return I40E_SUCCESS;
5315 }
5316 
5317 /* Setup a veb */
5318 static struct i40e_veb *
5319 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5320 {
5321 	struct i40e_veb *veb;
5322 	int ret;
5323 	struct i40e_hw *hw;
5324 
5325 	if (pf == NULL) {
5326 		PMD_DRV_LOG(ERR,
5327 			    "veb setup failed, associated PF shouldn't be NULL");
5328 		return NULL;
5329 	}
5330 	hw = I40E_PF_TO_HW(pf);
5331 
5332 	veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5333 	if (!veb) {
5334 		PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5335 		goto fail;
5336 	}
5337 
5338 	veb->associate_vsi = vsi;
5339 	veb->associate_pf = pf;
5340 	TAILQ_INIT(&veb->head);
5341 	veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5342 
5343 	/* create floating veb if vsi is NULL */
5344 	if (vsi != NULL) {
5345 		ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5346 				      I40E_DEFAULT_TCMAP, false,
5347 				      &veb->seid, false, NULL);
5348 	} else {
5349 		ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5350 				      true, &veb->seid, false, NULL);
5351 	}
5352 
5353 	if (ret != I40E_SUCCESS) {
5354 		PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5355 			    hw->aq.asq_last_status);
5356 		goto fail;
5357 	}
5358 	veb->enabled_tc = I40E_DEFAULT_TCMAP;
5359 
5360 	/* get statistics index */
5361 	ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5362 				&veb->stats_idx, NULL, NULL, NULL);
5363 	if (ret != I40E_SUCCESS) {
5364 		PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5365 			    hw->aq.asq_last_status);
5366 		goto fail;
5367 	}
5368 	/* Get VEB bandwidth, to be implemented */
5369 	/* Now associated vsi binding to the VEB, set uplink to this VEB */
5370 	if (vsi)
5371 		vsi->uplink_seid = veb->seid;
5372 
5373 	return veb;
5374 fail:
5375 	rte_free(veb);
5376 	return NULL;
5377 }
5378 
5379 int
5380 i40e_vsi_release(struct i40e_vsi *vsi)
5381 {
5382 	struct i40e_pf *pf;
5383 	struct i40e_hw *hw;
5384 	struct i40e_vsi_list *vsi_list;
5385 	void *temp;
5386 	int ret;
5387 	struct i40e_mac_filter *f;
5388 	uint16_t user_param;
5389 
5390 	if (!vsi)
5391 		return I40E_SUCCESS;
5392 
5393 	if (!vsi->adapter)
5394 		return -EFAULT;
5395 
5396 	user_param = vsi->user_param;
5397 
5398 	pf = I40E_VSI_TO_PF(vsi);
5399 	hw = I40E_VSI_TO_HW(vsi);
5400 
5401 	/* VSI has children attached, release the children first */
5402 	if (vsi->veb) {
5403 		RTE_TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5404 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5405 				return -1;
5406 		}
5407 		i40e_veb_release(vsi->veb);
5408 	}
5409 
5410 	if (vsi->floating_veb) {
5411 		RTE_TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head,
5412 			list, temp) {
5413 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5414 				return -1;
5415 		}
5416 	}
5417 
5418 	/* Remove all macvlan filters of the VSI */
5419 	i40e_vsi_remove_all_macvlan_filter(vsi);
5420 	RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5421 		rte_free(f);
5422 
5423 	if (vsi->type != I40E_VSI_MAIN &&
5424 	    ((vsi->type != I40E_VSI_SRIOV) ||
5425 	    !pf->floating_veb_list[user_param])) {
5426 		/* Remove vsi from parent's sibling list */
5427 		if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5428 			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5429 			return I40E_ERR_PARAM;
5430 		}
5431 		TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5432 				&vsi->sib_vsi_list, list);
5433 
5434 		/* Remove all switch elements of the VSI */
5435 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5436 		if (ret != I40E_SUCCESS)
5437 			PMD_DRV_LOG(ERR, "Failed to delete element");
5438 	}
5439 
5440 	if ((vsi->type == I40E_VSI_SRIOV) &&
5441 	    pf->floating_veb_list[user_param]) {
5442 		/* Remove vsi from parent's sibling list */
5443 		if (vsi->parent_vsi == NULL ||
5444 		    vsi->parent_vsi->floating_veb == NULL) {
5445 			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5446 			return I40E_ERR_PARAM;
5447 		}
5448 		TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5449 			     &vsi->sib_vsi_list, list);
5450 
5451 		/* Remove all switch elements of the VSI */
5452 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5453 		if (ret != I40E_SUCCESS)
5454 			PMD_DRV_LOG(ERR, "Failed to delete element");
5455 	}
5456 
5457 	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5458 
5459 	if (vsi->type != I40E_VSI_SRIOV)
5460 		i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5461 	rte_free(vsi);
5462 
5463 	return I40E_SUCCESS;
5464 }
5465 
5466 static int
5467 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5468 {
5469 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5470 	struct i40e_aqc_remove_macvlan_element_data def_filter;
5471 	struct i40e_mac_filter_info filter;
5472 	int ret;
5473 
5474 	if (vsi->type != I40E_VSI_MAIN)
5475 		return I40E_ERR_CONFIG;
5476 	memset(&def_filter, 0, sizeof(def_filter));
5477 	rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5478 					ETH_ADDR_LEN);
5479 	def_filter.vlan_tag = 0;
5480 	def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5481 				I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5482 	ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5483 	if (ret != I40E_SUCCESS) {
5484 		struct i40e_mac_filter *f;
5485 		struct rte_ether_addr *mac;
5486 
5487 		PMD_DRV_LOG(DEBUG,
5488 			    "Cannot remove the default macvlan filter");
5489 		/* Need to add the permanent MAC to the MAC list */
5490 		f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5491 		if (f == NULL) {
5492 			PMD_DRV_LOG(ERR, "failed to allocate memory");
5493 			return I40E_ERR_NO_MEMORY;
5494 		}
5495 		mac = &f->mac_info.mac_addr;
5496 		rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5497 				ETH_ADDR_LEN);
5498 		f->mac_info.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5499 		TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5500 		vsi->mac_num++;
5501 
5502 		return ret;
5503 	}
5504 	rte_memcpy(&filter.mac_addr,
5505 		(struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5506 	filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5507 	return i40e_vsi_add_mac(vsi, &filter);
5508 }
5509 
5510 /*
5511  * i40e_vsi_get_bw_config - Query VSI BW Information
5512  * @vsi: the VSI to be queried
5513  *
5514  * Returns 0 on success, negative value on failure
5515  */
5516 static enum i40e_status_code
5517 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5518 {
5519 	struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5520 	struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5521 	struct i40e_hw *hw = &vsi->adapter->hw;
5522 	i40e_status ret;
5523 	int i;
5524 	uint32_t bw_max;
5525 
5526 	memset(&bw_config, 0, sizeof(bw_config));
5527 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5528 	if (ret != I40E_SUCCESS) {
5529 		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5530 			    hw->aq.asq_last_status);
5531 		return ret;
5532 	}
5533 
5534 	memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5535 	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5536 					&ets_sla_config, NULL);
5537 	if (ret != I40E_SUCCESS) {
5538 		PMD_DRV_LOG(ERR,
5539 			"VSI failed to get TC bandwidth configuration %u",
5540 			hw->aq.asq_last_status);
5541 		return ret;
5542 	}
5543 
5544 	/* store and print out BW info */
5545 	vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5546 	vsi->bw_info.bw_max = bw_config.max_bw;
5547 	PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5548 	PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5549 	bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5550 		    (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5551 		     I40E_16_BIT_WIDTH);
5552 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5553 		vsi->bw_info.bw_ets_share_credits[i] =
5554 				ets_sla_config.share_credits[i];
5555 		vsi->bw_info.bw_ets_credits[i] =
5556 				rte_le_to_cpu_16(ets_sla_config.credits[i]);
5557 		/* 4 bits per TC, 4th bit is reserved */
5558 		vsi->bw_info.bw_ets_max[i] =
5559 			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5560 				  RTE_LEN2MASK(3, uint8_t));
5561 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5562 			    vsi->bw_info.bw_ets_share_credits[i]);
5563 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5564 			    vsi->bw_info.bw_ets_credits[i]);
5565 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5566 			    vsi->bw_info.bw_ets_max[i]);
5567 	}
5568 
5569 	return I40E_SUCCESS;
5570 }
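/*
 * Editor's note: a worked example of the per-TC max-credit decode above.
 * Each TC occupies 4 bits of bw_max with the top bit reserved, so with an
 * illustrative bw_max = 0x4321:
 *
 *	TC0: (0x4321 >> 0)  & 0x7 = 1
 *	TC1: (0x4321 >> 4)  & 0x7 = 2
 *	TC2: (0x4321 >> 8)  & 0x7 = 3
 *	TC3: (0x4321 >> 12) & 0x7 = 4
 */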
5571 
5572 /* i40e_enable_pf_lb
5573  * @pf: pointer to the pf structure
5574  *
5575  * allow loopback on pf
5576  */
5577 static inline void
5578 i40e_enable_pf_lb(struct i40e_pf *pf)
5579 {
5580 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5581 	struct i40e_vsi_context ctxt;
5582 	int ret;
5583 
5584 	/* Use the FW API if FW >= v5.0 */
5585 	if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5586 		PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5587 		return;
5588 	}
5589 
5590 	memset(&ctxt, 0, sizeof(ctxt));
5591 	ctxt.seid = pf->main_vsi_seid;
5592 	ctxt.pf_num = hw->pf_id;
5593 	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5594 	if (ret) {
5595 		PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5596 			    ret, hw->aq.asq_last_status);
5597 		return;
5598 	}
5599 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5600 	ctxt.info.valid_sections =
5601 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5602 	ctxt.info.switch_id |=
5603 		rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5604 
5605 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5606 	if (ret)
5607 		PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5608 			    hw->aq.asq_last_status);
5609 }
5610 
5611 /* Setup a VSI */
5612 struct i40e_vsi *
5613 i40e_vsi_setup(struct i40e_pf *pf,
5614 	       enum i40e_vsi_type type,
5615 	       struct i40e_vsi *uplink_vsi,
5616 	       uint16_t user_param)
5617 {
5618 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5619 	struct i40e_vsi *vsi;
5620 	struct i40e_mac_filter_info filter;
5621 	int ret;
5622 	struct i40e_vsi_context ctxt;
5623 	struct rte_ether_addr broadcast =
5624 		{.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5625 
5626 	if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5627 	    uplink_vsi == NULL) {
5628 		PMD_DRV_LOG(ERR,
5629 			"VSI setup failed, VSI link shouldn't be NULL");
5630 		return NULL;
5631 	}
5632 
5633 	if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5634 		PMD_DRV_LOG(ERR,
5635 			"VSI setup failed, MAIN VSI uplink VSI should be NULL");
5636 		return NULL;
5637 	}
5638 
5639 	/* Two situations:
5640 	 * 1. type is not MAIN and the uplink VSI is not NULL:
5641 	 *    if the uplink VSI has no VEB yet, create one under its veb field.
5642 	 * 2. type is SRIOV and the uplink is NULL:
5643 	 *    if the floating VEB is NULL, create one under floating_veb.
5644 	 */
5645 
5646 	if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5647 	    uplink_vsi->veb == NULL) {
5648 		uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5649 
5650 		if (uplink_vsi->veb == NULL) {
5651 			PMD_DRV_LOG(ERR, "VEB setup failed");
5652 			return NULL;
5653 		}
5654 		/* Set ALLOWLOOPBACK on the PF when the VEB is created */
5655 		i40e_enable_pf_lb(pf);
5656 	}
5657 
5658 	if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5659 	    pf->main_vsi->floating_veb == NULL) {
5660 		pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5661 
5662 		if (pf->main_vsi->floating_veb == NULL) {
5663 			PMD_DRV_LOG(ERR, "VEB setup failed");
5664 			return NULL;
5665 		}
5666 	}
5667 
5668 	vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5669 	if (!vsi) {
5670 		PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5671 		return NULL;
5672 	}
5673 	TAILQ_INIT(&vsi->mac_list);
5674 	vsi->type = type;
5675 	vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5676 	vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5677 	vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5678 	vsi->user_param = user_param;
5679 	vsi->vlan_anti_spoof_on = 0;
5680 	vsi->vlan_filter_on = 0;
5681 	/* Allocate queues */
5682 	switch (vsi->type) {
5683 	case I40E_VSI_MAIN:
5684 		vsi->nb_qps = pf->lan_nb_qps;
5685 		break;
5686 	case I40E_VSI_SRIOV:
5687 		vsi->nb_qps = pf->vf_nb_qps;
5688 		break;
5689 	case I40E_VSI_VMDQ2:
5690 		vsi->nb_qps = pf->vmdq_nb_qps;
5691 		break;
5692 	case I40E_VSI_FDIR:
5693 		vsi->nb_qps = pf->fdir_nb_qps;
5694 		break;
5695 	default:
5696 		goto fail_mem;
5697 	}
5698 	/*
5699 	 * The filter status descriptor is reported in RX queue 0,
5700 	 * while the TX queue for FDIR filter programming has no such
5701 	 * constraint and may be any queue.
5702 	 * To keep it simple, the FDIR VSI uses queue pair 0.
5703 	 * To guarantee that, queue allocation must be done before
5704 	 * this function is called.
5705 	 */
5706 	if (type != I40E_VSI_FDIR) {
5707 		ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5708 		if (ret < 0) {
5709 			PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5710 				    vsi->seid, ret);
5711 			goto fail_mem;
5712 		}
5713 		vsi->base_queue = ret;
5714 	} else
5715 		vsi->base_queue = I40E_FDIR_QUEUE_ID;
5716 
5717 	/* VF has MSIX interrupt in VF range, don't allocate here */
5718 	if (type == I40E_VSI_MAIN) {
5719 		if (pf->support_multi_driver) {
5720 			/* If multi-driver support is on, INT0 must be used
5721 			 * instead of allocating from the MSI-X pool. The MSI-X
5722 			 * pool starts from INT1, so it is safe to simply set
5723 			 * msix_intr to 0 and nb_msix to 1 without calling
5724 			 * i40e_res_pool_alloc.
5724 			 */
5725 			vsi->msix_intr = 0;
5726 			vsi->nb_msix = 1;
5727 		} else {
5728 			ret = i40e_res_pool_alloc(&pf->msix_pool,
5729 						  RTE_MIN(vsi->nb_qps,
5730 						     RTE_MAX_RXTX_INTR_VEC_ID));
5731 			if (ret < 0) {
5732 				PMD_DRV_LOG(ERR,
5733 					    "VSI MAIN %d get heap failed %d",
5734 					    vsi->seid, ret);
5735 				goto fail_queue_alloc;
5736 			}
5737 			vsi->msix_intr = ret;
5738 			vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5739 					       RTE_MAX_RXTX_INTR_VEC_ID);
5740 		}
5741 	} else if (type != I40E_VSI_SRIOV) {
5742 		ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5743 		if (ret < 0) {
5744 			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5745 			if (type != I40E_VSI_FDIR)
5746 				goto fail_queue_alloc;
5747 			vsi->msix_intr = 0;
5748 			vsi->nb_msix = 0;
5749 		} else {
5750 			vsi->msix_intr = ret;
5751 			vsi->nb_msix = 1;
5752 		}
5753 	} else {
5754 		vsi->msix_intr = 0;
5755 		vsi->nb_msix = 0;
5756 	}
5757 
5758 	/* Add VSI */
5759 	if (type == I40E_VSI_MAIN) {
5760 		/* For the main VSI, no need to add it since it's the default one */
5761 		vsi->uplink_seid = pf->mac_seid;
5762 		vsi->seid = pf->main_vsi_seid;
5763 		/* Bind queues with specific MSIX interrupt */
5764 		/**
5765 		 * Needs 2 interrupt at least, one for misc cause which will
5766 		 * At least 2 interrupts are needed: one for the misc cause,
5767 		 * enabled from the OS side; another for the queues, bound to
5768 		 * the interrupt from the device side only.
5769 
5770 		/* Get default VSI parameters from hardware */
5771 		memset(&ctxt, 0, sizeof(ctxt));
5772 		ctxt.seid = vsi->seid;
5773 		ctxt.pf_num = hw->pf_id;
5774 		ctxt.uplink_seid = vsi->uplink_seid;
5775 		ctxt.vf_num = 0;
5776 		ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5777 		if (ret != I40E_SUCCESS) {
5778 			PMD_DRV_LOG(ERR, "Failed to get VSI params");
5779 			goto fail_msix_alloc;
5780 		}
5781 		rte_memcpy(&vsi->info, &ctxt.info,
5782 			sizeof(struct i40e_aqc_vsi_properties_data));
5783 		vsi->vsi_id = ctxt.vsi_number;
5784 		vsi->info.valid_sections = 0;
5785 
5786 		/* Configure tc, enabled TC0 only */
5787 		if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5788 			I40E_SUCCESS) {
5789 			PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5790 			goto fail_msix_alloc;
5791 		}
5792 
5793 		/* TC, queue mapping */
5794 		memset(&ctxt, 0, sizeof(ctxt));
5795 		vsi->info.valid_sections |=
5796 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5797 		vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5798 					I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5799 		rte_memcpy(&ctxt.info, &vsi->info,
5800 			sizeof(struct i40e_aqc_vsi_properties_data));
5801 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5802 						I40E_DEFAULT_TCMAP);
5803 		if (ret != I40E_SUCCESS) {
5804 			PMD_DRV_LOG(ERR,
5805 				"Failed to configure TC queue mapping");
5806 			goto fail_msix_alloc;
5807 		}
5808 		ctxt.seid = vsi->seid;
5809 		ctxt.pf_num = hw->pf_id;
5810 		ctxt.uplink_seid = vsi->uplink_seid;
5811 		ctxt.vf_num = 0;
5812 
5813 		/* Update VSI parameters */
5814 		ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5815 		if (ret != I40E_SUCCESS) {
5816 			PMD_DRV_LOG(ERR, "Failed to update VSI params");
5817 			goto fail_msix_alloc;
5818 		}
5819 
5820 		rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5821 						sizeof(vsi->info.tc_mapping));
5822 		rte_memcpy(&vsi->info.queue_mapping,
5823 				&ctxt.info.queue_mapping,
5824 			sizeof(vsi->info.queue_mapping));
5825 		vsi->info.mapping_flags = ctxt.info.mapping_flags;
5826 		vsi->info.valid_sections = 0;
5827 
5828 		rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5829 				ETH_ADDR_LEN);
5830 
5831 		/**
5832 		 * Updating the default filter settings is necessary to prevent
5833 		 * reception of tagged packets.
5834 		 * Some old firmware configurations load a default macvlan
5835 		 * filter which accepts both tagged and untagged packets.
5836 		 * The update replaces it with a normal filter if needed.
5837 		 * For NVM 4.2.2 or later, the update is no longer needed.
5838 		 * Firmware with a correct configuration loads the default
5839 		 * macvlan filter, which is expected and cannot be removed.
5840 		 */
5841 		i40e_update_default_filter_setting(vsi);
5842 		i40e_config_qinq(hw, vsi);
5843 	} else if (type == I40E_VSI_SRIOV) {
5844 		memset(&ctxt, 0, sizeof(ctxt));
5845 		/**
5846 		 * For other VSIs, the uplink_seid equals the uplink VSI's
5847 		 * uplink_seid since they share the same VEB.
5848 		 */
5849 		if (uplink_vsi == NULL)
5850 			vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5851 		else
5852 			vsi->uplink_seid = uplink_vsi->uplink_seid;
5853 		ctxt.pf_num = hw->pf_id;
5854 		ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5855 		ctxt.uplink_seid = vsi->uplink_seid;
5856 		ctxt.connection_type = 0x1;
5857 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5858 
5859 		/* Use the VEB configuration if FW >= v5.0 */
5860 		if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
5861 			/* Configure switch ID */
5862 			ctxt.info.valid_sections |=
5863 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5864 			ctxt.info.switch_id =
5865 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5866 		}
5867 
5868 		/* Configure port/vlan */
5869 		ctxt.info.valid_sections |=
5870 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5871 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5872 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5873 						hw->func_caps.enabled_tcmap);
5874 		if (ret != I40E_SUCCESS) {
5875 			PMD_DRV_LOG(ERR,
5876 				"Failed to configure TC queue mapping");
5877 			goto fail_msix_alloc;
5878 		}
5879 
5880 		ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5881 		ctxt.info.valid_sections |=
5882 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5883 		/**
5884 		 * Since the VSI is not created yet, only configure the
5885 		 * parameters; the VSI will be added below.
5886 		 */
5887 
5888 		i40e_config_qinq(hw, vsi);
5889 	} else if (type == I40E_VSI_VMDQ2) {
5890 		memset(&ctxt, 0, sizeof(ctxt));
5891 		/*
5892 		 * For other VSIs, the uplink_seid equals the uplink VSI's
5893 		 * uplink_seid since they share the same VEB.
5894 		 */
5895 		vsi->uplink_seid = uplink_vsi->uplink_seid;
5896 		ctxt.pf_num = hw->pf_id;
5897 		ctxt.vf_num = 0;
5898 		ctxt.uplink_seid = vsi->uplink_seid;
5899 		ctxt.connection_type = 0x1;
5900 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5901 
5902 		ctxt.info.valid_sections |=
5903 				rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5904 		/* user_param carries flag to enable loop back */
5905 		/* user_param carries a flag to enable loopback */
5906 			ctxt.info.switch_id =
5907 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5908 			ctxt.info.switch_id |=
5909 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5910 		}
5911 
5912 		/* Configure port/vlan */
5913 		ctxt.info.valid_sections |=
5914 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5915 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5916 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5917 						I40E_DEFAULT_TCMAP);
5918 		if (ret != I40E_SUCCESS) {
5919 			PMD_DRV_LOG(ERR,
5920 				"Failed to configure TC queue mapping");
5921 			goto fail_msix_alloc;
5922 		}
5923 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5924 		ctxt.info.valid_sections |=
5925 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5926 	} else if (type == I40E_VSI_FDIR) {
5927 		memset(&ctxt, 0, sizeof(ctxt));
5928 		vsi->uplink_seid = uplink_vsi->uplink_seid;
5929 		ctxt.pf_num = hw->pf_id;
5930 		ctxt.vf_num = 0;
5931 		ctxt.uplink_seid = vsi->uplink_seid;
5932 		ctxt.connection_type = 0x1;     /* regular data port */
5933 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5934 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5935 						I40E_DEFAULT_TCMAP);
5936 		if (ret != I40E_SUCCESS) {
5937 			PMD_DRV_LOG(ERR,
5938 				"Failed to configure TC queue mapping.");
5939 			goto fail_msix_alloc;
5940 		}
5941 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5942 		ctxt.info.valid_sections |=
5943 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5944 	} else {
5945 		PMD_DRV_LOG(ERR, "VSI: other VSI types are not supported yet");
5946 		goto fail_msix_alloc;
5947 	}
5948 
5949 	if (vsi->type != I40E_VSI_MAIN) {
5950 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5951 		if (ret != I40E_SUCCESS) {
5952 			PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5953 				    hw->aq.asq_last_status);
5954 			goto fail_msix_alloc;
5955 		}
5956 		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5957 		vsi->info.valid_sections = 0;
5958 		vsi->seid = ctxt.seid;
5959 		vsi->vsi_id = ctxt.vsi_number;
5960 		vsi->sib_vsi_list.vsi = vsi;
5961 		if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5962 			TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5963 					  &vsi->sib_vsi_list, list);
5964 		} else {
5965 			TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5966 					  &vsi->sib_vsi_list, list);
5967 		}
5968 	}
5969 
5970 	/* MAC/VLAN configuration */
5971 	rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
5972 	filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5973 
5974 	ret = i40e_vsi_add_mac(vsi, &filter);
5975 	if (ret != I40E_SUCCESS) {
5976 		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5977 		goto fail_msix_alloc;
5978 	}
5979 
5980 	/* Get VSI BW information */
5981 	i40e_vsi_get_bw_config(vsi);
5982 	return vsi;
5983 fail_msix_alloc:
5984 	i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5985 fail_queue_alloc:
5986 	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5987 fail_mem:
5988 	rte_free(vsi);
5989 	return NULL;
5990 }
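/*
 * Editor's note: hedged examples of the setup call above, matching how it
 * is used elsewhere in this file (the main VSI in i40e_pf_setup(), and a
 * VMDq pool with loopback requested via user_param = 1):
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
 *	vmdq_vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi, 1);
 *	if (vmdq_vsi == NULL)
 *		PMD_DRV_LOG(ERR, "VMDq VSI setup failed");
 */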
5991 
5992 /* Configure vlan filter on or off */
5993 int
5994 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5995 {
5996 	int i, num;
5997 	struct i40e_mac_filter *f;
5998 	void *temp;
5999 	struct i40e_mac_filter_info *mac_filter;
6000 	enum i40e_mac_filter_type desired_filter;
6001 	int ret = I40E_SUCCESS;
6002 
6003 	if (on) {
6004 		/* Filter to match MAC and VLAN */
6005 		desired_filter = I40E_MACVLAN_PERFECT_MATCH;
6006 	} else {
6007 		/* Filter to match only MAC */
6008 		desired_filter = I40E_MAC_PERFECT_MATCH;
6009 	}
6010 
6011 	num = vsi->mac_num;
6012 
6013 	mac_filter = rte_zmalloc("mac_filter_info_data",
6014 				 num * sizeof(*mac_filter), 0);
6015 	if (mac_filter == NULL) {
6016 		PMD_DRV_LOG(ERR, "failed to allocate memory");
6017 		return I40E_ERR_NO_MEMORY;
6018 	}
6019 
6020 	i = 0;
6021 
6022 	/* Remove all existing mac */
6023 	RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
6024 		mac_filter[i] = f->mac_info;
6025 		ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
6026 		if (ret) {
6027 			PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6028 				    on ? "enable" : "disable");
6029 			goto DONE;
6030 		}
6031 		i++;
6032 	}
6033 
6034 	/* Override with new filter */
6035 	for (i = 0; i < num; i++) {
6036 		mac_filter[i].filter_type = desired_filter;
6037 		ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
6038 		if (ret) {
6039 			PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6040 				    on ? "enable" : "disable");
6041 			goto DONE;
6042 		}
6043 	}
6044 
6045 DONE:
6046 	rte_free(mac_filter);
6047 	return ret;
6048 }
6049 
6050 /* Configure vlan stripping on or off */
6051 int
6052 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
6053 {
6054 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6055 	struct i40e_vsi_context ctxt;
6056 	uint8_t vlan_flags;
6057 	int ret = I40E_SUCCESS;
6058 
6059 	/* Check if it has been already on or off */
6060 	/* Check if it is already on or off */
6061 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
6062 		if (on) {
6063 			if ((vsi->info.port_vlan_flags &
6064 				I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
6065 				return 0; /* already on */
6066 		} else {
6067 			if ((vsi->info.port_vlan_flags &
6068 				I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
6069 				I40E_AQ_VSI_PVLAN_EMOD_MASK)
6070 				return 0; /* already off */
6071 		}
6072 	}
6073 
6074 	if (on)
6075 		vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6076 	else
6077 		vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
6078 	vsi->info.valid_sections =
6079 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6080 	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
6081 	vsi->info.port_vlan_flags |= vlan_flags;
6082 	ctxt.seid = vsi->seid;
6083 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
6084 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6085 	if (ret)
6086 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
6087 			    on ? "enable" : "disable");
6088 
6089 	return ret;
6090 }
6091 
6092 static int
6093 i40e_dev_init_vlan(struct rte_eth_dev *dev)
6094 {
6095 	struct rte_eth_dev_data *data = dev->data;
6096 	int ret;
6097 	int mask = 0;
6098 
6099 	/* Apply vlan offload setting */
6100 	mask = RTE_ETH_VLAN_STRIP_MASK |
6101 	       RTE_ETH_QINQ_STRIP_MASK |
6102 	       RTE_ETH_VLAN_FILTER_MASK |
6103 	       RTE_ETH_VLAN_EXTEND_MASK;
6104 	ret = i40e_vlan_offload_set(dev, mask);
6105 	if (ret) {
6106 		PMD_DRV_LOG(INFO, "Failed to update vlan offload");
6107 		return ret;
6108 	}
6109 
6110 	/* Apply pvid setting */
6111 	ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
6112 				data->dev_conf.txmode.hw_vlan_insert_pvid);
6113 	if (ret)
6114 		PMD_DRV_LOG(INFO, "Failed to update VSI params");
6115 
6116 	return ret;
6117 }
6118 
6119 static int
6120 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
6121 {
6122 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6123 
6124 	return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
6125 }
6126 
6127 static int
6128 i40e_update_flow_control(struct i40e_hw *hw)
6129 {
6130 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
6131 	struct i40e_link_status link_status;
6132 	uint32_t rxfc = 0, txfc = 0, reg;
6133 	uint8_t an_info;
6134 	int ret;
6135 
6136 	memset(&link_status, 0, sizeof(link_status));
6137 	ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
6138 	if (ret != I40E_SUCCESS) {
6139 		PMD_DRV_LOG(ERR, "Failed to get link status information");
6140 		goto write_reg; /* Disable flow control */
6141 	}
6142 
6143 	an_info = hw->phy.link_info.an_info;
6144 	if (!(an_info & I40E_AQ_AN_COMPLETED)) {
6145 		PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
6146 		ret = I40E_ERR_NOT_READY;
6147 		goto write_reg; /* Disable flow control */
6148 	}
6149 	/**
6150 	 * If link auto negotiation is enabled, flow control needs to
6151 	 * be configured according to it
6152 	 */
6153 	switch (an_info & I40E_LINK_PAUSE_RXTX) {
6154 	case I40E_LINK_PAUSE_RXTX:
6155 		rxfc = 1;
6156 		txfc = 1;
6157 		hw->fc.current_mode = I40E_FC_FULL;
6158 		break;
6159 	case I40E_AQ_LINK_PAUSE_RX:
6160 		rxfc = 1;
6161 		hw->fc.current_mode = I40E_FC_RX_PAUSE;
6162 		break;
6163 	case I40E_AQ_LINK_PAUSE_TX:
6164 		txfc = 1;
6165 		hw->fc.current_mode = I40E_FC_TX_PAUSE;
6166 		break;
6167 	default:
6168 		hw->fc.current_mode = I40E_FC_NONE;
6169 		break;
6170 	}
6171 
6172 write_reg:
6173 	I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
6174 		txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
6175 	reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
6176 	reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
6177 	reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
6178 	I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
6179 
6180 	return ret;
6181 }
6182 
6183 /* PF setup */
6184 static int
6185 i40e_pf_setup(struct i40e_pf *pf)
6186 {
6187 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6188 	struct i40e_filter_control_settings settings;
6189 	struct i40e_vsi *vsi;
6190 	int ret;
6191 
6192 	/* Clear all stats counters */
6193 	pf->offset_loaded = FALSE;
6194 	memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
6195 	memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
6196 	memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
6197 	memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
6198 	pf->rx_err1 = 0;
6199 	pf->rx_err1_offset = 0;
6200 
6201 	ret = i40e_pf_get_switch_config(pf);
6202 	if (ret != I40E_SUCCESS) {
6203 		PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
6204 		return ret;
6205 	}
6206 
6207 	ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
6208 	if (ret)
6209 		PMD_INIT_LOG(WARNING,
6210 			"failed to allocate switch domain for device %d", ret);
6211 
6212 	if (pf->flags & I40E_FLAG_FDIR) {
6213 		/* Allocate the queue first so that FDIR uses queue pair 0 */
6214 		ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
6215 		if (ret != I40E_FDIR_QUEUE_ID) {
6216 			PMD_DRV_LOG(ERR,
6217 				"queue allocation failed for FDIR: ret = %d",
6218 				ret);
6219 			pf->flags &= ~I40E_FLAG_FDIR;
6220 		}
6221 	}
6222 	/* Main VSI setup */
6223 	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
6224 	if (!vsi) {
6225 		PMD_DRV_LOG(ERR, "Setup of main vsi failed");
6226 		return I40E_ERR_NOT_READY;
6227 	}
6228 	pf->main_vsi = vsi;
6229 
6230 	/* Configure filter control */
6231 	memset(&settings, 0, sizeof(settings));
6232 	if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_128)
6233 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
6234 	else if (hw->func_caps.rss_table_size == RTE_ETH_RSS_RETA_SIZE_512)
6235 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
6236 	else {
6237 		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
6238 			hw->func_caps.rss_table_size);
6239 		return I40E_ERR_PARAM;
6240 	}
6241 	PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
6242 		hw->func_caps.rss_table_size);
6243 	pf->hash_lut_size = hw->func_caps.rss_table_size;
6244 
6245 	/* Enable ethtype and macvlan filters */
6246 	settings.enable_ethtype = TRUE;
6247 	settings.enable_macvlan = TRUE;
6248 	ret = i40e_set_filter_control(hw, &settings);
6249 	if (ret)
6250 		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
6251 								ret);
6252 
6253 	/* Update flow control according to the auto negotiation */
6254 	i40e_update_flow_control(hw);
6255 
6256 	return I40E_SUCCESS;
6257 }
6258 
6259 int
6260 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6261 {
6262 	uint32_t reg;
6263 	uint16_t j;
6264 
6265 	/**
6266 	 * Set or clear TX Queue Disable flags,
6267 	 * as required by the hardware.
6268 	 */
6269 	i40e_pre_tx_queue_cfg(hw, q_idx, on);
6270 	rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6271 
6272 	/* Wait until the request is finished */
6273 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6274 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6275 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6276 		if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6277 			((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
6278 							& 0x1))) {
6279 			break;
6280 		}
6281 	}
6282 	if (on) {
6283 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6284 			return I40E_SUCCESS; /* already on, skip next steps */
6285 
6286 		I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6287 		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6288 	} else {
6289 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6290 			return I40E_SUCCESS; /* already off, skip next steps */
6291 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6292 	}
6293 	/* Write the register */
6294 	I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6295 	/* Check the result */
6296 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6297 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6298 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6299 		if (on) {
6300 			if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6301 				(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6302 				break;
6303 		} else {
6304 			if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6305 				!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6306 				break;
6307 		}
6308 	}
6309 	/* Check whether it timed out */
6310 	if (j >= I40E_CHK_Q_ENA_COUNT) {
6311 		PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6312 			    (on ? "enable" : "disable"), q_idx);
6313 		return I40E_ERR_TIMEOUT;
6314 	}
6315 
6316 	return I40E_SUCCESS;
6317 }
6318 
6319 int
6320 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6321 {
6322 	uint32_t reg;
6323 	uint16_t j;
6324 
6325 	/* Wait until the request is finished */
6326 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6327 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6328 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6329 		if (!(((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6330 			((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1)))
6331 			break;
6332 	}
6333 
6334 	if (on) {
6335 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6336 			return I40E_SUCCESS; /* Already on, skip next steps */
6337 		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6338 	} else {
6339 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6340 			return I40E_SUCCESS; /* Already off, skip next steps */
6341 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6342 	}
6343 
6344 	/* Write the register */
6345 	I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6346 	/* Check the result */
6347 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6348 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6349 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6350 		if (on) {
6351 			if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6352 				(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6353 				break;
6354 		} else {
6355 			if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6356 				!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6357 				break;
6358 		}
6359 	}
6360 
6361 	/* Check whether it timed out */
6362 	if (j >= I40E_CHK_Q_ENA_COUNT) {
6363 		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6364 			    (on ? "enable" : "disable"), q_idx);
6365 		return I40E_ERR_TIMEOUT;
6366 	}
6367 
6368 	return I40E_SUCCESS;
6369 }
6370 
6371 /* Initialize VSI for TX */
6372 static int
6373 i40e_dev_tx_init(struct i40e_pf *pf)
6374 {
6375 	struct rte_eth_dev_data *data = pf->dev_data;
6376 	uint16_t i;
6377 	uint32_t ret = I40E_SUCCESS;
6378 	struct i40e_tx_queue *txq;
6379 
6380 	for (i = 0; i < data->nb_tx_queues; i++) {
6381 		txq = data->tx_queues[i];
6382 		if (!txq || !txq->q_set)
6383 			continue;
6384 		ret = i40e_tx_queue_init(txq);
6385 		if (ret != I40E_SUCCESS)
6386 			break;
6387 	}
6388 	if (ret == I40E_SUCCESS)
6389 		i40e_set_tx_function(&rte_eth_devices[pf->dev_data->port_id]);
6390 
6391 	return ret;
6392 }
6393 
6394 /* Initialize VSI for RX */
6395 static int
6396 i40e_dev_rx_init(struct i40e_pf *pf)
6397 {
6398 	struct rte_eth_dev_data *data = pf->dev_data;
6399 	int ret = I40E_SUCCESS;
6400 	uint16_t i;
6401 	struct i40e_rx_queue *rxq;
6402 
6403 	i40e_pf_config_rss(pf);
6404 	for (i = 0; i < data->nb_rx_queues; i++) {
6405 		rxq = data->rx_queues[i];
6406 		if (!rxq || !rxq->q_set)
6407 			continue;
6408 
6409 		ret = i40e_rx_queue_init(rxq);
6410 		if (ret != I40E_SUCCESS) {
6411 			PMD_DRV_LOG(ERR,
6412 				"Failed to do RX queue initialization");
6413 			break;
6414 		}
6415 	}
6416 	if (ret == I40E_SUCCESS)
6417 		i40e_set_rx_function(&rte_eth_devices[pf->dev_data->port_id]);
6418 
6419 	return ret;
6420 }
6421 
6422 static int
6423 i40e_dev_rxtx_init(struct i40e_pf *pf)
6424 {
6425 	int err;
6426 
6427 	err = i40e_dev_tx_init(pf);
6428 	if (err) {
6429 		PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6430 		return err;
6431 	}
6432 	err = i40e_dev_rx_init(pf);
6433 	if (err) {
6434 		PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6435 		return err;
6436 	}
6437 
6438 	return err;
6439 }
6440 
6441 static int
6442 i40e_vmdq_setup(struct rte_eth_dev *dev)
6443 {
6444 	struct rte_eth_conf *conf = &dev->data->dev_conf;
6445 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6446 	int i, err, conf_vsis, j, loop;
6447 	struct i40e_vsi *vsi;
6448 	struct i40e_vmdq_info *vmdq_info;
6449 	struct rte_eth_vmdq_rx_conf *vmdq_conf;
6450 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6451 
6452 	/*
6453 	 * Disable interrupts to avoid messages from VFs. This also
6454 	 * avoids race conditions in VSI creation/destruction.
6455 	 */
6456 	i40e_pf_disable_irq0(hw);
6457 
6458 	if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6459 		PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6460 		return -ENOTSUP;
6461 	}
6462 
6463 	conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6464 	if (conf_vsis > pf->max_nb_vmdq_vsi) {
6465 		PMD_INIT_LOG(ERR, "VMDQ config: %u, max supported: %u",
6466 			conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6467 			pf->max_nb_vmdq_vsi);
6468 		return -ENOTSUP;
6469 	}
6470 
6471 	if (pf->vmdq != NULL) {
6472 		PMD_INIT_LOG(INFO, "VMDQ already configured");
6473 		return 0;
6474 	}
6475 
6476 	pf->vmdq = rte_zmalloc("vmdq_info_struct",
6477 				sizeof(*vmdq_info) * conf_vsis, 0);
6478 
6479 	if (pf->vmdq == NULL) {
6480 		PMD_INIT_LOG(ERR, "Failed to allocate memory");
6481 		return -ENOMEM;
6482 	}
6483 
6484 	vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6485 
6486 	/* Create VMDQ VSI */
6487 	for (i = 0; i < conf_vsis; i++) {
6488 		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6489 				vmdq_conf->enable_loop_back);
6490 		if (vsi == NULL) {
6491 			PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6492 			err = -1;
6493 			goto err_vsi_setup;
6494 		}
6495 		vmdq_info = &pf->vmdq[i];
6496 		vmdq_info->pf = pf;
6497 		vmdq_info->vsi = vsi;
6498 	}
6499 	pf->nb_cfg_vmdq_vsi = conf_vsis;
6500 
6501 	/* Configure VLANs */
6502 	loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
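	/*
	 * pool_map[i].pools is a bitmask of VMDQ pools; add the VLAN to
	 * every pool whose bit is set.
	 */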
6503 	for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6504 		for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6505 			if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6506 				PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6507 					vmdq_conf->pool_map[i].vlan_id, j);
6508 
6509 				err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6510 						vmdq_conf->pool_map[i].vlan_id);
6511 				if (err) {
6512 					PMD_INIT_LOG(ERR, "Failed to add vlan");
6513 					err = -1;
6514 					goto err_vsi_setup;
6515 				}
6516 			}
6517 		}
6518 	}
6519 
6520 	i40e_pf_enable_irq0(hw);
6521 
6522 	return 0;
6523 
6524 err_vsi_setup:
6525 	for (i = 0; i < conf_vsis; i++)
6526 		if (pf->vmdq[i].vsi == NULL)
6527 			break;
6528 		else
6529 			i40e_vsi_release(pf->vmdq[i].vsi);
6530 
6531 	rte_free(pf->vmdq);
6532 	pf->vmdq = NULL;
6533 	i40e_pf_enable_irq0(hw);
6534 	return err;
6535 }
6536 
6537 static void
6538 i40e_stat_update_32(struct i40e_hw *hw,
6539 		   uint32_t reg,
6540 		   bool offset_loaded,
6541 		   uint64_t *offset,
6542 		   uint64_t *stat)
6543 {
6544 	uint64_t new_data;
6545 
6546 	new_data = (uint64_t)I40E_READ_REG(hw, reg);
6547 	if (!offset_loaded)
6548 		*offset = new_data;
6549 
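	/*
	 * The register is a free-running counter; report the delta from the
	 * saved offset and handle a single 32-bit wrap-around.
	 */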
6550 	if (new_data >= *offset)
6551 		*stat = (uint64_t)(new_data - *offset);
6552 	else
6553 		*stat = (uint64_t)((new_data +
6554 			((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6555 }
6556 
6557 static void
6558 i40e_stat_update_48(struct i40e_hw *hw,
6559 		   uint32_t hireg,
6560 		   uint32_t loreg,
6561 		   bool offset_loaded,
6562 		   uint64_t *offset,
6563 		   uint64_t *stat)
6564 {
6565 	uint64_t new_data;
6566 
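	/*
	 * On the QEMU device model, assemble the 48-bit counter from two
	 * 32-bit reads instead of a single 64-bit register read.
	 */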
6567 	if (hw->device_id == I40E_DEV_ID_QEMU) {
6568 		new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6569 		new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6570 				I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6571 	} else {
6572 		new_data = I40E_READ_REG64(hw, loreg);
6573 	}
6574 
6575 	if (!offset_loaded)
6576 		*offset = new_data;
6577 
6578 	if (new_data >= *offset)
6579 		*stat = new_data - *offset;
6580 	else
6581 		*stat = (uint64_t)((new_data +
6582 			((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6583 
6584 	*stat &= I40E_48_BIT_MASK;
6585 }
6586 
6587 /* Disable IRQ0 */
6588 void
6589 i40e_pf_disable_irq0(struct i40e_hw *hw)
6590 {
6591 	/* Disable all interrupt types */
6592 	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6593 		       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6594 	I40E_WRITE_FLUSH(hw);
6595 }
6596 
6597 /* Enable IRQ0 */
6598 void
6599 i40e_pf_enable_irq0(struct i40e_hw *hw)
6600 {
6601 	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6602 		I40E_PFINT_DYN_CTL0_INTENA_MASK |
6603 		I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6604 		I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6605 	I40E_WRITE_FLUSH(hw);
6606 }
6607 
6608 static void
6609 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6610 {
6611 	/* read pending request and disable first */
6612 	i40e_pf_disable_irq0(hw);
6613 	I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6614 	I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6615 		I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6616 
6617 	if (no_queue)
6618 		/* Don't link any queue with irq0 */
6619 		I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6620 			       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6621 }
6622 
6623 static void
6624 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6625 {
6626 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6627 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6628 	int i;
6629 	uint16_t abs_vf_id;
6630 	uint32_t index, offset, val;
6631 
6632 	if (!pf->vfs)
6633 		return;
6634 	/**
6635 	 * Try to find which VF triggered a reset; use the absolute VF id
6636 	 * for access, since the register is global.
6637 	 */
6638 	for (i = 0; i < pf->vf_num; i++) {
6639 		abs_vf_id = hw->func_caps.vf_base_id + i;
6640 		index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6641 		offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6642 		val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6643 		/* VFR event occurred */
6644 		if (val & (0x1 << offset)) {
6645 			int ret;
6646 
6647 			/* Clear the event first */
6648 			I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6649 							(0x1 << offset));
6650 			PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6651 			/**
6652 			 * Only notify the VF that a reset event occurred;
6653 			 * don't trigger another SW reset.
6654 			 */
6655 			ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6656 			if (ret != I40E_SUCCESS)
6657 				PMD_DRV_LOG(ERR, "Failed to do VF reset");
6658 		}
6659 	}
6660 }
6661 
6662 static void
6663 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6664 {
6665 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6666 	int i;
6667 
6668 	for (i = 0; i < pf->vf_num; i++)
6669 		i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6670 }
6671 
6672 static void
6673 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6674 {
6675 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6676 	struct i40e_arq_event_info info;
6677 	uint16_t pending, opcode;
6678 	int ret;
6679 
6680 	info.buf_len = I40E_AQ_BUF_SZ;
6681 	info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6682 	if (!info.msg_buf) {
6683 		PMD_DRV_LOG(ERR, "Failed to allocate mem");
6684 		return;
6685 	}
6686 
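	/*
	 * Drain the admin receive queue; i40e_clean_arq_element() reports
	 * through 'pending' how many events are still queued.
	 */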
6687 	pending = 1;
6688 	while (pending) {
6689 		ret = i40e_clean_arq_element(hw, &info, &pending);
6690 
6691 		if (ret != I40E_SUCCESS) {
6692 			PMD_DRV_LOG(INFO,
6693 				"Failed to read msg from AdminQ, aq_err: %u",
6694 				hw->aq.asq_last_status);
6695 			break;
6696 		}
6697 		opcode = rte_le_to_cpu_16(info.desc.opcode);
6698 
6699 		switch (opcode) {
6700 		case i40e_aqc_opc_send_msg_to_pf:
6701 		/* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6702 			i40e_pf_host_handle_vf_msg(dev,
6703 					rte_le_to_cpu_16(info.desc.retval),
6704 					rte_le_to_cpu_32(info.desc.cookie_high),
6705 					rte_le_to_cpu_32(info.desc.cookie_low),
6706 					info.msg_buf,
6707 					info.msg_len);
6708 			break;
6709 		case i40e_aqc_opc_get_link_status:
6710 			ret = i40e_dev_link_update(dev, 0);
6711 			if (!ret)
6712 				rte_eth_dev_callback_process(dev,
6713 					RTE_ETH_EVENT_INTR_LSC, NULL);
6714 
6715 			break;
6716 		default:
6717 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6718 				    opcode);
6719 			break;
6720 		}
6721 	}
6722 	rte_free(info.msg_buf);
6723 }
6724 
6725 static void
6726 i40e_handle_mdd_event(struct rte_eth_dev *dev)
6727 {
6728 #define I40E_MDD_CLEAR32 0xFFFFFFFF
6729 #define I40E_MDD_CLEAR16 0xFFFF
6730 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6731 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6732 	bool mdd_detected = false;
6733 	struct i40e_pf_vf *vf;
6734 	uint32_t reg;
6735 	int i;
6736 
6737 	/* find what triggered the MDD event */
6738 	reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
6739 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6740 		uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6741 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
6742 		uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6743 				I40E_GL_MDET_TX_VF_NUM_SHIFT;
6744 		uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6745 				I40E_GL_MDET_TX_EVENT_SHIFT;
6746 		uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6747 				I40E_GL_MDET_TX_QUEUE_SHIFT) -
6748 					hw->func_caps.base_queue;
6749 		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
6750 			"queue %d PF number 0x%02x VF number 0x%02x device %s",
6751 				event, queue, pf_num, vf_num, dev->data->name);
6752 		I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
6753 		mdd_detected = true;
6754 	}
6755 	reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
6756 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6757 		uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6758 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
6759 		uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6760 				I40E_GL_MDET_RX_EVENT_SHIFT;
6761 		uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6762 				I40E_GL_MDET_RX_QUEUE_SHIFT) -
6763 					hw->func_caps.base_queue;
6764 
6765 		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
6766 				"queue %d of function 0x%02x device %s",
6767 					event, queue, func, dev->data->name);
6768 		I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
6769 		mdd_detected = true;
6770 	}
6771 
6772 	if (mdd_detected) {
6773 		reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
6774 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6775 			I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
6776 			PMD_DRV_LOG(WARNING, "TX driver issue detected on PF");
6777 		}
6778 		reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
6779 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6780 			I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
6781 					I40E_MDD_CLEAR16);
6782 			PMD_DRV_LOG(WARNING, "RX driver issue detected on PF");
6783 		}
6784 	}
6785 
6786 	/* see if one of the VFs needs its hand slapped */
6787 	for (i = 0; i < pf->vf_num && mdd_detected; i++) {
6788 		vf = &pf->vfs[i];
6789 		reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
6790 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6791 			I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
6792 					I40E_MDD_CLEAR16);
6793 			vf->num_mdd_events++;
6794 			PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %"
6795 					PRIu64 " times",
6796 					i, vf->num_mdd_events);
6797 		}
6798 
6799 		reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
6800 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6801 			I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
6802 					I40E_MDD_CLEAR16);
6803 			vf->num_mdd_events++;
6804 			PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %"
6805 					PRIu64 " times",
6806 					i, vf->num_mdd_events);
6807 		}
6808 	}
6809 }
6810 
6811 /**
6812  * Interrupt handler triggered by the NIC for handling
6813  * specific interrupts.
6814  *
6815  * @param handle
6816  *  Pointer to interrupt handle.
6817  * @param param
6818  *  The address of parameter (struct rte_eth_dev *) registered before.
6819  *
6820  * @return
6821  *  void
6822  */
6823 static void
6824 i40e_dev_interrupt_handler(void *param)
6825 {
6826 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6827 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6828 	uint32_t icr0;
6829 
6830 	/* Disable interrupt */
6831 	i40e_pf_disable_irq0(hw);
6832 
6833 	/* read out interrupt causes */
6834 	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6835 
6836 	/* No interrupt event indicated */
6837 	if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6838 		PMD_DRV_LOG(INFO, "No interrupt event");
6839 		goto done;
6840 	}
6841 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6842 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6843 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6844 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6845 		i40e_handle_mdd_event(dev);
6846 	}
6847 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6848 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6849 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6850 		PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6851 	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6852 		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6853 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6854 		PMD_DRV_LOG(ERR, "ICR0: HMC error");
6855 	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6856 		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6857 
6858 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6859 		PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6860 		i40e_dev_handle_vfr_event(dev);
6861 	}
6862 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6863 		PMD_DRV_LOG(INFO, "ICR0: adminq event");
6864 		i40e_dev_handle_aq_msg(dev);
6865 	}
6866 
6867 done:
6868 	/* Enable interrupt */
6869 	i40e_pf_enable_irq0(hw);
6870 }
6871 
6872 static void
6873 i40e_dev_alarm_handler(void *param)
6874 {
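	/*
	 * Polled variant of i40e_dev_interrupt_handler(): reads ICR0 on a
	 * timer and re-arms the alarm at the end.
	 */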
6875 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6876 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6877 	uint32_t icr0;
6878 
6879 	/* Disable interrupt */
6880 	i40e_pf_disable_irq0(hw);
6881 
6882 	/* read out interrupt causes */
6883 	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6884 
6885 	/* No interrupt event indicated */
6886 	if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
6887 		goto done;
6888 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6889 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6890 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6891 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6892 		i40e_handle_mdd_event(dev);
6893 	}
6894 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6895 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6896 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6897 		PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6898 	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6899 		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6900 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6901 		PMD_DRV_LOG(ERR, "ICR0: HMC error");
6902 	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6903 		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6904 
6905 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6906 		PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6907 		i40e_dev_handle_vfr_event(dev);
6908 	}
6909 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6910 		PMD_DRV_LOG(INFO, "ICR0: adminq event");
6911 		i40e_dev_handle_aq_msg(dev);
6912 	}
6913 
6914 done:
6915 	/* Enable interrupt */
6916 	i40e_pf_enable_irq0(hw);
6917 	rte_eal_alarm_set(I40E_ALARM_INTERVAL,
6918 			  i40e_dev_alarm_handler, dev);
6919 }
6920 
6921 int
6922 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6923 			 struct i40e_macvlan_filter *filter,
6924 			 int total)
6925 {
6926 	int ele_num, ele_buff_size;
6927 	int num, actual_num, i;
6928 	uint16_t flags;
6929 	int ret = I40E_SUCCESS;
6930 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6931 	struct i40e_aqc_add_macvlan_element_data *req_list;
6932 
6933 	if (filter == NULL || total == 0)
6934 		return I40E_ERR_PARAM;
6935 	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6936 	ele_buff_size = hw->aq.asq_buf_size;
6937 
6938 	req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6939 	if (req_list == NULL) {
6940 		PMD_DRV_LOG(ERR, "Fail to allocate memory");
6941 		return I40E_ERR_NO_MEMORY;
6942 	}
6943 
6944 	num = 0;
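	/*
	 * The AQ buffer holds at most ele_num elements, so program the
	 * filters in chunks until all 'total' entries have been added.
	 */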
6945 	do {
6946 		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6947 		memset(req_list, 0, ele_buff_size);
6948 
6949 		for (i = 0; i < actual_num; i++) {
6950 			rte_memcpy(req_list[i].mac_addr,
6951 				&filter[num + i].macaddr, ETH_ADDR_LEN);
6952 			req_list[i].vlan_tag =
6953 				rte_cpu_to_le_16(filter[num + i].vlan_id);
6954 
6955 			switch (filter[num + i].filter_type) {
6956 			case I40E_MAC_PERFECT_MATCH:
6957 				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6958 					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6959 				break;
6960 			case I40E_MACVLAN_PERFECT_MATCH:
6961 				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6962 				break;
6963 			case I40E_MAC_HASH_MATCH:
6964 				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6965 					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6966 				break;
6967 			case I40E_MACVLAN_HASH_MATCH:
6968 				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6969 				break;
6970 			default:
6971 				PMD_DRV_LOG(ERR, "Invalid MAC match type");
6972 				ret = I40E_ERR_PARAM;
6973 				goto DONE;
6974 			}
6975 
6976 			req_list[i].queue_number = 0;
6977 
6978 			req_list[i].flags = rte_cpu_to_le_16(flags);
6979 		}
6980 
6981 		ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6982 						actual_num, NULL);
6983 		if (ret != I40E_SUCCESS) {
6984 			PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6985 			goto DONE;
6986 		}
6987 		num += actual_num;
6988 	} while (num < total);
6989 
6990 DONE:
6991 	rte_free(req_list);
6992 	return ret;
6993 }
6994 
6995 int
6996 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6997 			    struct i40e_macvlan_filter *filter,
6998 			    int total)
6999 {
7000 	int ele_num, ele_buff_size;
7001 	int num, actual_num, i;
7002 	uint16_t flags;
7003 	int ret = I40E_SUCCESS;
7004 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7005 	struct i40e_aqc_remove_macvlan_element_data *req_list;
7006 	enum i40e_admin_queue_err aq_status;
7007 
7008 	if (filter == NULL || total == 0)
7009 		return I40E_ERR_PARAM;
7010 
7011 	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7012 	ele_buff_size = hw->aq.asq_buf_size;
7013 
7014 	req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
7015 	if (req_list == NULL) {
7016 		PMD_DRV_LOG(ERR, "Fail to allocate memory");
7017 		return I40E_ERR_NO_MEMORY;
7018 	}
7019 
7020 	num = 0;
7021 	do {
7022 		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7023 		memset(req_list, 0, ele_buff_size);
7024 
7025 		for (i = 0; i < actual_num; i++) {
7026 			rte_memcpy(req_list[i].mac_addr,
7027 				&filter[num + i].macaddr, ETH_ADDR_LEN);
7028 			req_list[i].vlan_tag =
7029 				rte_cpu_to_le_16(filter[num + i].vlan_id);
7030 
7031 			switch (filter[num + i].filter_type) {
7032 			case I40E_MAC_PERFECT_MATCH:
7033 				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
7034 					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7035 				break;
7036 			case I40E_MACVLAN_PERFECT_MATCH:
7037 				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7038 				break;
7039 			case I40E_MAC_HASH_MATCH:
7040 				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
7041 					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7042 				break;
7043 			case I40E_MACVLAN_HASH_MATCH:
7044 				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
7045 				break;
7046 			default:
7047 				PMD_DRV_LOG(ERR, "Invalid MAC filter type");
7048 				ret = I40E_ERR_PARAM;
7049 				goto DONE;
7050 			}
7051 			req_list[i].flags = rte_cpu_to_le_16(flags);
7052 		}
7053 
7054 		ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, req_list,
7055 						actual_num, NULL, &aq_status);
7056 
7057 		if (ret != I40E_SUCCESS) {
7058 			/* Do not report as an error when firmware returns ENOENT */
7059 			if (aq_status == I40E_AQ_RC_ENOENT) {
7060 				ret = I40E_SUCCESS;
7061 			} else {
7062 				PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
7063 				goto DONE;
7064 			}
7065 		}
7066 		num += actual_num;
7067 	} while (num < total);
7068 
7069 DONE:
7070 	rte_free(req_list);
7071 	return ret;
7072 }
7073 
7074 /* Find a specific MAC filter */
7075 static struct i40e_mac_filter *
7076 i40e_find_mac_filter(struct i40e_vsi *vsi,
7077 			 struct rte_ether_addr *macaddr)
7078 {
7079 	struct i40e_mac_filter *f;
7080 
7081 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
7082 		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
7083 			return f;
7084 	}
7085 
7086 	return NULL;
7087 }
7088 
7089 static bool
7090 i40e_find_vlan_filter(struct i40e_vsi *vsi,
7091 			 uint16_t vlan_id)
7092 {
7093 	uint32_t vid_idx, vid_bit;
7094 
7095 	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
7096 		return 0;
7097 
7098 	vid_idx = I40E_VFTA_IDX(vlan_id);
7099 	vid_bit = I40E_VFTA_BIT(vlan_id);
7100 
7101 	if (vsi->vfta[vid_idx] & vid_bit)
7102 		return 1;
7103 	else
7104 		return 0;
7105 }
7106 
7107 static void
7108 i40e_store_vlan_filter(struct i40e_vsi *vsi,
7109 		       uint16_t vlan_id, bool on)
7110 {
7111 	uint32_t vid_idx, vid_bit;
7112 
7113 	vid_idx = I40E_VFTA_IDX(vlan_id);
7114 	vid_bit = I40E_VFTA_BIT(vlan_id);
7115 
7116 	if (on)
7117 		vsi->vfta[vid_idx] |= vid_bit;
7118 	else
7119 		vsi->vfta[vid_idx] &= ~vid_bit;
7120 }
7121 
7122 void
7123 i40e_set_vlan_filter(struct i40e_vsi *vsi,
7124 		     uint16_t vlan_id, bool on)
7125 {
7126 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7127 	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
7128 	int ret;
7129 
7130 	if (vlan_id > RTE_ETH_VLAN_ID_MAX)
7131 		return;
7132 
7133 	i40e_store_vlan_filter(vsi, vlan_id, on);
7134 
7135 	if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
7136 		return;
7137 
7138 	vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
7139 
7140 	if (on) {
7141 		ret = i40e_aq_add_vlan(hw, vsi->seid,
7142 				       &vlan_data, 1, NULL);
7143 		if (ret != I40E_SUCCESS)
7144 			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
7145 	} else {
7146 		ret = i40e_aq_remove_vlan(hw, vsi->seid,
7147 					  &vlan_data, 1, NULL);
7148 		if (ret != I40E_SUCCESS)
7149 			PMD_DRV_LOG(ERR,
7150 				    "Failed to remove vlan filter");
7151 	}
7152 }
7153 
7154 /**
7155  * Find all VLAN options for a specific MAC address,
7156  * and return the actual VLANs found.
7157  */
7158 int
7159 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
7160 			   struct i40e_macvlan_filter *mv_f,
7161 			   int num, struct rte_ether_addr *addr)
7162 {
7163 	int i;
7164 	uint32_t j, k;
7165 
7166 	/**
7167 	 * Don't use i40e_find_vlan_filter here, to reduce loop time,
7168 	 * although the code looks more complex.
7169 	 */
7170 	if (num < vsi->vlan_num)
7171 		return I40E_ERR_PARAM;
7172 
7173 	i = 0;
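	/* Walk the VFTA bitmap: each set bit j * 32 + k is a configured VLAN id. */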
7174 	for (j = 0; j < I40E_VFTA_SIZE; j++) {
7175 		if (vsi->vfta[j]) {
7176 			for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
7177 				if (vsi->vfta[j] & (1 << k)) {
7178 					if (i > num - 1) {
7179 						PMD_DRV_LOG(ERR,
7180 							"VLAN count doesn't match");
7181 						return I40E_ERR_PARAM;
7182 					}
7183 					rte_memcpy(&mv_f[i].macaddr,
7184 							addr, ETH_ADDR_LEN);
7185 					mv_f[i].vlan_id =
7186 						j * I40E_UINT32_BIT_SIZE + k;
7187 					i++;
7188 				}
7189 			}
7190 		}
7191 	}
7192 	return I40E_SUCCESS;
7193 }
7194 
7195 static inline int
7196 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7197 			   struct i40e_macvlan_filter *mv_f,
7198 			   int num,
7199 			   uint16_t vlan)
7200 {
7201 	int i = 0;
7202 	struct i40e_mac_filter *f;
7203 
7204 	if (num < vsi->mac_num)
7205 		return I40E_ERR_PARAM;
7206 
7207 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
7208 		if (i > num - 1) {
7209 			PMD_DRV_LOG(ERR, "buffer count doesn't match");
7210 			return I40E_ERR_PARAM;
7211 		}
7212 		rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7213 				ETH_ADDR_LEN);
7214 		mv_f[i].vlan_id = vlan;
7215 		mv_f[i].filter_type = f->mac_info.filter_type;
7216 		i++;
7217 	}
7218 
7219 	return I40E_SUCCESS;
7220 }
7221 
7222 static int
7223 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7224 {
7225 	int i, j, num;
7226 	struct i40e_mac_filter *f;
7227 	struct i40e_macvlan_filter *mv_f;
7228 	int ret = I40E_SUCCESS;
7229 
7230 	if (vsi == NULL || vsi->mac_num == 0)
7231 		return I40E_ERR_PARAM;
7232 
7233 	/* Case where no VLAN is set */
7234 	if (vsi->vlan_num == 0)
7235 		num = vsi->mac_num;
7236 	else
7237 		num = vsi->mac_num * vsi->vlan_num;
7238 
7239 	mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7240 	if (mv_f == NULL) {
7241 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7242 		return I40E_ERR_NO_MEMORY;
7243 	}
7244 
7245 	i = 0;
7246 	if (vsi->vlan_num == 0) {
7247 		TAILQ_FOREACH(f, &vsi->mac_list, next) {
7248 			rte_memcpy(&mv_f[i].macaddr,
7249 				&f->mac_info.mac_addr, ETH_ADDR_LEN);
7250 			mv_f[i].filter_type = f->mac_info.filter_type;
7251 			mv_f[i].vlan_id = 0;
7252 			i++;
7253 		}
7254 	} else {
7255 		TAILQ_FOREACH(f, &vsi->mac_list, next) {
7256 			ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
7257 					vsi->vlan_num, &f->mac_info.mac_addr);
7258 			if (ret != I40E_SUCCESS)
7259 				goto DONE;
7260 			for (j = i; j < i + vsi->vlan_num; j++)
7261 				mv_f[j].filter_type = f->mac_info.filter_type;
7262 			i += vsi->vlan_num;
7263 		}
7264 	}
7265 
7266 	ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7267 DONE:
7268 	rte_free(mv_f);
7269 
7270 	return ret;
7271 }
7272 
7273 int
7274 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7275 {
7276 	struct i40e_macvlan_filter *mv_f;
7277 	int mac_num;
7278 	int ret = I40E_SUCCESS;
7279 
7280 	if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
7281 		return I40E_ERR_PARAM;
7282 
7283 	/* If it's already set, just return */
7284 	if (i40e_find_vlan_filter(vsi, vlan))
7285 		return I40E_SUCCESS;
7286 
7287 	mac_num = vsi->mac_num;
7288 
7289 	if (mac_num == 0) {
7290 		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a MAC address");
7291 		return I40E_ERR_PARAM;
7292 	}
7293 
7294 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7295 
7296 	if (mv_f == NULL) {
7297 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7298 		return I40E_ERR_NO_MEMORY;
7299 	}
7300 
7301 	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7302 
7303 	if (ret != I40E_SUCCESS)
7304 		goto DONE;
7305 
7306 	ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7307 
7308 	if (ret != I40E_SUCCESS)
7309 		goto DONE;
7310 
7311 	i40e_set_vlan_filter(vsi, vlan, 1);
7312 
7313 	vsi->vlan_num++;
7314 	ret = I40E_SUCCESS;
7315 DONE:
7316 	rte_free(mv_f);
7317 	return ret;
7318 }
7319 
7320 int
7321 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7322 {
7323 	struct i40e_macvlan_filter *mv_f;
7324 	int mac_num;
7325 	int ret = I40E_SUCCESS;
7326 
7327 	/**
7328 	 * Vlan 0 is the generic filter for untagged packets
7329 	 * and can't be removed.
7330 	 */
7331 	if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
7332 		return I40E_ERR_PARAM;
7333 
7334 	/* If we can't find it, just return */
7335 	if (!i40e_find_vlan_filter(vsi, vlan))
7336 		return I40E_ERR_PARAM;
7337 
7338 	mac_num = vsi->mac_num;
7339 
7340 	if (mac_num == 0) {
7341 		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a MAC address");
7342 		return I40E_ERR_PARAM;
7343 	}
7344 
7345 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7346 
7347 	if (mv_f == NULL) {
7348 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7349 		return I40E_ERR_NO_MEMORY;
7350 	}
7351 
7352 	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7353 
7354 	if (ret != I40E_SUCCESS)
7355 		goto DONE;
7356 
7357 	ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
7358 
7359 	if (ret != I40E_SUCCESS)
7360 		goto DONE;
7361 
7362 	/* This is the last VLAN to remove; replace all MAC filters with VLAN 0 */
7363 	if (vsi->vlan_num == 1) {
7364 		ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
7365 		if (ret != I40E_SUCCESS)
7366 			goto DONE;
7367 
7368 		ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7369 		if (ret != I40E_SUCCESS)
7370 			goto DONE;
7371 	}
7372 
7373 	i40e_set_vlan_filter(vsi, vlan, 0);
7374 
7375 	vsi->vlan_num--;
7376 	ret = I40E_SUCCESS;
7377 DONE:
7378 	rte_free(mv_f);
7379 	return ret;
7380 }
7381 
7382 int
7383 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7384 {
7385 	struct i40e_mac_filter *f;
7386 	struct i40e_macvlan_filter *mv_f;
7387 	int i, vlan_num = 0;
7388 	int ret = I40E_SUCCESS;
7389 
7390 	/* If the filter has already been configured, just return */
7391 	f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7392 	if (f != NULL)
7393 		return I40E_SUCCESS;
7394 	if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7395 		mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7396 
7397 		/**
7398 		 * If vlan_num is 0, this is the first MAC being added;
7399 		 * set the mask for vlan_id 0.
7400 		 */
7401 		if (vsi->vlan_num == 0) {
7402 			i40e_set_vlan_filter(vsi, 0, 1);
7403 			vsi->vlan_num = 1;
7404 		}
7405 		vlan_num = vsi->vlan_num;
7406 	} else if (mac_filter->filter_type == I40E_MAC_PERFECT_MATCH ||
7407 			mac_filter->filter_type == I40E_MAC_HASH_MATCH)
7408 		vlan_num = 1;
7409 
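	/*
	 * Hardware filters match (MAC, VLAN) pairs, so a MACVLAN-type filter
	 * must be replicated for every VLAN configured on the VSI.
	 */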
7410 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7411 	if (mv_f == NULL) {
7412 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7413 		return I40E_ERR_NO_MEMORY;
7414 	}
7415 
7416 	for (i = 0; i < vlan_num; i++) {
7417 		mv_f[i].filter_type = mac_filter->filter_type;
7418 		rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7419 				ETH_ADDR_LEN);
7420 	}
7421 
7422 	if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7423 		mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7424 		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7425 					&mac_filter->mac_addr);
7426 		if (ret != I40E_SUCCESS)
7427 			goto DONE;
7428 	}
7429 
7430 	ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7431 	if (ret != I40E_SUCCESS)
7432 		goto DONE;
7433 
7434 	/* Add the MAC address to the MAC list */
7435 	f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7436 	if (f == NULL) {
7437 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7438 		ret = I40E_ERR_NO_MEMORY;
7439 		goto DONE;
7440 	}
7441 	rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7442 			ETH_ADDR_LEN);
7443 	f->mac_info.filter_type = mac_filter->filter_type;
7444 	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7445 	vsi->mac_num++;
7446 
7447 	ret = I40E_SUCCESS;
7448 DONE:
7449 	rte_free(mv_f);
7450 
7451 	return ret;
7452 }
7453 
7454 int
7455 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
7456 {
7457 	struct i40e_mac_filter *f;
7458 	struct i40e_macvlan_filter *mv_f;
7459 	int i, vlan_num;
7460 	enum i40e_mac_filter_type filter_type;
7461 	int ret = I40E_SUCCESS;
7462 
7463 	/* If we can't find it, return an error */
7464 	f = i40e_find_mac_filter(vsi, addr);
7465 	if (f == NULL)
7466 		return I40E_ERR_PARAM;
7467 
7468 	vlan_num = vsi->vlan_num;
7469 	filter_type = f->mac_info.filter_type;
7470 	if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7471 		filter_type == I40E_MACVLAN_HASH_MATCH) {
7472 		if (vlan_num == 0) {
7473 			PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7474 			return I40E_ERR_PARAM;
7475 		}
7476 	} else if (filter_type == I40E_MAC_PERFECT_MATCH ||
7477 			filter_type == I40E_MAC_HASH_MATCH)
7478 		vlan_num = 1;
7479 
7480 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7481 	if (mv_f == NULL) {
7482 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7483 		return I40E_ERR_NO_MEMORY;
7484 	}
7485 
7486 	for (i = 0; i < vlan_num; i++) {
7487 		mv_f[i].filter_type = filter_type;
7488 		rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7489 				ETH_ADDR_LEN);
7490 	}
7491 	if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7492 			filter_type == I40E_MACVLAN_HASH_MATCH) {
7493 		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7494 		if (ret != I40E_SUCCESS)
7495 			goto DONE;
7496 	}
7497 
7498 	ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7499 	if (ret != I40E_SUCCESS)
7500 		goto DONE;
7501 
7502 	/* Remove the MAC address from the MAC list */
7503 	TAILQ_REMOVE(&vsi->mac_list, f, next);
7504 	rte_free(f);
7505 	vsi->mac_num--;
7506 
7507 	ret = I40E_SUCCESS;
7508 DONE:
7509 	rte_free(mv_f);
7510 	return ret;
7511 }
7512 
7513 /* Configure hash enable flags for RSS */
7514 uint64_t
7515 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7516 {
7517 	uint64_t hena = 0;
7518 	int i;
7519 
7520 	if (!flags)
7521 		return hena;
7522 
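	/*
	 * pctypes_tbl maps each RTE flow-type bit onto the hardware
	 * packet-classifier-type (PCTYPE) bits that implement it.
	 */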
7523 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7524 		if (flags & (1ULL << i))
7525 			hena |= adapter->pctypes_tbl[i];
7526 	}
7527 
7528 	return hena;
7529 }
7530 
7531 /* Parse the hash enable flags */
7532 uint64_t
7533 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7534 {
7535 	uint64_t rss_hf = 0;
7536 	int i;
7537 
7538 	if (!flags)
7539 		return rss_hf;
7540 
7541 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7542 		if (flags & adapter->pctypes_tbl[i])
7543 			rss_hf |= (1ULL << i);
7544 	}
7545 	return rss_hf;
7546 }
7547 
7548 /* Disable RSS */
7549 void
7550 i40e_pf_disable_rss(struct i40e_pf *pf)
7551 {
7552 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7553 
7554 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7555 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7556 	I40E_WRITE_FLUSH(hw);
7557 }
7558 
7559 int
7560 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7561 {
7562 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7563 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7564 	uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7565 			   I40E_VFQF_HKEY_MAX_INDEX :
7566 			   I40E_PFQF_HKEY_MAX_INDEX;
7567 
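	/* A valid key must fill every HKEY register: (key_idx + 1) 32-bit words. */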
7568 	if (!key || key_len == 0) {
7569 		PMD_DRV_LOG(DEBUG, "No key to be configured");
7570 		return 0;
7571 	} else if (key_len != (key_idx + 1) * sizeof(uint32_t)) {
7573 		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7574 		return -EINVAL;
7575 	}
7576 
7577 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7578 		struct i40e_aqc_get_set_rss_key_data *key_dw =
7579 				(struct i40e_aqc_get_set_rss_key_data *)key;
7580 		enum i40e_status_code status =
7581 				i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7582 
7583 		if (status) {
7584 			PMD_DRV_LOG(ERR,
7585 				    "Failed to configure RSS key via AQ, error status: %d",
7586 				    status);
7587 			return -EIO;
7588 		}
7589 	} else {
7590 		uint32_t *hash_key = (uint32_t *)key;
7591 		uint16_t i;
7592 
7593 		if (vsi->type == I40E_VSI_SRIOV) {
7594 			for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7595 				I40E_WRITE_REG(
7596 					hw,
7597 					I40E_VFQF_HKEY1(i, vsi->user_param),
7598 					hash_key[i]);
7599 
7600 		} else {
7601 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7602 				I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7603 					       hash_key[i]);
7604 		}
7605 		I40E_WRITE_FLUSH(hw);
7606 	}
7607 
7608 	return 0;
7609 }
7610 
7611 static int
7612 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7613 {
7614 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7615 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7616 	uint32_t reg;
7617 	int ret;
7618 
7619 	if (!key || !key_len)
7620 		return 0;
7621 
7622 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7623 		ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7624 			(struct i40e_aqc_get_set_rss_key_data *)key);
7625 		if (ret) {
7626 			PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7627 			return ret;
7628 		}
7629 	} else {
7630 		uint32_t *key_dw = (uint32_t *)key;
7631 		uint16_t i;
7632 
7633 		if (vsi->type == I40E_VSI_SRIOV) {
7634 			for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7635 				reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7636 				key_dw[i] = i40e_read_rx_ctl(hw, reg);
7637 			}
7638 			*key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7639 				   sizeof(uint32_t);
7640 		} else {
7641 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7642 				reg = I40E_PFQF_HKEY(i);
7643 				key_dw[i] = i40e_read_rx_ctl(hw, reg);
7644 			}
7645 			*key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7646 				   sizeof(uint32_t);
7647 		}
7648 	}
7649 	return 0;
7650 }
7651 
7652 static int
7653 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7654 {
7655 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7656 	uint64_t hena;
7657 	int ret;
7658 
7659 	ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7660 			       rss_conf->rss_key_len);
7661 	if (ret)
7662 		return ret;
7663 
7664 	hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7665 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7666 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7667 	I40E_WRITE_FLUSH(hw);
7668 
7669 	return 0;
7670 }
7671 
7672 static int
7673 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7674 			 struct rte_eth_rss_conf *rss_conf)
7675 {
7676 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7677 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7678 	uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7679 	uint64_t hena;
7680 
7681 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7682 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7683 
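	/*
	 * Only the hash configuration of an already-enabled RSS can be
	 * updated here; toggling RSS on or off is rejected with -EINVAL.
	 */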
7684 	if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7685 		if (rss_hf != 0) /* Enable RSS */
7686 			return -EINVAL;
7687 		return 0; /* Nothing to do */
7688 	}
7689 	/* RSS enabled */
7690 	if (rss_hf == 0) /* Disable RSS */
7691 		return -EINVAL;
7692 
7693 	return i40e_hw_rss_hash_set(pf, rss_conf);
7694 }
7695 
7696 static int
7697 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7698 			   struct rte_eth_rss_conf *rss_conf)
7699 {
7700 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7701 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7702 	uint64_t hena;
7703 	int ret;
7704 
7705 	if (!rss_conf)
7706 		return -EINVAL;
7707 
7708 	ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7709 			 &rss_conf->rss_key_len);
7710 	if (ret)
7711 		return ret;
7712 
7713 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7714 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7715 	rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7716 
7717 	return 0;
7718 }
7719 
7720 static int
7721 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7722 {
7723 	switch (filter_type) {
7724 	case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN:
7725 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7726 		break;
7727 	case RTE_ETH_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7728 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7729 		break;
7730 	case RTE_ETH_TUNNEL_FILTER_IMAC_TENID:
7731 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7732 		break;
7733 	case RTE_ETH_TUNNEL_FILTER_OMAC_TENID_IMAC:
7734 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7735 		break;
7736 	case RTE_ETH_TUNNEL_FILTER_IMAC:
7737 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7738 		break;
7739 	case RTE_ETH_TUNNEL_FILTER_OIP:
7740 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7741 		break;
7742 	case RTE_ETH_TUNNEL_FILTER_IIP:
7743 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7744 		break;
7745 	default:
7746 		PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7747 		return -EINVAL;
7748 	}
7749 
7750 	return 0;
7751 }
7752 
7753 /* Convert tunnel filter structure */
7754 static int
7755 i40e_tunnel_filter_convert(
7756 	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
7757 	struct i40e_tunnel_filter *tunnel_filter)
7758 {
7759 	rte_ether_addr_copy((struct rte_ether_addr *)
7760 			&cld_filter->element.outer_mac,
7761 		(struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
7762 	rte_ether_addr_copy((struct rte_ether_addr *)
7763 			&cld_filter->element.inner_mac,
7764 		(struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
7765 	tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7766 	if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7767 	     I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7768 	    I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7769 		tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7770 	else
7771 		tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7772 	tunnel_filter->input.flags = cld_filter->element.flags;
7773 	tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7774 	tunnel_filter->queue = cld_filter->element.queue_number;
7775 	rte_memcpy(tunnel_filter->input.general_fields,
7776 		   cld_filter->general_fields,
7777 		   sizeof(cld_filter->general_fields));
7778 
7779 	return 0;
7780 }
7781 
7782 /* Check if the tunnel filter exists */
7783 struct i40e_tunnel_filter *
7784 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7785 			     const struct i40e_tunnel_filter_input *input)
7786 {
7787 	int ret;
7788 
7789 	ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7790 	if (ret < 0)
7791 		return NULL;
7792 
7793 	return tunnel_rule->hash_map[ret];
7794 }
7795 
7796 /* Add a tunnel filter into the SW list */
7797 static int
7798 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7799 			     struct i40e_tunnel_filter *tunnel_filter)
7800 {
7801 	struct i40e_tunnel_rule *rule = &pf->tunnel;
7802 	int ret;
7803 
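	/*
	 * rte_hash_add_key() returns the key's slot index on success, which
	 * is reused as the position in the parallel hash_map array.
	 */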
7804 	ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7805 	if (ret < 0) {
7806 		PMD_DRV_LOG(ERR,
7807 			    "Failed to insert tunnel filter into hash table: %d!",
7808 			    ret);
7809 		return ret;
7810 	}
7811 	rule->hash_map[ret] = tunnel_filter;
7812 
7813 	TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7814 
7815 	return 0;
7816 }
7817 
7818 /* Delete a tunnel filter from the SW list */
7819 int
7820 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7821 			  struct i40e_tunnel_filter_input *input)
7822 {
7823 	struct i40e_tunnel_rule *rule = &pf->tunnel;
7824 	struct i40e_tunnel_filter *tunnel_filter;
7825 	int ret;
7826 
7827 	ret = rte_hash_del_key(rule->hash_table, input);
7828 	if (ret < 0) {
7829 		PMD_DRV_LOG(ERR,
7830 			    "Failed to delete tunnel filter from hash table: %d!",
7831 			    ret);
7832 		return ret;
7833 	}
7834 	tunnel_filter = rule->hash_map[ret];
7835 	rule->hash_map[ret] = NULL;
7836 
7837 	TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7838 	rte_free(tunnel_filter);
7839 
7840 	return 0;
7841 }
7842 
7843 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7844 #define I40E_TR_VXLAN_GRE_KEY_MASK		0x4
7845 #define I40E_TR_GENEVE_KEY_MASK			0x8
7846 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK		0x40
7847 #define I40E_TR_GRE_KEY_MASK			0x400
7848 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK		0x800
7849 #define I40E_TR_GRE_NO_KEY_MASK			0x8000
7850 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
7851 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
7852 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
7853 #define I40E_DIRECTION_INGRESS_KEY		0x8000
7854 #define I40E_TR_L4_TYPE_TCP			0x2
7855 #define I40E_TR_L4_TYPE_UDP			0x4
7856 #define I40E_TR_L4_TYPE_SCTP			0x8
7857 
7858 static enum
7859 i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7860 {
7861 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7862 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7863 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7864 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
7865 	enum i40e_status_code status = I40E_SUCCESS;
7866 
7867 	if (pf->support_multi_driver) {
7868 		PMD_DRV_LOG(ERR, "Replacing L1 filters is not supported.");
7869 		return I40E_NOT_SUPPORTED;
7870 	}
7871 
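	/*
	 * Repurpose an L1 filter slot (0X11) so it matches on the tunnel
	 * TEID words and the TR word instead of the inner MAC.
	 */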
7872 	memset(&filter_replace, 0,
7873 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7874 	memset(&filter_replace_buf, 0,
7875 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7876 
7877 	/* create L1 filter */
7878 	filter_replace.old_filter_type =
7879 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7880 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7881 	filter_replace.tr_bit = 0;
7882 
7883 	/* Prepare the buffer, 3 entries */
7884 	filter_replace_buf.data[0] =
7885 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7886 	filter_replace_buf.data[0] |=
7887 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7888 	filter_replace_buf.data[2] = 0xFF;
7889 	filter_replace_buf.data[3] = 0xFF;
7890 	filter_replace_buf.data[4] =
7891 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7892 	filter_replace_buf.data[4] |=
7893 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7894 	filter_replace_buf.data[7] = 0xF0;
7895 	filter_replace_buf.data[8]
7896 		= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7897 	filter_replace_buf.data[8] |=
7898 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7899 	filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7900 		I40E_TR_GENEVE_KEY_MASK |
7901 		I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7902 	filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7903 		I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7904 		I40E_TR_GRE_NO_KEY_MASK) >> 8;
7905 
7906 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7907 					       &filter_replace_buf);
7908 	if (!status && (filter_replace.old_filter_type !=
7909 			filter_replace.new_filter_type))
7910 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
7911 			    " original: 0x%x, new: 0x%x",
7912 			    dev->device->name,
7913 			    filter_replace.old_filter_type,
7914 			    filter_replace.new_filter_type);
7915 
7916 	return status;
7917 }
7918 
7919 static enum
7920 i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7921 {
7922 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7923 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7924 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7925 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
7926 	enum i40e_status_code status = I40E_SUCCESS;
7927 
7928 	if (pf->support_multi_driver) {
7929 		PMD_DRV_LOG(ERR, "Replacing cloud filters is not supported.");
7930 		return I40E_NOT_SUPPORTED;
7931 	}
7932 
7933 	/* For MPLSoUDP */
7934 	memset(&filter_replace, 0,
7935 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7936 	memset(&filter_replace_buf, 0,
7937 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7938 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7939 		I40E_AQC_MIRROR_CLOUD_FILTER;
7940 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7941 	filter_replace.new_filter_type =
7942 		I40E_AQC_ADD_CLOUD_FILTER_0X11;
7943 	/* Prepare the buffer, 2 entries */
7944 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7945 	filter_replace_buf.data[0] |=
7946 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7947 	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7948 	filter_replace_buf.data[4] |=
7949 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7950 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7951 					       &filter_replace_buf);
7952 	if (status < 0)
7953 		return status;
7954 	if (filter_replace.old_filter_type !=
7955 	    filter_replace.new_filter_type)
7956 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7957 			    " original: 0x%x, new: 0x%x",
7958 			    dev->device->name,
7959 			    filter_replace.old_filter_type,
7960 			    filter_replace.new_filter_type);
7961 
7962 	/* For MPLSoGRE */
7963 	memset(&filter_replace, 0,
7964 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7965 	memset(&filter_replace_buf, 0,
7966 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7967 
7968 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7969 		I40E_AQC_MIRROR_CLOUD_FILTER;
7970 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7971 	filter_replace.new_filter_type =
7972 		I40E_AQC_ADD_CLOUD_FILTER_0X12;
7973 	/* Prepare the buffer, 2 entries */
7974 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7975 	filter_replace_buf.data[0] |=
7976 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7977 	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7978 	filter_replace_buf.data[4] |=
7979 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7980 
7981 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7982 					       &filter_replace_buf);
7983 	if (!status && (filter_replace.old_filter_type !=
7984 			filter_replace.new_filter_type))
7985 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7986 			    " original: 0x%x, new: 0x%x",
7987 			    dev->device->name,
7988 			    filter_replace.old_filter_type,
7989 			    filter_replace.new_filter_type);
7990 
7991 	return status;
7992 }
7993 
7994 static enum i40e_status_code
7995 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
7996 {
7997 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7998 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7999 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8000 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8001 	enum i40e_status_code status = I40E_SUCCESS;
8002 
8003 	if (pf->support_multi_driver) {
8004 		PMD_DRV_LOG(ERR, "Replacing L1 filters is not supported.");
8005 		return I40E_NOT_SUPPORTED;
8006 	}
8007 
8008 	/* For GTP-C */
8009 	memset(&filter_replace, 0,
8010 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8011 	memset(&filter_replace_buf, 0,
8012 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8013 	/* create L1 filter */
8014 	filter_replace.old_filter_type =
8015 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8016 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
8017 	filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
8018 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8019 	/* Prepare the buffer, 2 entries */
8020 	filter_replace_buf.data[0] =
8021 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8022 	filter_replace_buf.data[0] |=
8023 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8024 	filter_replace_buf.data[2] = 0xFF;
8025 	filter_replace_buf.data[3] = 0xFF;
8026 	filter_replace_buf.data[4] =
8027 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8028 	filter_replace_buf.data[4] |=
8029 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8030 	filter_replace_buf.data[6] = 0xFF;
8031 	filter_replace_buf.data[7] = 0xFF;
8032 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8033 					       &filter_replace_buf);
8034 	if (status < 0)
8035 		return status;
8036 	if (filter_replace.old_filter_type !=
8037 	    filter_replace.new_filter_type)
8038 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8039 			    " original: 0x%x, new: 0x%x",
8040 			    dev->device->name,
8041 			    filter_replace.old_filter_type,
8042 			    filter_replace.new_filter_type);
8043 
8044 	/* for GTP-U */
8045 	memset(&filter_replace, 0,
8046 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8047 	memset(&filter_replace_buf, 0,
8048 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8049 	/* create L1 filter */
8050 	filter_replace.old_filter_type =
8051 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8052 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
8053 	filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
8054 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8055 	/* Prepare the buffer, 2 entries */
8056 	filter_replace_buf.data[0] =
8057 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8058 	filter_replace_buf.data[0] |=
8059 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8060 	filter_replace_buf.data[2] = 0xFF;
8061 	filter_replace_buf.data[3] = 0xFF;
8062 	filter_replace_buf.data[4] =
8063 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8064 	filter_replace_buf.data[4] |=
8065 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8066 	filter_replace_buf.data[6] = 0xFF;
8067 	filter_replace_buf.data[7] = 0xFF;
8068 
8069 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8070 					       &filter_replace_buf);
8071 	if (!status && (filter_replace.old_filter_type !=
8072 			filter_replace.new_filter_type))
8073 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8074 			    " original: 0x%x, new: 0x%x",
8075 			    dev->device->name,
8076 			    filter_replace.old_filter_type,
8077 			    filter_replace.new_filter_type);
8078 
8079 	return status;
8080 }
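
/*
 * A rough picture of the buffer layout used above (an informal reading
 * of the code, not a comment from the base driver): each L1 entry is a
 * field-vector word plus a 16-bit mask, so
 *
 *	data[0], data[2..3]: FV_TEID_WORD0 with mask 0xFFFF
 *	data[4], data[6..7]: FV_TEID_WORD1 with mask 0xFFFF
 *
 * together match the full 32-bit GTP TEID. GTP-C traffic uses the new
 * L1 filter type 0X12 and GTP-U uses 0X13; the cloud filters replaced
 * in i40e_replace_gtp_cloud_filter() build on these types.
 */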
8081 
8082 static enum
8083 i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
8084 {
8085 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8086 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8087 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8088 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8089 	enum i40e_status_code status = I40E_SUCCESS;
8090 
8091 	if (pf->support_multi_driver) {
8092 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8093 		return I40E_NOT_SUPPORTED;
8094 	}
8095 
8096 	/* for GTP-C */
8097 	memset(&filter_replace, 0,
8098 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8099 	memset(&filter_replace_buf, 0,
8100 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8101 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8102 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
8103 	filter_replace.new_filter_type =
8104 		I40E_AQC_ADD_CLOUD_FILTER_0X11;
8105 	/* Prepare the buffer, 2 entries */
8106 	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
8107 	filter_replace_buf.data[0] |=
8108 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8109 	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8110 	filter_replace_buf.data[4] |=
8111 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8112 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8113 					       &filter_replace_buf);
8114 	if (status < 0)
8115 		return status;
8116 	if (filter_replace.old_filter_type !=
8117 	    filter_replace.new_filter_type)
8118 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8119 			    " original: 0x%x, new: 0x%x",
8120 			    dev->device->name,
8121 			    filter_replace.old_filter_type,
8122 			    filter_replace.new_filter_type);
8123 
8124 	/* for GTP-U */
8125 	memset(&filter_replace, 0,
8126 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8127 	memset(&filter_replace_buf, 0,
8128 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8129 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8130 	filter_replace.old_filter_type =
8131 		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
8132 	filter_replace.new_filter_type =
8133 		I40E_AQC_ADD_CLOUD_FILTER_0X12;
8134 	/* Prepare the buffer, 2 entries */
8135 	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
8136 	filter_replace_buf.data[0] |=
8137 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8138 	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8139 	filter_replace_buf.data[4] |=
8140 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8141 
8142 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8143 					       &filter_replace_buf);
8144 	if (!status && (filter_replace.old_filter_type !=
8145 			filter_replace.new_filter_type))
8146 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8147 			    " original: 0x%x, new: 0x%x",
8148 			    dev->device->name,
8149 			    filter_replace.old_filter_type,
8150 			    filter_replace.new_filter_type);
8151 
8152 	return status;
8153 }
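
/*
 * Informal summary of the pairing established above:
 *
 *	GTP-C: L1 filter 0X12 (TEID match) -> cloud filter 0X11
 *	GTP-U: L1 filter 0X13 (TEID match) -> cloud filter 0X12
 *
 * Both replacements are issued at most once per port; the caller in
 * i40e_dev_consistent_tunnel_filter_set() gates them with
 * pf->gtp_replace_flag.
 */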
8154 
8155 static enum i40e_status_code
8156 i40e_replace_port_l1_filter(struct i40e_pf *pf,
8157 			    enum i40e_l4_port_type l4_port_type)
8158 {
8159 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8160 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8161 	enum i40e_status_code status = I40E_SUCCESS;
8162 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8163 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8164 
8165 	if (pf->support_multi_driver) {
8166 		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8167 		return I40E_NOT_SUPPORTED;
8168 	}
8169 
8170 	memset(&filter_replace, 0,
8171 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8172 	memset(&filter_replace_buf, 0,
8173 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8174 
8175 	/* create L1 filter */
8176 	if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8177 		filter_replace.old_filter_type =
8178 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8179 		filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8180 		filter_replace_buf.data[8] =
8181 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
8182 	} else {
8183 		filter_replace.old_filter_type =
8184 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
8185 		filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
8186 		filter_replace_buf.data[8] =
8187 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
8188 	}
8189 
8190 	filter_replace.tr_bit = 0;
8191 	/* Prepare the buffer, 3 entries */
8192 	filter_replace_buf.data[0] =
8193 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
8194 	filter_replace_buf.data[0] |=
8195 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8196 	filter_replace_buf.data[2] = 0x00;
8197 	filter_replace_buf.data[3] =
8198 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
8199 	filter_replace_buf.data[4] =
8200 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
8201 	filter_replace_buf.data[4] |=
8202 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8203 	filter_replace_buf.data[5] = 0x00;
8204 	filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
8205 		I40E_TR_L4_TYPE_TCP |
8206 		I40E_TR_L4_TYPE_SCTP;
8207 	filter_replace_buf.data[7] = 0x00;
8208 	filter_replace_buf.data[8] |=
8209 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8210 	filter_replace_buf.data[9] = 0x00;
8211 	filter_replace_buf.data[10] = 0xFF;
8212 	filter_replace_buf.data[11] = 0xFF;
8213 
8214 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8215 					       &filter_replace_buf);
8216 	if (!status && filter_replace.old_filter_type !=
8217 	    filter_replace.new_filter_type)
8218 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8219 			    " original: 0x%x, new: 0x%x",
8220 			    dev->device->name,
8221 			    filter_replace.old_filter_type,
8222 			    filter_replace.new_filter_type);
8223 
8224 	return status;
8225 }
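
/*
 * Informal reading of the three-entry buffer above: entry 0 (data[0..3])
 * carries the direction/ingress words, entry 1 (data[4..7]) the L4-type
 * selector with UDP, TCP and SCTP all enabled, and entry 2 (data[8..11])
 * the source or destination port chosen by l4_port_type, matched with a
 * full 0xFFFF mask.
 */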
8226 
8227 static enum i40e_status_code
8228 i40e_replace_port_cloud_filter(struct i40e_pf *pf,
8229 			       enum i40e_l4_port_type l4_port_type)
8230 {
8231 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8232 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8233 	enum i40e_status_code status = I40E_SUCCESS;
8234 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8235 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8236 
8237 	if (pf->support_multi_driver) {
8238 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8239 		return I40E_NOT_SUPPORTED;
8240 	}
8241 
8242 	memset(&filter_replace, 0,
8243 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8244 	memset(&filter_replace_buf, 0,
8245 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8246 
8247 	if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8248 		filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8249 		filter_replace.new_filter_type =
8250 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
8251 		filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
8252 	} else {
8253 		filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
8254 		filter_replace.new_filter_type =
8255 			I40E_AQC_ADD_CLOUD_FILTER_0X10;
8256 		filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
8257 	}
8258 
8259 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8260 	filter_replace.tr_bit = 0;
8261 	/* Prepare the buffer, 2 entries */
8262 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8263 	filter_replace_buf.data[0] |=
8264 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8265 	filter_replace_buf.data[4] |=
8266 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8267 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8268 					       &filter_replace_buf);
8269 
8270 	if (!status && filter_replace.old_filter_type !=
8271 	    filter_replace.new_filter_type)
8272 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8273 			    " original: 0x%x, new: 0x%x",
8274 			    dev->device->name,
8275 			    filter_replace.old_filter_type,
8276 			    filter_replace.new_filter_type);
8277 
8278 	return status;
8279 }
8280 
8281 int
8282 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
8283 		      struct i40e_tunnel_filter_conf *tunnel_filter,
8284 		      uint8_t add)
8285 {
8286 	uint16_t ip_type;
8287 	uint32_t ipv4_addr, ipv4_addr_le;
8288 	uint8_t i, tun_type = 0;
8289 	/* Internal buffer used to convert IPv6 byte order */
8290 	uint32_t convert_ipv6[4];
8291 	int val, ret = 0;
8292 	struct i40e_pf_vf *vf = NULL;
8293 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8294 	struct i40e_vsi *vsi;
8295 	struct i40e_aqc_cloud_filters_element_bb *cld_filter;
8296 	struct i40e_aqc_cloud_filters_element_bb *pfilter;
8297 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
8298 	struct i40e_tunnel_filter *tunnel, *node;
8299 	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
8300 	uint32_t teid_le;
8301 	bool big_buffer = 0;
8302 
8303 	cld_filter = rte_zmalloc("tunnel_filter",
8304 			 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8305 			 0);
8306 
8307 	if (cld_filter == NULL) {
8308 		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8309 		return -ENOMEM;
8310 	}
8311 	pfilter = cld_filter;
8312 
8313 	rte_ether_addr_copy(&tunnel_filter->outer_mac,
8314 			(struct rte_ether_addr *)&pfilter->element.outer_mac);
8315 	rte_ether_addr_copy(&tunnel_filter->inner_mac,
8316 			(struct rte_ether_addr *)&pfilter->element.inner_mac);
8317 
8318 	pfilter->element.inner_vlan =
8319 		rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8320 	if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
8321 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8322 		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8323 		ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8324 		rte_memcpy(&pfilter->element.ipaddr.v4.data,
8325 				&ipv4_addr_le,
8326 				sizeof(pfilter->element.ipaddr.v4.data));
8327 	} else {
8328 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8329 		for (i = 0; i < 4; i++) {
8330 			convert_ipv6[i] =
8331 			rte_cpu_to_le_32(rte_be_to_cpu_32(
8332 					 tunnel_filter->ip_addr.ipv6_addr[i]));
8333 		}
8334 		rte_memcpy(&pfilter->element.ipaddr.v6.data,
8335 			   &convert_ipv6,
8336 			   sizeof(pfilter->element.ipaddr.v6.data));
8337 	}
8338 
8339 	/* Check the tunnel type */
8340 	switch (tunnel_filter->tunnel_type) {
8341 	case I40E_TUNNEL_TYPE_VXLAN:
8342 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8343 		break;
8344 	case I40E_TUNNEL_TYPE_NVGRE:
8345 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8346 		break;
8347 	case I40E_TUNNEL_TYPE_IP_IN_GRE:
8348 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8349 		break;
8350 	case I40E_TUNNEL_TYPE_MPLSoUDP:
8351 		if (!pf->mpls_replace_flag) {
8352 			i40e_replace_mpls_l1_filter(pf);
8353 			i40e_replace_mpls_cloud_filter(pf);
8354 			pf->mpls_replace_flag = 1;
8355 		}
8356 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8357 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8358 			teid_le >> 4;
8359 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8360 			(teid_le & 0xF) << 12;
8361 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8362 			0x40;
8363 		big_buffer = 1;
8364 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
8365 		break;
8366 	case I40E_TUNNEL_TYPE_MPLSoGRE:
8367 		if (!pf->mpls_replace_flag) {
8368 			i40e_replace_mpls_l1_filter(pf);
8369 			i40e_replace_mpls_cloud_filter(pf);
8370 			pf->mpls_replace_flag = 1;
8371 		}
8372 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8373 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8374 			teid_le >> 4;
8375 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8376 			(teid_le & 0xF) << 12;
8377 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8378 			0x0;
8379 		big_buffer = 1;
8380 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
8381 		break;
8382 	case I40E_TUNNEL_TYPE_GTPC:
8383 		if (!pf->gtp_replace_flag) {
8384 			i40e_replace_gtp_l1_filter(pf);
8385 			i40e_replace_gtp_cloud_filter(pf);
8386 			pf->gtp_replace_flag = 1;
8387 		}
8388 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8389 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
8390 			(teid_le >> 16) & 0xFFFF;
8391 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
8392 			teid_le & 0xFFFF;
8393 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
8394 			0x0;
8395 		big_buffer = 1;
8396 		break;
8397 	case I40E_TUNNEL_TYPE_GTPU:
8398 		if (!pf->gtp_replace_flag) {
8399 			i40e_replace_gtp_l1_filter(pf);
8400 			i40e_replace_gtp_cloud_filter(pf);
8401 			pf->gtp_replace_flag = 1;
8402 		}
8403 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8404 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
8405 			(teid_le >> 16) & 0xFFFF;
8406 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
8407 			teid_le & 0xFFFF;
8408 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
8409 			0x0;
8410 		big_buffer = 1;
8411 		break;
8412 	case I40E_TUNNEL_TYPE_QINQ:
8413 		if (!pf->qinq_replace_flag) {
8414 			ret = i40e_cloud_filter_qinq_create(pf);
8415 			if (ret < 0)
8416 				PMD_DRV_LOG(DEBUG,
8417 					    "QinQ tunnel filter already created.");
8418 			pf->qinq_replace_flag = 1;
8419 		}
8420 		/* Add the outer and inner VLAN values to the
8421 		 * general fields.
8422 		 * Big Buffer must be set; see the changes in
8423 		 * i40e_aq_add_cloud_filters.
8424 		 */
8425 		pfilter->general_fields[0] = tunnel_filter->inner_vlan;
8426 		pfilter->general_fields[1] = tunnel_filter->outer_vlan;
8427 		big_buffer = 1;
8428 		break;
8429 	case I40E_CLOUD_TYPE_UDP:
8430 	case I40E_CLOUD_TYPE_TCP:
8431 	case I40E_CLOUD_TYPE_SCTP:
8432 		if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8433 			if (!pf->sport_replace_flag) {
8434 				i40e_replace_port_l1_filter(pf,
8435 						tunnel_filter->l4_port_type);
8436 				i40e_replace_port_cloud_filter(pf,
8437 						tunnel_filter->l4_port_type);
8438 				pf->sport_replace_flag = 1;
8439 			}
8440 			teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8441 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8442 				I40E_DIRECTION_INGRESS_KEY;
8443 
8444 			if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8445 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8446 					I40E_TR_L4_TYPE_UDP;
8447 			else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8448 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8449 					I40E_TR_L4_TYPE_TCP;
8450 			else
8451 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8452 					I40E_TR_L4_TYPE_SCTP;
8453 
8454 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8455 				(teid_le >> 16) & 0xFFFF;
8456 			big_buffer = 1;
8457 		} else {
8458 			if (!pf->dport_replace_flag) {
8459 				i40e_replace_port_l1_filter(pf,
8460 						tunnel_filter->l4_port_type);
8461 				i40e_replace_port_cloud_filter(pf,
8462 						tunnel_filter->l4_port_type);
8463 				pf->dport_replace_flag = 1;
8464 			}
8465 			teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8466 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
8467 				I40E_DIRECTION_INGRESS_KEY;
8468 
8469 			if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8470 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8471 					I40E_TR_L4_TYPE_UDP;
8472 			else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8473 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8474 					I40E_TR_L4_TYPE_TCP;
8475 			else
8476 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8477 					I40E_TR_L4_TYPE_SCTP;
8478 
8479 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
8480 				(teid_le >> 16) & 0xFFFF;
8481 			big_buffer = 1;
8482 		}
8483 
8484 		break;
8485 	default:
8486 		/* Other tunnel types are not supported. */
8487 		PMD_DRV_LOG(ERR, "Tunnel type is not supported.");
8488 		rte_free(cld_filter);
8489 		return -EINVAL;
8490 	}
8491 
8492 	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
8493 		pfilter->element.flags =
8494 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
8495 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
8496 		pfilter->element.flags =
8497 			I40E_AQC_ADD_CLOUD_FILTER_0X12;
8498 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
8499 		pfilter->element.flags =
8500 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
8501 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
8502 		pfilter->element.flags =
8503 			I40E_AQC_ADD_CLOUD_FILTER_0X12;
8504 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
8505 		pfilter->element.flags |=
8506 			I40E_AQC_ADD_CLOUD_FILTER_0X10;
8507 	else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
8508 		 tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
8509 		 tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
8510 		if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
8511 			pfilter->element.flags |=
8512 				I40E_AQC_ADD_CLOUD_FILTER_0X11;
8513 		else
8514 			pfilter->element.flags |=
8515 				I40E_AQC_ADD_CLOUD_FILTER_0X10;
8516 	} else {
8517 		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8518 						&pfilter->element.flags);
8519 		if (val < 0) {
8520 			rte_free(cld_filter);
8521 			return -EINVAL;
8522 		}
8523 	}
8524 
8525 	pfilter->element.flags |= rte_cpu_to_le_16(
8526 		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8527 		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8528 	pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8529 	pfilter->element.queue_number =
8530 		rte_cpu_to_le_16(tunnel_filter->queue_id);
8531 
8532 	if (!tunnel_filter->is_to_vf)
8533 		vsi = pf->main_vsi;
8534 	else {
8535 		if (tunnel_filter->vf_id >= pf->vf_num) {
8536 			PMD_DRV_LOG(ERR, "Invalid argument.");
8537 			rte_free(cld_filter);
8538 			return -EINVAL;
8539 		}
8540 		vf = &pf->vfs[tunnel_filter->vf_id];
8541 		vsi = vf->vsi;
8542 	}
8543 
8544 	/* Check if the filter exists in the SW list */
8545 	memset(&check_filter, 0, sizeof(check_filter));
8546 	i40e_tunnel_filter_convert(cld_filter, &check_filter);
8547 	check_filter.is_to_vf = tunnel_filter->is_to_vf;
8548 	check_filter.vf_id = tunnel_filter->vf_id;
8549 	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8550 	if (add && node) {
8551 		PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8552 		rte_free(cld_filter);
8553 		return -EINVAL;
8554 	}
8555 
8556 	if (!add && !node) {
8557 		PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8558 		rte_free(cld_filter);
8559 		return -EINVAL;
8560 	}
8561 
8562 	if (add) {
8563 		if (big_buffer)
8564 			ret = i40e_aq_add_cloud_filters_bb(hw,
8565 						   vsi->seid, cld_filter, 1);
8566 		else
8567 			ret = i40e_aq_add_cloud_filters(hw,
8568 					vsi->seid, &cld_filter->element, 1);
8569 		if (ret < 0) {
8570 			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8571 			rte_free(cld_filter);
8572 			return -ENOTSUP;
8573 		}
8574 		tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8575 		if (tunnel == NULL) {
8576 			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8577 			rte_free(cld_filter);
8578 			return -ENOMEM;
8579 		}
8580 
8581 		rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8582 		ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8583 		if (ret < 0)
8584 			rte_free(tunnel);
8585 	} else {
8586 		if (big_buffer)
8587 			ret = i40e_aq_rem_cloud_filters_bb(
8588 				hw, vsi->seid, cld_filter, 1);
8589 		else
8590 			ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8591 						&cld_filter->element, 1);
8592 		if (ret < 0) {
8593 			PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8594 			rte_free(cld_filter);
8595 			return -ENOTSUP;
8596 		}
8597 		ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8598 	}
8599 
8600 	rte_free(cld_filter);
8601 	return ret;
8602 }
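
/*
 * Minimal usage sketch (illustrative only; the field values below are
 * assumptions, not taken from this file): adding a VXLAN filter that
 * steers VNI 100 to queue 4 of the main VSI could look like
 *
 *	struct i40e_tunnel_filter_conf conf = {0};
 *
 *	conf.tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
 *	conf.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
 *	conf.tenant_id = 100;
 *	conf.queue_id = 4;
 *	ret = i40e_dev_consistent_tunnel_filter_set(pf, &conf, 1);
 *
 * and calling it again with add == 0 and the same fields removes the
 * filter.
 */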
8603 
8604 static int
8605 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8606 {
8607 	uint8_t i;
8608 
8609 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8610 		if (pf->vxlan_ports[i] == port)
8611 			return i;
8612 	}
8613 
8614 	return -1;
8615 }
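
/*
 * Unused entries in pf->vxlan_ports hold port 0, so calling this helper
 * with port == 0 doubles as a free-slot search; i40e_add_vxlan_port()
 * below relies on that to find room for a new port.
 */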
8616 
8617 static int
8618 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
8619 {
8620 	int  idx, ret;
8621 	uint8_t filter_idx = 0;
8622 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8623 
8624 	idx = i40e_get_vxlan_port_idx(pf, port);
8625 
8626 	/* Check if port already exists */
8627 	if (idx >= 0) {
8628 		PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8629 		return -EINVAL;
8630 	}
8631 
8632 	/* Now check if there is space to add the new port */
8633 	idx = i40e_get_vxlan_port_idx(pf, 0);
8634 	if (idx < 0) {
8635 		PMD_DRV_LOG(ERR,
8636 			"Maximum number of UDP ports reached, not adding port %d",
8637 			port);
8638 		return -ENOSPC;
8639 	}
8640 
8641 	ret = i40e_aq_add_udp_tunnel(hw, port, udp_type,
8642 					&filter_idx, NULL);
8643 	if (ret < 0) {
8644 		PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8645 		return -1;
8646 	}
8647 
8648 	PMD_DRV_LOG(INFO, "Added port %d with AQ command, index %d",
8649 			 port, filter_idx);
8650 
8651 	/* New port: add it and mark its index in the bitmap */
8652 	pf->vxlan_ports[idx] = port;
8653 	pf->vxlan_bitmap |= (1 << idx);
8654 
8655 	if (!(pf->flags & I40E_FLAG_VXLAN))
8656 		pf->flags |= I40E_FLAG_VXLAN;
8657 
8658 	return 0;
8659 }
8660 
8661 static int
8662 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8663 {
8664 	int idx;
8665 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8666 
8667 	if (!(pf->flags & I40E_FLAG_VXLAN)) {
8668 		PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8669 		return -EINVAL;
8670 	}
8671 
8672 	idx = i40e_get_vxlan_port_idx(pf, port);
8673 
8674 	if (idx < 0) {
8675 		PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8676 		return -EINVAL;
8677 	}
8678 
8679 	if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8680 		PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8681 		return -1;
8682 	}
8683 
8684 	PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, index %d",
8685 			port, idx);
8686 
8687 	pf->vxlan_ports[idx] = 0;
8688 	pf->vxlan_bitmap &= ~(1 << idx);
8689 
8690 	if (!pf->vxlan_bitmap)
8691 		pf->flags &= ~I40E_FLAG_VXLAN;
8692 
8693 	return 0;
8694 }
8695 
8696 /* Add UDP tunneling port */
8697 static int
8698 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8699 			     struct rte_eth_udp_tunnel *udp_tunnel)
8700 {
8701 	int ret = 0;
8702 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8703 
8704 	if (udp_tunnel == NULL)
8705 		return -EINVAL;
8706 
8707 	switch (udp_tunnel->prot_type) {
8708 	case RTE_ETH_TUNNEL_TYPE_VXLAN:
8709 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8710 					  I40E_AQC_TUNNEL_TYPE_VXLAN);
8711 		break;
8712 	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
8713 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8714 					  I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
8715 		break;
8716 	case RTE_ETH_TUNNEL_TYPE_GENEVE:
8717 	case RTE_ETH_TUNNEL_TYPE_TEREDO:
8718 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8719 		ret = -1;
8720 		break;
8721 
8722 	default:
8723 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8724 		ret = -1;
8725 		break;
8726 	}
8727 
8728 	return ret;
8729 }
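
/*
 * Illustrative application-side usage (an assumed typical call path,
 * not part of this file): the callback above is reached through the
 * ethdev API, e.g. to offload the IANA VXLAN port:
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */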
8730 
8731 /* Remove UDP tunneling port */
8732 static int
8733 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8734 			     struct rte_eth_udp_tunnel *udp_tunnel)
8735 {
8736 	int ret = 0;
8737 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8738 
8739 	if (udp_tunnel == NULL)
8740 		return -EINVAL;
8741 
8742 	switch (udp_tunnel->prot_type) {
8743 	case RTE_ETH_TUNNEL_TYPE_VXLAN:
8744 	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
8745 		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8746 		break;
8747 	case RTE_ETH_TUNNEL_TYPE_GENEVE:
8748 	case RTE_ETH_TUNNEL_TYPE_TEREDO:
8749 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8750 		ret = -1;
8751 		break;
8752 	default:
8753 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8754 		ret = -1;
8755 		break;
8756 	}
8757 
8758 	return ret;
8759 }
8760 
8761 /* Calculate the maximum number of contiguous PF queues that are configured */
8762 int
8763 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8764 {
8765 	struct rte_eth_dev_data *data = pf->dev_data;
8766 	int i, num;
8767 	struct i40e_rx_queue *rxq;
8768 
8769 	num = 0;
8770 	for (i = 0; i < pf->lan_nb_qps; i++) {
8771 		rxq = data->rx_queues[i];
8772 		if (rxq && rxq->q_set)
8773 			num++;
8774 		else
8775 			break;
8776 	}
8777 
8778 	return num;
8779 }
8780 
8781 /* Reset the global configuration of the hash function and input sets */
8782 static void
8783 i40e_pf_global_rss_reset(struct i40e_pf *pf)
8784 {
8785 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8786 	uint32_t reg, reg_val;
8787 	int i;
8788 
8789 	/* Reset global RSS function sets */
8790 	reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8791 	if (!(reg_val & I40E_GLQF_CTL_HTOEP_MASK)) {
8792 		reg_val |= I40E_GLQF_CTL_HTOEP_MASK;
8793 		i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg_val);
8794 	}
8795 
8796 	for (i = 0; i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) {
8797 		uint64_t inset;
8798 		int j, pctype;
8799 
8800 		if (hw->mac.type == I40E_MAC_X722)
8801 			pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(i));
8802 		else
8803 			pctype = i;
8804 
8805 		/* Reset pctype insets */
8806 		inset = i40e_get_default_input_set(i);
8807 		if (inset) {
8808 			pf->hash_input_set[pctype] = inset;
8809 			inset = i40e_translate_input_set_reg(hw->mac.type,
8810 							     inset);
8811 
8812 			reg = I40E_GLQF_HASH_INSET(0, pctype);
8813 			i40e_check_write_global_reg(hw, reg, (uint32_t)inset);
8814 			reg = I40E_GLQF_HASH_INSET(1, pctype);
8815 			i40e_check_write_global_reg(hw, reg,
8816 						    (uint32_t)(inset >> 32));
8817 
8818 			/* Clear unused mask registers of the pctype */
8819 			for (j = 0; j < I40E_INSET_MASK_NUM_REG; j++) {
8820 				reg = I40E_GLQF_HASH_MSK(j, pctype);
8821 				i40e_check_write_global_reg(hw, reg, 0);
8822 			}
8823 		}
8824 
8825 		/* Reset pctype symmetric sets */
8826 		reg = I40E_GLQF_HSYM(pctype);
8827 		reg_val = i40e_read_rx_ctl(hw, reg);
8828 		if (reg_val & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8829 			reg_val &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;
8830 			i40e_write_global_rx_ctl(hw, reg, reg_val);
8831 		}
8832 	}
8833 	I40E_WRITE_FLUSH(hw);
8834 }
8835 
8836 int
8837 i40e_pf_reset_rss_reta(struct i40e_pf *pf)
8838 {
8839 	struct i40e_hw *hw = &pf->adapter->hw;
8840 	uint8_t lut[RTE_ETH_RSS_RETA_SIZE_512];
8841 	uint32_t i;
8842 	int num;
8843 
8844 	/* If both VMDQ and RSS are enabled, not all of the PF
8845 	 * queues are configured. Calculate the number of PF
8846 	 * queues that are actually configured.
8847 	 */
8848 	if (pf->dev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG)
8849 		num = i40e_pf_calc_configured_queues_num(pf);
8850 	else
8851 		num = pf->dev_data->nb_rx_queues;
8852 
8853 	num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8854 	if (num <= 0)
8855 		return 0;
8856 
8857 	for (i = 0; i < hw->func_caps.rss_table_size; i++)
8858 		lut[i] = (uint8_t)(i % (uint32_t)num);
8859 
8860 	return i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);
8861 }
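
/*
 * Worked example (illustrative): with num == 4 configured queues and a
 * 512-entry RETA, the loop above fills the table round-robin:
 *
 *	lut[] = { 0, 1, 2, 3, 0, 1, 2, 3, ... }
 *
 * so hash results spread evenly across the first four queues.
 */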
8862 
8863 int
8864 i40e_pf_reset_rss_key(struct i40e_pf *pf)
8865 {
8866 	const uint8_t key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8867 			sizeof(uint32_t);
8868 	uint8_t *rss_key;
8869 
8870 	/* Reset key */
8871 	rss_key = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key;
8872 	if (!rss_key ||
8873 	    pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key_len < key_len) {
8874 		static uint32_t rss_key_default[] = {0x6b793944,
8875 			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8876 			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8877 			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8878 
8879 		rss_key = (uint8_t *)rss_key_default;
8880 	}
8881 
8882 	return i40e_set_rss_key(pf->main_vsi, rss_key, key_len);
8883 }
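
/*
 * Sizing note (assuming the base-code value I40E_PFQF_HKEY_MAX_INDEX ==
 * 12): key_len works out to (12 + 1) * sizeof(uint32_t) = 52 bytes,
 * the full RSS hash key size the hardware expects.
 */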
8884 
8885 static int
8886 i40e_pf_rss_reset(struct i40e_pf *pf)
8887 {
8888 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8889 
8890 	int ret;
8891 
8892 	pf->hash_filter_enabled = 0;
8893 	i40e_pf_disable_rss(pf);
8894 	i40e_set_symmetric_hash_enable_per_port(hw, 0);
8895 
8896 	if (!pf->support_multi_driver)
8897 		i40e_pf_global_rss_reset(pf);
8898 
8899 	/* Reset RETA table */
8900 	if (pf->adapter->rss_reta_updated == 0) {
8901 		ret = i40e_pf_reset_rss_reta(pf);
8902 		if (ret)
8903 			return ret;
8904 	}
8905 
8906 	return i40e_pf_reset_rss_key(pf);
8907 }
8908 
8909 /* Configure RSS */
8910 int
8911 i40e_pf_config_rss(struct i40e_pf *pf)
8912 {
8913 	struct i40e_hw *hw;
8914 	enum rte_eth_rx_mq_mode mq_mode;
8915 	uint64_t rss_hf, hena;
8916 	int ret;
8917 
8918 	ret = i40e_pf_rss_reset(pf);
8919 	if (ret) {
8920 		PMD_DRV_LOG(ERR, "Reset RSS failed, RSS has been disabled");
8921 		return ret;
8922 	}
8923 
8924 	rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
8925 	mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8926 	if (!(rss_hf & pf->adapter->flow_types_mask) ||
8927 	    !(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
8928 		return 0;
8929 
8930 	hw = I40E_PF_TO_HW(pf);
8931 	hena = i40e_config_hena(pf->adapter, rss_hf);
8932 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
8933 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
8934 	I40E_WRITE_FLUSH(hw);
8935 
8936 	return 0;
8937 }
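
/*
 * Sketch of the HENA write above (informal): the enabled flow types form
 * a 64-bit bitmap that is split across two 32-bit registers:
 *
 *	I40E_PFQF_HENA(0) <- (uint32_t)hena;		low 32 bits
 *	I40E_PFQF_HENA(1) <- (uint32_t)(hena >> 32);	high 32 bits
 */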
8938 
8939 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8940 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
8941 int
8942 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8943 {
8944 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8945 	uint32_t val, reg;
8946 	int ret = -EINVAL;
8947 
8948 	if (pf->support_multi_driver) {
8949 		PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8950 		return -ENOTSUP;
8951 	}
8952 
8953 	val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8954 	PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8955 
8956 	if (len == 3) {
8957 		reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8958 	} else if (len == 4) {
8959 		reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8960 	} else {
8961 		PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8962 		return ret;
8963 	}
8964 
8965 	if (reg != val) {
8966 		ret = i40e_aq_debug_write_global_register(hw,
8967 						   I40E_GL_PRS_FVBM(2),
8968 						   reg, NULL);
8969 		if (ret != 0)
8970 			return ret;
8971 		PMD_DRV_LOG(DEBUG, "Global register 0x%08x changed "
8972 			    "to value 0x%08x",
8973 			    I40E_GL_PRS_FVBM(2), reg);
8974 	} else {
8975 		ret = 0;
8976 	}
8977 	PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8978 		    I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8979 
8980 	return ret;
8981 }
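
/*
 * Usage sketch (values illustrative): flow rules that match a GRE key
 * pick one of the two supported encodings, e.g.
 *
 *	i40e_dev_set_gre_key_len(hw, 3);	3-byte key, sets MSK_ENA
 *	i40e_dev_set_gre_key_len(hw, 4);	4-byte key, clears MSK_ENA
 *
 * Any other length is rejected with -EINVAL, and the whole operation is
 * refused under support-multi-driver because it touches a global
 * register.
 */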
8982 
8983 /* Set the symmetric hash enable configuration per port */
8984 void
8985 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8986 {
8987 	uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8988 
8989 	if (enable > 0) {
8990 		if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)
8991 			return;
8992 
8993 		reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8994 	} else {
8995 		if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK))
8996 			return;
8997 
8998 		reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8999 	}
9000 	i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
9001 	I40E_WRITE_FLUSH(hw);
9002 }
9003 
9004 /**
9005  * Valid input sets for hash and flow director filters per PCTYPE
9006  */
9007 static uint64_t
9008 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
9009 		enum rte_filter_type filter)
9010 {
9011 	uint64_t valid;
9012 
9013 	static const uint64_t valid_hash_inset_table[] = {
9014 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9015 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9016 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9017 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
9018 			I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
9019 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9020 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9021 			I40E_INSET_FLEX_PAYLOAD,
9022 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9023 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9024 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9025 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9026 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9027 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9028 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9029 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9030 			I40E_INSET_FLEX_PAYLOAD,
9031 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9032 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9033 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9034 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9035 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9036 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9037 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9038 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9039 			I40E_INSET_FLEX_PAYLOAD,
9040 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9041 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9042 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9043 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9044 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9045 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9046 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9047 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9048 			I40E_INSET_FLEX_PAYLOAD,
9049 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9050 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9051 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9052 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9053 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9054 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9055 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9056 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9057 			I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9058 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9059 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9060 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9061 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9062 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9063 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9064 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9065 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9066 			I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9067 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9068 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9069 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9070 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9071 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9072 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9073 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9074 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9075 			I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9076 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9077 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9078 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9079 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9080 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9081 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9082 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9083 			I40E_INSET_FLEX_PAYLOAD,
9084 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9085 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9086 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9087 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9088 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9089 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9090 			I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9091 			I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9092 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9093 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9094 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9095 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9096 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9097 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9098 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9099 			I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
9100 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9101 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9102 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9103 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9104 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9105 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9106 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9107 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9108 			I40E_INSET_FLEX_PAYLOAD,
9109 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9110 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9111 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9112 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9113 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9114 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9115 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9116 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9117 			I40E_INSET_FLEX_PAYLOAD,
9118 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9119 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9120 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9121 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9122 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9123 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9124 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9125 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9126 			I40E_INSET_FLEX_PAYLOAD,
9127 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9128 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9129 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9130 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9131 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9132 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9133 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9134 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9135 			I40E_INSET_FLEX_PAYLOAD,
9136 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9137 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9138 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9139 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9140 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9141 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9142 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9143 			I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9144 			I40E_INSET_FLEX_PAYLOAD,
9145 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9146 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9147 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9148 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9149 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9150 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9151 			I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9152 			I40E_INSET_FLEX_PAYLOAD,
9153 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9154 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9155 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9156 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9157 			I40E_INSET_FLEX_PAYLOAD,
9158 	};
9159 
9160 	/**
9161 	 * Flow director supports only fields defined in
9162 	 * union rte_eth_fdir_flow.
9163 	 */
9164 	static const uint64_t valid_fdir_inset_table[] = {
9165 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9166 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9167 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9168 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9169 		I40E_INSET_IPV4_TTL,
9170 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9171 		I40E_INSET_DMAC | I40E_INSET_SMAC |
9172 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9173 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9174 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9175 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9176 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9177 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9178 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9179 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9180 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9181 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9182 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9183 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9184 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9185 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9186 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9187 		I40E_INSET_DMAC | I40E_INSET_SMAC |
9188 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9189 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9190 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9191 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9192 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9193 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9194 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9195 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9196 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9197 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9198 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9199 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9200 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9201 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9202 		I40E_INSET_SCTP_VT,
9203 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9204 		I40E_INSET_DMAC | I40E_INSET_SMAC |
9205 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9206 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9207 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9208 		I40E_INSET_IPV4_TTL,
9209 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9210 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9211 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9212 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9213 		I40E_INSET_IPV6_HOP_LIMIT,
9214 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9215 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9216 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9217 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9218 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9219 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9220 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9221 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9222 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9223 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9224 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9225 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9226 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9227 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9228 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9229 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9230 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9231 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9232 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9233 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9234 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9235 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9236 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9237 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9238 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9239 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9240 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9241 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9242 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9243 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9244 		I40E_INSET_SCTP_VT,
9245 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9246 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9247 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9248 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9249 		I40E_INSET_IPV6_HOP_LIMIT,
9250 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9251 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9252 		I40E_INSET_LAST_ETHER_TYPE,
9253 	};
9254 
9255 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9256 		return 0;
9257 	if (filter == RTE_ETH_FILTER_HASH)
9258 		valid = valid_hash_inset_table[pctype];
9259 	else
9260 		valid = valid_fdir_inset_table[pctype];
9261 
9262 	return valid;
9263 }
9264 
9265 /**
9266  * Validate if the input set is allowed for a specific PCTYPE
9267  */
9268 int
9269 i40e_validate_input_set(enum i40e_filter_pctype pctype,
9270 		enum rte_filter_type filter, uint64_t inset)
9271 {
9272 	uint64_t valid;
9273 
9274 	valid = i40e_get_valid_input_set(pctype, filter);
9275 	if (inset & (~valid))
9276 		return -EINVAL;
9277 
9278 	return 0;
9279 }
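
/*
 * Example (illustrative): a flow-director rule on IPv4/UDP matching the
 * classic 4-tuple passes validation, while one that also requests the
 * TCP flags does not:
 *
 *	inset = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 *		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT;
 *	i40e_validate_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
 *				RTE_ETH_FILTER_FDIR, inset);	returns 0
 *	i40e_validate_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
 *				RTE_ETH_FILTER_FDIR,
 *				inset | I40E_INSET_TCP_FLAGS);	returns -EINVAL
 */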
9280 
9281 /* Default input set field combinations per PCTYPE */
9282 uint64_t
9283 i40e_get_default_input_set(uint16_t pctype)
9284 {
9285 	static const uint64_t default_inset_table[] = {
9286 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9287 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9288 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9289 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9290 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9291 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9292 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9293 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9294 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9295 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9296 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9297 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9298 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9299 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9300 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9301 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9302 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9303 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9304 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9305 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9306 			I40E_INSET_SCTP_VT,
9307 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9308 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9309 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9310 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9311 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9312 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9313 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9314 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9315 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9316 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9317 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9318 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9319 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9320 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9321 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9322 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9323 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9324 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9325 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9326 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9327 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9328 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9329 			I40E_INSET_SCTP_VT,
9330 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9331 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9332 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9333 			I40E_INSET_LAST_ETHER_TYPE,
9334 	};
9335 
9336 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9337 		return 0;
9338 
9339 	return default_inset_table[pctype];
9340 }
9341 
9342 /**
9343  * Translate the input set from generic bit masks to register-aware
9344  * bit masks
9345  */
9346 uint64_t
9347 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9348 {
9349 	uint64_t val = 0;
9350 	uint16_t i;
9351 
9352 	struct inset_map {
9353 		uint64_t inset;
9354 		uint64_t inset_reg;
9355 	};
9356 
9357 	static const struct inset_map inset_map_common[] = {
9358 		{I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9359 		{I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9360 		{I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9361 		{I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9362 		{I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9363 		{I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9364 		{I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9365 		{I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9366 		{I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9367 		{I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9368 		{I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9369 		{I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9370 		{I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9371 		{I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9372 		{I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9373 		{I40E_INSET_TUNNEL_DMAC,
9374 			I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9375 		{I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9376 		{I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9377 		{I40E_INSET_TUNNEL_SRC_PORT,
9378 			I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9379 		{I40E_INSET_TUNNEL_DST_PORT,
9380 			I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9381 		{I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9382 		{I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9383 		{I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9384 		{I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9385 		{I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9386 		{I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9387 		{I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9388 		{I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9389 		{I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9390 	};
9391 
9392 	/* Some registers map differently on the X722 */
9393 	static const struct inset_map inset_map_diff_x722[] = {
9394 		{I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9395 		{I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9396 		{I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9397 		{I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9398 	};
9399 
9400 	static const struct inset_map inset_map_diff_not_x722[] = {
9401 		{I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9402 		{I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9403 		{I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9404 		{I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9405 	};
9406 
9407 	if (input == 0)
9408 		return val;
9409 
9410 	/* Translate input set to register aware inset */
9411 	if (type == I40E_MAC_X722) {
9412 		for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9413 			if (input & inset_map_diff_x722[i].inset)
9414 				val |= inset_map_diff_x722[i].inset_reg;
9415 		}
9416 	} else {
9417 		for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9418 			if (input & inset_map_diff_not_x722[i].inset)
9419 				val |= inset_map_diff_not_x722[i].inset_reg;
9420 		}
9421 	}
9422 
9423 	for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9424 		if (input & inset_map_common[i].inset)
9425 			val |= inset_map_common[i].inset_reg;
9426 	}
9427 
9428 	return val;
9429 }
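
/*
 * Design note (informal): the same logical inset bit can map to a
 * different register encoding depending on the MAC type -- e.g.
 * I40E_INSET_IPV4_SRC translates via inset_map_diff_x722[] on X722
 * parts and via inset_map_diff_not_x722[] elsewhere -- which is why the
 * translation runs a per-family pass followed by the common pass.
 */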
9430 
9431 static int
9432 i40e_get_inset_field_offset(struct i40e_hw *hw, uint32_t pit_reg_start,
9433 			    uint32_t pit_reg_count, uint32_t hdr_off)
9434 {
9435 	const uint32_t pit_reg_end = pit_reg_start + pit_reg_count;
9436 	uint32_t field_off = I40E_FDIR_FIELD_OFFSET(hdr_off);
9437 	uint32_t i, reg_val, src_off, count;
9438 
9439 	for (i = pit_reg_start; i < pit_reg_end; i++) {
9440 		reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_PIT(i));
9441 
9442 		src_off = I40E_GLQF_PIT_SOURCE_OFF_GET(reg_val);
9443 		count = I40E_GLQF_PIT_FSIZE_GET(reg_val);
9444 
9445 		if (src_off <= field_off && (src_off + count) > field_off)
9446 			break;
9447 	}
9448 
9449 	if (i >= pit_reg_end) {
9450 		PMD_DRV_LOG(ERR,
9451 			    "Hardware GLQF_PIT configuration does not support this field mask");
9452 		return -1;
9453 	}
9454 
9455 	return I40E_GLQF_PIT_DEST_OFF_GET(reg_val) + field_off - src_off;
9456 }
9457 
9458 int
9459 i40e_generate_inset_mask_reg(struct i40e_hw *hw, uint64_t inset,
9460 			     uint32_t *mask, uint8_t nb_elem)
9461 {
9462 	static const uint64_t mask_inset[] = {
9463 		I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL,
9464 		I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT };
9465 
9466 	static const struct {
9467 		uint64_t inset;
9468 		uint32_t mask;
9469 		uint32_t offset;
9470 	} inset_mask_offset_map[] = {
9471 		{ I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK,
9472 		  offsetof(struct rte_ipv4_hdr, type_of_service) },
9473 
9474 		{ I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK,
9475 		  offsetof(struct rte_ipv4_hdr, next_proto_id) },
9476 
9477 		{ I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK,
9478 		  offsetof(struct rte_ipv4_hdr, time_to_live) },
9479 
9480 		{ I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK,
9481 		  offsetof(struct rte_ipv6_hdr, vtc_flow) },
9482 
9483 		{ I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK,
9484 		  offsetof(struct rte_ipv6_hdr, proto) },
9485 
9486 		{ I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK,
9487 		  offsetof(struct rte_ipv6_hdr, hop_limits) },
9488 	};
9489 
9490 	uint32_t i;
9491 	int idx = 0;
9492 
9493 	assert(mask);
9494 	if (!inset)
9495 		return 0;
9496 
9497 	for (i = 0; i < RTE_DIM(mask_inset); i++) {
9498 		/* Clear the inset bits if no mask register is required,
9499 		 * for example when both proto and ttl are in the inset.
9500 		 */
9501 		if ((mask_inset[i] & inset) == mask_inset[i]) {
9502 			inset &= ~mask_inset[i];
9503 			if (!inset)
9504 				return 0;
9505 		}
9506 	}
9507 
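	/*
	 * For each remaining field that still needs a mask, locate its
	 * offset in the field vector via the GLQF_PIT tables and build
	 * the corresponding mask register value (offset plus bit mask).
	 */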
9508 	for (i = 0; i < RTE_DIM(inset_mask_offset_map); i++) {
9509 		uint32_t pit_start, pit_count;
9510 		int offset;
9511 
9512 		if (!(inset_mask_offset_map[i].inset & inset))
9513 			continue;
9514 
9515 		if (inset_mask_offset_map[i].inset &
9516 		    (I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9517 		     I40E_INSET_IPV4_TTL)) {
9518 			pit_start = I40E_GLQF_PIT_IPV4_START;
9519 			pit_count = I40E_GLQF_PIT_IPV4_COUNT;
9520 		} else {
9521 			pit_start = I40E_GLQF_PIT_IPV6_START;
9522 			pit_count = I40E_GLQF_PIT_IPV6_COUNT;
9523 		}
9524 
9525 		offset = i40e_get_inset_field_offset(hw, pit_start, pit_count,
9526 				inset_mask_offset_map[i].offset);
9527 
9528 		if (offset < 0)
9529 			return -EINVAL;
9530 
9531 		if (idx >= nb_elem) {
9532 			PMD_DRV_LOG(ERR,
9533 				    "Configuration of inset mask out of range %u",
9534 				    nb_elem);
9535 			return -ERANGE;
9536 		}
9537 
9538 		mask[idx] = I40E_GLQF_PIT_BUILD((uint32_t)offset,
9539 						inset_mask_offset_map[i].mask);
9540 		idx++;
9541 	}
9542 
9543 	return idx;
9544 }
9545 
9546 void
9547 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9548 {
9549 	uint32_t reg = i40e_read_rx_ctl(hw, addr);
9550 
9551 	PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9552 	if (reg != val)
9553 		i40e_write_rx_ctl(hw, addr, val);
9554 	PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9555 		    (uint32_t)i40e_read_rx_ctl(hw, addr));
9556 }
9557 
9558 void
9559 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9560 {
9561 	uint32_t reg = i40e_read_rx_ctl(hw, addr);
9562 	struct rte_eth_dev_data *dev_data =
9563 		((struct i40e_adapter *)hw->back)->pf.dev_data;
9564 	struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
9565 
9566 	if (reg != val) {
9567 		i40e_write_rx_ctl(hw, addr, val);
9568 		PMD_DRV_LOG(WARNING,
9569 			    "i40e device %s changed global register [0x%08x]."
9570 			    " original: 0x%08x, new: 0x%08x",
9571 			    dev->device->name, addr, reg,
9572 			    (uint32_t)i40e_read_rx_ctl(hw, addr));
9573 	}
9574 }
9575 
9576 static void
9577 i40e_filter_input_set_init(struct i40e_pf *pf)
9578 {
9579 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9580 	enum i40e_filter_pctype pctype;
9581 	uint64_t input_set, inset_reg;
9582 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9583 	int num, i;
9584 	uint16_t flow_type;
9585 
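	/*
	 * Program the default input set for every flow-director pctype:
	 * the per-port FD inset registers are always written, while the
	 * global hash inset/mask registers are only touched when the
	 * device is not shared with other drivers.
	 */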
9586 	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9587 	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9588 		flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9589 
9590 		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9591 			continue;
9592 
9593 		input_set = i40e_get_default_input_set(pctype);
9594 
9595 		num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
9596 						   I40E_INSET_MASK_NUM_REG);
9597 		if (num < 0)
9598 			return;
9599 		if (pf->support_multi_driver && num > 0) {
9600 			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9601 			return;
9602 		}
9603 		inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9604 					input_set);
9605 
9606 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9607 				      (uint32_t)(inset_reg & UINT32_MAX));
9608 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9609 				     (uint32_t)((inset_reg >>
9610 				     I40E_32_BIT_WIDTH) & UINT32_MAX));
9611 		if (!pf->support_multi_driver) {
9612 			i40e_check_write_global_reg(hw,
9613 					    I40E_GLQF_HASH_INSET(0, pctype),
9614 					    (uint32_t)(inset_reg & UINT32_MAX));
9615 			i40e_check_write_global_reg(hw,
9616 					     I40E_GLQF_HASH_INSET(1, pctype),
9617 					     (uint32_t)((inset_reg >>
9618 					      I40E_32_BIT_WIDTH) & UINT32_MAX));
9619 
9620 			for (i = 0; i < num; i++) {
9621 				i40e_check_write_global_reg(hw,
9622 						    I40E_GLQF_FD_MSK(i, pctype),
9623 						    mask_reg[i]);
9624 				i40e_check_write_global_reg(hw,
9625 						  I40E_GLQF_HASH_MSK(i, pctype),
9626 						  mask_reg[i]);
9627 			}
9628 			/* clear unused mask registers of the pctype */
9629 			for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9630 				i40e_check_write_global_reg(hw,
9631 						    I40E_GLQF_FD_MSK(i, pctype),
9632 						    0);
9633 				i40e_check_write_global_reg(hw,
9634 						  I40E_GLQF_HASH_MSK(i, pctype),
9635 						  0);
9636 			}
9637 		} else {
9638 			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9639 		}
9640 		I40E_WRITE_FLUSH(hw);
9641 
9642 		/* store the default input set */
9643 		if (!pf->support_multi_driver)
9644 			pf->hash_input_set[pctype] = input_set;
9645 		pf->fdir.input_set[pctype] = input_set;
9646 	}
9647 }
9648 
9649 int
9650 i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
9651 		    uint32_t pctype, bool add)
9652 {
9653 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9654 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9655 	uint64_t inset_reg = 0;
9656 	int num, i;
9657 
9658 	if (pf->support_multi_driver) {
9659 		PMD_DRV_LOG(ERR,
9660 			    "Modify input set is not permitted when multi-driver enabled.");
9661 		return -EPERM;
9662 	}
9663 
9664 	/* For X722, get translated pctype in fd pctype register */
9665 	if (hw->mac.type == I40E_MAC_X722)
9666 		pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
9667 
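	/*
	 * In "add" mode the new fields are OR-ed into the inset already
	 * programmed in hardware instead of replacing it.
	 */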
9668 	if (add) {
9669 		/* get inset value in register */
9670 		inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9671 		inset_reg <<= I40E_32_BIT_WIDTH;
9672 		inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9673 		input_set |= pf->hash_input_set[pctype];
9674 	}
9675 	num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
9676 					   I40E_INSET_MASK_NUM_REG);
9677 	if (num < 0)
9678 		return -EINVAL;
9679 
9680 	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9681 
9682 	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9683 				    (uint32_t)(inset_reg & UINT32_MAX));
9684 	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9685 				    (uint32_t)((inset_reg >>
9686 				    I40E_32_BIT_WIDTH) & UINT32_MAX));
9687 
9688 	for (i = 0; i < num; i++)
9689 		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9690 					    mask_reg[i]);
9691 	/* clear unused mask registers of the pctype */
9692 	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9693 		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9694 					    0);
9695 	I40E_WRITE_FLUSH(hw);
9696 
9697 	pf->hash_input_set[pctype] = input_set;
9698 	return 0;
9699 }
9700 
9701 /* Convert ethertype filter structure */
9702 static int
9703 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9704 			      struct i40e_ethertype_filter *filter)
9705 {
9706 	rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
9707 		RTE_ETHER_ADDR_LEN);
9708 	filter->input.ether_type = input->ether_type;
9709 	filter->flags = input->flags;
9710 	filter->queue = input->queue;
9711 
9712 	return 0;
9713 }
9714 
9715 /* Check if there exists the ethertype filter */
9716 struct i40e_ethertype_filter *
9717 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9718 				const struct i40e_ethertype_filter_input *input)
9719 {
9720 	int ret;
9721 
9722 	ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9723 	if (ret < 0)
9724 		return NULL;
9725 
9726 	return ethertype_rule->hash_map[ret];
9727 }
9728 
9729 /* Add ethertype filter in SW list */
9730 static int
9731 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9732 				struct i40e_ethertype_filter *filter)
9733 {
9734 	struct i40e_ethertype_rule *rule = &pf->ethertype;
9735 	int ret;
9736 
9737 	ret = rte_hash_add_key(rule->hash_table, &filter->input);
9738 	if (ret < 0) {
9739 		PMD_DRV_LOG(ERR,
9740 			    "Failed to insert ethertype filter"
9741 			    " into hash table %d!",
9742 			    ret);
9743 		return ret;
9744 	}
9745 	rule->hash_map[ret] = filter;
9746 
9747 	TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9748 
9749 	return 0;
9750 }
9751 
9752 /* Delete ethertype filter in SW list */
9753 int
9754 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9755 			     struct i40e_ethertype_filter_input *input)
9756 {
9757 	struct i40e_ethertype_rule *rule = &pf->ethertype;
9758 	struct i40e_ethertype_filter *filter;
9759 	int ret;
9760 
9761 	ret = rte_hash_del_key(rule->hash_table, input);
9762 	if (ret < 0) {
9763 		PMD_DRV_LOG(ERR,
9764 			    "Failed to delete ethertype filter"
9765 			    " from hash table %d!",
9766 			    ret);
9767 		return ret;
9768 	}
9769 	filter = rule->hash_map[ret];
9770 	rule->hash_map[ret] = NULL;
9771 
9772 	TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9773 	rte_free(filter);
9774 
9775 	return 0;
9776 }
9777 
9778 /*
9779  * Configure an ethertype filter, which can direct packets by filtering
9780  * on MAC address and ether_type, or on ether_type only.
9781  */
9782 int
9783 i40e_ethertype_filter_set(struct i40e_pf *pf,
9784 			struct rte_eth_ethertype_filter *filter,
9785 			bool add)
9786 {
9787 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9788 	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9789 	struct i40e_ethertype_filter *ethertype_filter, *node;
9790 	struct i40e_ethertype_filter check_filter;
9791 	struct i40e_control_filter_stats stats;
9792 	uint16_t flags = 0;
9793 	int ret;
9794 
9795 	if (filter->queue >= pf->dev_data->nb_rx_queues) {
9796 		PMD_DRV_LOG(ERR, "Invalid queue ID");
9797 		return -EINVAL;
9798 	}
9799 	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
9800 		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
9801 		PMD_DRV_LOG(ERR,
9802 			"unsupported ether_type(0x%04x) in control packet filter.",
9803 			filter->ether_type);
9804 		return -EINVAL;
9805 	}
9806 	if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
9807 		PMD_DRV_LOG(WARNING,
9808 			"filter vlan ether_type in first tag is not supported.");
9809 
9810 	/* Check if there is the filter in SW list */
9811 	memset(&check_filter, 0, sizeof(check_filter));
9812 	i40e_ethertype_filter_convert(filter, &check_filter);
9813 	node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9814 					       &check_filter.input);
9815 	if (add && node) {
9816 		PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9817 		return -EINVAL;
9818 	}
9819 
9820 	if (!add && !node) {
9821 		PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9822 		return -EINVAL;
9823 	}
9824 
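	/* Translate the generic ethdev filter flags into adminq control
	 * packet filter flags; matches are always steered to a queue.
	 */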
9825 	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9826 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9827 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9828 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9829 	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9830 
9831 	memset(&stats, 0, sizeof(stats));
9832 	ret = i40e_aq_add_rem_control_packet_filter(hw,
9833 			filter->mac_addr.addr_bytes,
9834 			filter->ether_type, flags,
9835 			pf->main_vsi->seid,
9836 			filter->queue, add, &stats, NULL);
9837 
9838 	PMD_DRV_LOG(INFO,
9839 		"add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9840 		ret, stats.mac_etype_used, stats.etype_used,
9841 		stats.mac_etype_free, stats.etype_free);
9842 	if (ret < 0)
9843 		return -ENOSYS;
9844 
9845 	/* Add or delete a filter in SW list */
9846 	if (add) {
9847 		ethertype_filter = rte_zmalloc("ethertype_filter",
9848 				       sizeof(*ethertype_filter), 0);
9849 		if (ethertype_filter == NULL) {
9850 			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9851 			return -ENOMEM;
9852 		}
9853 
9854 		rte_memcpy(ethertype_filter, &check_filter,
9855 			   sizeof(check_filter));
9856 		ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9857 		if (ret < 0)
9858 			rte_free(ethertype_filter);
9859 	} else {
9860 		ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9861 	}
9862 
9863 	return ret;
9864 }
9865 
9866 static int
9867 i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
9868 		      const struct rte_flow_ops **ops)
9869 {
9870 	if (dev == NULL)
9871 		return -EINVAL;
9872 
9873 	*ops = &i40e_flow_ops;
9874 	return 0;
9875 }
9876 
9877 /*
9878  * Check and enable Extended Tag.
9879  * Enabling Extended Tag is important for 40G performance.
9880  */
9881 static void
9882 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9883 {
9884 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9885 	uint32_t buf = 0;
9886 	int ret;
9887 
9888 	ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9889 				      PCI_DEV_CAP_REG);
9890 	if (ret < 0) {
9891 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9892 			    PCI_DEV_CAP_REG);
9893 		return;
9894 	}
9895 	if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9896 		PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9897 		return;
9898 	}
9899 
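	/* Capability is present; check whether Extended Tag is already
	 * enabled in the device control register before writing it.
	 */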
9900 	buf = 0;
9901 	ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9902 				      PCI_DEV_CTRL_REG);
9903 	if (ret < 0) {
9904 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9905 			    PCI_DEV_CTRL_REG);
9906 		return;
9907 	}
9908 	if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9909 		PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9910 		return;
9911 	}
9912 	buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9913 	ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9914 				       PCI_DEV_CTRL_REG);
9915 	if (ret < 0) {
9916 		PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9917 			    PCI_DEV_CTRL_REG);
9918 		return;
9919 	}
9920 }
9921 
9922 /*
9923  * As some registers are not reset unless a global hardware reset is
9924  * performed, explicit initialization is needed to put those registers
9925  * into an expected initial state.
9926  */
9927 static void
9928 i40e_hw_init(struct rte_eth_dev *dev)
9929 {
9930 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9931 
9932 	i40e_enable_extended_tag(dev);
9933 
9934 	/* clear the PF Queue Filter control register */
9935 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9936 
9937 	/* Disable symmetric hash per port */
9938 	i40e_set_symmetric_hash_enable_per_port(hw, 0);
9939 }
9940 
9941 /*
9942  * On X722 it is possible to have multiple pctypes mapped to the same
9943  * flowtype; however, this function returns only the highest pctype index,
9944  * which is not quite correct. This is a known problem of the i40e driver
9945  * and needs to be fixed later.
9946  */
9947 enum i40e_filter_pctype
9948 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9949 {
9950 	int i;
9951 	uint64_t pctype_mask;
9952 
9953 	if (flow_type < I40E_FLOW_TYPE_MAX) {
9954 		pctype_mask = adapter->pctypes_tbl[flow_type];
9955 		for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9956 			if (pctype_mask & (1ULL << i))
9957 				return (enum i40e_filter_pctype)i;
9958 		}
9959 	}
9960 	return I40E_FILTER_PCTYPE_INVALID;
9961 }
9962 
9963 uint16_t
9964 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9965 			enum i40e_filter_pctype pctype)
9966 {
9967 	uint16_t flowtype;
9968 	uint64_t pctype_mask = 1ULL << pctype;
9969 
9970 	for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9971 	     flowtype++) {
9972 		if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9973 			return flowtype;
9974 	}
9975 
9976 	return RTE_ETH_FLOW_UNKNOWN;
9977 }
9978 
9979 /*
9980  * On X710, the performance numbers are far below expectation on recent
9981  * firmware versions. The same is true on XL710 when promiscuous mode is
9982  * disabled, or when promiscuous mode is enabled and the port MAC address
9983  * equals the packet destination MAC address. The fix for this issue may
9984  * not land in an upcoming firmware version, so a workaround in the
9985  * software driver is needed: it modifies the initial values of three
9986  * internal-only registers on both X710 and XL710. Note that the values
9987  * for X710 and XL710 may differ, and the workaround can be removed once
9988  * the issue is fixed in firmware.
9989  */
9990 
9991 /* For both X710 and XL710 */
9992 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1	0x10000200
9993 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2	0x203F0200
9994 #define I40E_GL_SWR_PRI_JOIN_MAP_0		0x26CE00
9995 
9996 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
9997 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
9998 
9999 /* For X722 */
10000 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
10001 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
10002 
10003 /* For X710 */
10004 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
10005 /* For XL710 */
10006 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
10007 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
10008 
10009 /*
10010  * GL_SWR_PM_UP_THR:
10011  * The value is not affected by the link speed; it is set according
10012  * to the total number of ports for a better pipe-monitor configuration.
10013  */
10014 static bool
10015 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10016 {
10017 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10018 		.device_id = (dev),   \
10019 		.val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10020 
10021 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10022 		.device_id = (dev),   \
10023 		.val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10024 
10025 	static const struct {
10026 		uint16_t device_id;
10027 		uint32_t val;
10028 	} swr_pm_table[] = {
10029 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10030 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10031 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10032 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10033 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
10034 
10035 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10036 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10037 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10038 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10039 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10040 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10041 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10042 	};
10043 	uint32_t i;
10044 
10045 	if (value == NULL) {
10046 		PMD_DRV_LOG(ERR, "value is NULL");
10047 		return false;
10048 	}
10049 
10050 	for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10051 		if (hw->device_id == swr_pm_table[i].device_id) {
10052 			*value = swr_pm_table[i].val;
10053 
10054 			PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10055 				    "value - 0x%08x",
10056 				    hw->device_id, *value);
10057 			return true;
10058 		}
10059 	}
10060 
10061 	return false;
10062 }
10063 
10064 static int
10065 i40e_dev_sync_phy_type(struct i40e_hw *hw)
10066 {
10067 	enum i40e_status_code status;
10068 	struct i40e_aq_get_phy_abilities_resp phy_ab;
10069 	int ret = -ENOTSUP;
10070 	int retries = 0;
10071 
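	/* Query the PHY capabilities, retrying up to five times with a
	 * 100 ms delay between attempts.
	 */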
10072 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10073 					      NULL);
10074 
10075 	while (status) {
10076 		PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10077 			status);
10078 		retries++;
10079 		rte_delay_us(100000);
10080 		if (retries < 5)
10081 			status = i40e_aq_get_phy_capabilities(hw, false,
10082 					true, &phy_ab, NULL);
10083 		else
10084 			return ret;
10085 	}
10086 	return 0;
10087 }
10088 
10089 static void
10090 i40e_configure_registers(struct i40e_hw *hw)
10091 {
10092 	static struct {
10093 		uint32_t addr;
10094 		uint64_t val;
10095 	} reg_table[] = {
10096 		{I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10097 		{I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10098 		{I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10099 	};
10100 	uint64_t reg;
10101 	uint32_t i;
10102 	int ret;
10103 
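	/*
	 * Resolve the per-MAC/per-firmware target values first, then read
	 * each register through the adminq debug interface and rewrite it
	 * only when it differs from the wanted value.
	 */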
10104 	for (i = 0; i < RTE_DIM(reg_table); i++) {
10105 		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10106 			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10107 				reg_table[i].val =
10108 					I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10109 			else /* For X710/XL710/XXV710 */
10110 				if (hw->aq.fw_maj_ver < 6)
10111 					reg_table[i].val =
10112 					     I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10113 				else
10114 					reg_table[i].val =
10115 					     I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10116 		}
10117 
10118 		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10119 			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10120 				reg_table[i].val =
10121 					I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10122 			else /* For X710/XL710/XXV710 */
10123 				reg_table[i].val =
10124 					I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10125 		}
10126 
10127 		if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10128 			uint32_t cfg_val;
10129 
10130 			if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10131 				PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10132 					    "GL_SWR_PM_UP_THR value fixup",
10133 					    hw->device_id);
10134 				continue;
10135 			}
10136 
10137 			reg_table[i].val = cfg_val;
10138 		}
10139 
10140 		ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10141 							&reg, NULL);
10142 		if (ret < 0) {
10143 			PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10144 							reg_table[i].addr);
10145 			break;
10146 		}
10147 		PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10148 						reg_table[i].addr, reg);
10149 		if (reg == reg_table[i].val)
10150 			continue;
10151 
10152 		ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10153 						reg_table[i].val, NULL);
10154 		if (ret < 0) {
10155 			PMD_DRV_LOG(ERR,
10156 				"Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10157 				reg_table[i].val, reg_table[i].addr);
10158 			break;
10159 		}
10160 		PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10161 			"0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10162 	}
10163 }
10164 
10165 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10166 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10167 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
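/*
 * Configure a VSI for QinQ: program the VSI TSR for double VLAN RX
 * stripping and the L2 tags valid register for double VLAN TX
 * insertion, both through the adminq debug register interface.
 */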
10168 static int
10169 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10170 {
10171 	uint32_t reg;
10172 	int ret;
10173 
10174 	if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10175 		PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10176 		return -EINVAL;
10177 	}
10178 
10179 	/* Configure for double VLAN RX stripping */
10180 	reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10181 	if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10182 		reg |= I40E_VSI_TSR_QINQ_CONFIG;
10183 		ret = i40e_aq_debug_write_register(hw,
10184 						   I40E_VSI_TSR(vsi->vsi_id),
10185 						   reg, NULL);
10186 		if (ret < 0) {
10187 			PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10188 				    vsi->vsi_id);
10189 			return I40E_ERR_CONFIG;
10190 		}
10191 	}
10192 
10193 	/* Configure for double VLAN TX insertion */
10194 	reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10195 	if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10196 		reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10197 		ret = i40e_aq_debug_write_register(hw,
10198 						   I40E_VSI_L2TAGSTXVALID(
10199 						   vsi->vsi_id), reg, NULL);
10200 		if (ret < 0) {
10201 			PMD_DRV_LOG(ERR,
10202 				"Failed to update VSI_L2TAGSTXVALID[%d]",
10203 				vsi->vsi_id);
10204 			return I40E_ERR_CONFIG;
10205 		}
10206 	}
10207 
10208 	return 0;
10209 }
10210 
10211 static uint64_t
10212 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10213 {
10214 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10215 	uint64_t systim_cycles;
10216 
10217 	systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10218 	systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10219 			<< 32;
10220 
10221 	return systim_cycles;
10222 }
10223 
10224 static uint64_t
10225 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10226 {
10227 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10228 	uint64_t rx_tstamp;
10229 
10230 	rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10231 	rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10232 			<< 32;
10233 
10234 	return rx_tstamp;
10235 }
10236 
10237 static uint64_t
10238 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10239 {
10240 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10241 	uint64_t tx_tstamp;
10242 
10243 	tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10244 	tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10245 			<< 32;
10246 
10247 	return tx_tstamp;
10248 }
10249 
10250 static void
10251 i40e_start_timecounters(struct rte_eth_dev *dev)
10252 {
10253 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10254 	struct i40e_adapter *adapter = dev->data->dev_private;
10255 	struct rte_eth_link link;
10256 	uint32_t tsync_inc_l;
10257 	uint32_t tsync_inc_h;
10258 
10259 	/* Get current link speed. */
10260 	i40e_dev_link_update(dev, 1);
10261 	rte_eth_linkstatus_get(dev, &link);
10262 
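	/*
	 * The PTP clock is a free-running cycle counter whose increment
	 * value depends on the link speed, so pick the increment that
	 * matches the negotiated speed; an unknown speed leaves the
	 * clock stopped with a zero increment.
	 */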
10263 	switch (link.link_speed) {
10264 	case RTE_ETH_SPEED_NUM_40G:
10265 	case RTE_ETH_SPEED_NUM_25G:
10266 		tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10267 		tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10268 		break;
10269 	case RTE_ETH_SPEED_NUM_10G:
10270 		tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10271 		tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10272 		break;
10273 	case RTE_ETH_SPEED_NUM_1G:
10274 		tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10275 		tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10276 		break;
10277 	default:
10278 		tsync_inc_l = 0x0;
10279 		tsync_inc_h = 0x0;
10280 	}
10281 
10282 	/* Set the timesync increment value. */
10283 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10284 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10285 
10286 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10287 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10288 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10289 
10290 	adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10291 	adapter->systime_tc.cc_shift = 0;
10292 	adapter->systime_tc.nsec_mask = 0;
10293 
10294 	adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10295 	adapter->rx_tstamp_tc.cc_shift = 0;
10296 	adapter->rx_tstamp_tc.nsec_mask = 0;
10297 
10298 	adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10299 	adapter->tx_tstamp_tc.cc_shift = 0;
10300 	adapter->tx_tstamp_tc.nsec_mask = 0;
10301 }
10302 
10303 static int
10304 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10305 {
10306 	struct i40e_adapter *adapter = dev->data->dev_private;
10307 
10308 	adapter->systime_tc.nsec += delta;
10309 	adapter->rx_tstamp_tc.nsec += delta;
10310 	adapter->tx_tstamp_tc.nsec += delta;
10311 
10312 	return 0;
10313 }
10314 
10315 static int
10316 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10317 {
10318 	uint64_t ns;
10319 	struct i40e_adapter *adapter = dev->data->dev_private;
10320 
10321 	ns = rte_timespec_to_ns(ts);
10322 
10323 	/* Set the timecounters to a new value. */
10324 	adapter->systime_tc.nsec = ns;
10325 	adapter->rx_tstamp_tc.nsec = ns;
10326 	adapter->tx_tstamp_tc.nsec = ns;
10327 
10328 	return 0;
10329 }
10330 
10331 static int
10332 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10333 {
10334 	uint64_t ns, systime_cycles;
10335 	struct i40e_adapter *adapter = dev->data->dev_private;
10336 
10337 	systime_cycles = i40e_read_systime_cyclecounter(dev);
10338 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10339 	*ts = rte_ns_to_timespec(ns);
10340 
10341 	return 0;
10342 }
10343 
10344 static int
10345 i40e_timesync_enable(struct rte_eth_dev *dev)
10346 {
10347 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10348 	uint32_t tsync_ctl_l;
10349 	uint32_t tsync_ctl_h;
10350 
10351 	/* Stop the timesync system time. */
10352 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10353 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10354 	/* Reset the timesync system time value. */
10355 	I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10356 	I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10357 
10358 	i40e_start_timecounters(dev);
10359 
10360 	/* Clear timesync registers. */
10361 	I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10362 	I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10363 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10364 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10365 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10366 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10367 
10368 	/* Enable timestamping of PTP packets. */
10369 	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10370 	tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10371 
10372 	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10373 	tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10374 	tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10375 
10376 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10377 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10378 
10379 	return 0;
10380 }
10381 
10382 static int
10383 i40e_timesync_disable(struct rte_eth_dev *dev)
10384 {
10385 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10386 	uint32_t tsync_ctl_l;
10387 	uint32_t tsync_ctl_h;
10388 
10389 	/* Disable timestamping of transmitted PTP packets. */
10390 	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10391 	tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10392 
10393 	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10394 	tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10395 
10396 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10397 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10398 
10399 	/* Reset the timesync increment value. */
10400 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10401 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10402 
10403 	return 0;
10404 }
10405 
10406 static int
10407 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10408 				struct timespec *timestamp, uint32_t flags)
10409 {
10410 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10411 	struct i40e_adapter *adapter = dev->data->dev_private;
10412 	uint32_t sync_status;
10413 	uint32_t index = flags & 0x03;
10414 	uint64_t rx_tstamp_cycles;
10415 	uint64_t ns;
10416 
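	/*
	 * The hardware latches up to four RX timestamps; the low two bits
	 * of "flags" select which latch to read, and PRTTSYN_STAT_1 tells
	 * whether that latch holds a valid timestamp.
	 */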
10417 	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10418 	if ((sync_status & (1 << index)) == 0)
10419 		return -EINVAL;
10420 
10421 	rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10422 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10423 	*timestamp = rte_ns_to_timespec(ns);
10424 
10425 	return 0;
10426 }
10427 
10428 static int
10429 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10430 				struct timespec *timestamp)
10431 {
10432 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10433 	struct i40e_adapter *adapter = dev->data->dev_private;
10434 	uint32_t sync_status;
10435 	uint64_t tx_tstamp_cycles;
10436 	uint64_t ns;
10437 
10438 	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10439 	if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10440 		return -EINVAL;
10441 
10442 	tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10443 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10444 	*timestamp = rte_ns_to_timespec(ns);
10445 
10446 	return 0;
10447 }
10448 
10449 /*
10450  * i40e_parse_dcb_configure - parse dcb configure from user
10451  * @dev: the device being configured
10452  * @dcb_cfg: pointer of the result of parse
10453  * @*tc_map: bit map of enabled traffic classes
10454  *
10455  * Returns 0 on success, negative value on failure
10456  */
10457 static int
10458 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10459 			 struct i40e_dcbx_config *dcb_cfg,
10460 			 uint8_t *tc_map)
10461 {
10462 	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10463 	uint8_t i, tc_bw, bw_lf;
10464 
10465 	memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10466 
10467 	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10468 	if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10469 		PMD_INIT_LOG(ERR, "number of tc exceeds max.");
10470 		return -EINVAL;
10471 	}
10472 
10473 	/* assume each tc has the same bw */
10474 	tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10475 	for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10476 		dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10477 	/* to ensure the sum of tcbw is equal to 100 */
10478 	bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10479 	for (i = 0; i < bw_lf; i++)
10480 		dcb_cfg->etscfg.tcbwtable[i]++;
10481 
10482 	/* assume each tc has the same Transmission Selection Algorithm */
10483 	for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10484 		dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10485 
10486 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10487 		dcb_cfg->etscfg.prioritytable[i] =
10488 				dcb_rx_conf->dcb_tc[i];
10489 
10490 	/* FW needs one App to configure HW */
10491 	dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10492 	dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10493 	dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10494 	dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10495 
10496 	if (dcb_rx_conf->nb_tcs == 0)
10497 		*tc_map = 1; /* tc0 only */
10498 	else
10499 		*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10500 
10501 	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
10502 		dcb_cfg->pfc.willing = 0;
10503 		dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10504 		dcb_cfg->pfc.pfcenable = *tc_map;
10505 	}
10506 	return 0;
10507 }
10508 
10509 
10510 static enum i40e_status_code
10511 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10512 			      struct i40e_aqc_vsi_properties_data *info,
10513 			      uint8_t enabled_tcmap)
10514 {
10515 	enum i40e_status_code ret;
10516 	int i, total_tc = 0;
10517 	uint16_t qpnum_per_tc, bsf, qp_idx;
10518 	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10519 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10520 	uint16_t used_queues;
10521 
10522 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10523 	if (ret != I40E_SUCCESS)
10524 		return ret;
10525 
10526 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10527 		if (enabled_tcmap & (1 << i))
10528 			total_tc++;
10529 	}
10530 	if (total_tc == 0)
10531 		total_tc = 1;
10532 	vsi->enabled_tc = enabled_tcmap;
10533 
10534 	/* different VSI types have different queues assigned */
10535 	if (vsi->type == I40E_VSI_MAIN)
10536 		used_queues = dev_data->nb_rx_queues -
10537 			pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10538 	else if (vsi->type == I40E_VSI_VMDQ2)
10539 		used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10540 	else {
10541 		PMD_INIT_LOG(ERR, "unsupported VSI type.");
10542 		return I40E_ERR_NO_AVAILABLE_VSI;
10543 	}
10544 
10545 	qpnum_per_tc = used_queues / total_tc;
10546 	/* Number of queues per enabled TC */
10547 	if (qpnum_per_tc == 0) {
10548 		PMD_INIT_LOG(ERR, " number of queues is less that tcs.");
10549 		return I40E_ERR_INVALID_QP_ID;
10550 	}
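	/*
	 * Round the per-TC queue count down to a power of two (capped at
	 * I40E_MAX_Q_PER_TC), since the tc_mapping field encodes the
	 * queue count as its log2, computed below with rte_bsf32().
	 */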
10551 	qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10552 				I40E_MAX_Q_PER_TC);
10553 	bsf = rte_bsf32(qpnum_per_tc);
10554 
10555 	/**
10556 	 * Configure TC and queue mapping parameters. For each enabled TC,
10557 	 * allocate qpnum_per_tc queues to it; a disabled TC is served by
10558 	 * the default queue.
10559 	 */
10560 	qp_idx = 0;
10561 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10562 		if (vsi->enabled_tc & (1 << i)) {
10563 			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10564 					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10565 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10566 			qp_idx += qpnum_per_tc;
10567 		} else
10568 			info->tc_mapping[i] = 0;
10569 	}
10570 
10571 	/* Associate queue numbers with the VSI, keeping vsi->nb_qps unchanged */
10572 	if (vsi->type == I40E_VSI_SRIOV) {
10573 		info->mapping_flags |=
10574 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10575 		for (i = 0; i < vsi->nb_qps; i++)
10576 			info->queue_mapping[i] =
10577 				rte_cpu_to_le_16(vsi->base_queue + i);
10578 	} else {
10579 		info->mapping_flags |=
10580 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10581 		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10582 	}
10583 	info->valid_sections |=
10584 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10585 
10586 	return I40E_SUCCESS;
10587 }
10588 
10589 /*
10590  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10591  * @veb: VEB to be configured
10592  * @tc_map: enabled TC bitmap
10593  *
10594  * Returns 0 on success, negative value on failure
10595  */
10596 static enum i40e_status_code
10597 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10598 {
10599 	struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10600 	struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10601 	struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10602 	struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10603 	enum i40e_status_code ret = I40E_SUCCESS;
10604 	int i;
10605 	uint32_t bw_max;
10606 
10607 	/* Check if enabled_tc is same as existing or new TCs */
10608 	if (veb->enabled_tc == tc_map)
10609 		return ret;
10610 
10611 	/* configure tc bandwidth */
10612 	memset(&veb_bw, 0, sizeof(veb_bw));
10613 	veb_bw.tc_valid_bits = tc_map;
10614 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
10615 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10616 		if (tc_map & BIT_ULL(i))
10617 			veb_bw.tc_bw_share_credits[i] = 1;
10618 	}
10619 	ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10620 						   &veb_bw, NULL);
10621 	if (ret) {
10622 		PMD_INIT_LOG(ERR,
10623 			"AQ command Config switch_comp BW allocation per TC failed = %d",
10624 			hw->aq.asq_last_status);
10625 		return ret;
10626 	}
10627 
10628 	memset(&ets_query, 0, sizeof(ets_query));
10629 	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10630 						   &ets_query, NULL);
10631 	if (ret != I40E_SUCCESS) {
10632 		PMD_DRV_LOG(ERR,
10633 			"Failed to get switch_comp ETS configuration %u",
10634 			hw->aq.asq_last_status);
10635 		return ret;
10636 	}
10637 	memset(&bw_query, 0, sizeof(bw_query));
10638 	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10639 						  &bw_query, NULL);
10640 	if (ret != I40E_SUCCESS) {
10641 		PMD_DRV_LOG(ERR,
10642 			"Failed to get switch_comp bandwidth configuration %u",
10643 			hw->aq.asq_last_status);
10644 		return ret;
10645 	}
10646 
10647 	/* store and print out BW info */
10648 	veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10649 	veb->bw_info.bw_max = ets_query.tc_bw_max;
10650 	PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10651 	PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10652 	bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10653 		    (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10654 		     I40E_16_BIT_WIDTH);
10655 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10656 		veb->bw_info.bw_ets_share_credits[i] =
10657 				bw_query.tc_bw_share_credits[i];
10658 		veb->bw_info.bw_ets_credits[i] =
10659 				rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10660 		/* 4 bits per TC, 4th bit is reserved */
10661 		veb->bw_info.bw_ets_max[i] =
10662 			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10663 				  RTE_LEN2MASK(3, uint8_t));
10664 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10665 			    veb->bw_info.bw_ets_share_credits[i]);
10666 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10667 			    veb->bw_info.bw_ets_credits[i]);
10668 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10669 			    veb->bw_info.bw_ets_max[i]);
10670 	}
10671 
10672 	veb->enabled_tc = tc_map;
10673 
10674 	return ret;
10675 }
10676 
10677 
10678 /*
10679  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
10680  * @vsi: VSI to be configured
10681  * @tc_map: enabled TC bitmap
10682  *
10683  * Returns 0 on success, negative value on failure
10684  */
10685 static enum i40e_status_code
10686 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
10687 {
10688 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
10689 	struct i40e_vsi_context ctxt;
10690 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
10691 	enum i40e_status_code ret = I40E_SUCCESS;
10692 	int i;
10693 
10694 	/* Check if enabled_tc is same as existing or new TCs */
10695 	if (vsi->enabled_tc == tc_map)
10696 		return ret;
10697 
10698 	/* configure tc bandwidth */
10699 	memset(&bw_data, 0, sizeof(bw_data));
10700 	bw_data.tc_valid_bits = tc_map;
10701 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
10702 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10703 		if (tc_map & BIT_ULL(i))
10704 			bw_data.tc_bw_credits[i] = 1;
10705 	}
10706 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
10707 	if (ret) {
10708 		PMD_INIT_LOG(ERR,
10709 			"AQ command Config VSI BW allocation per TC failed = %d",
10710 			hw->aq.asq_last_status);
10711 		goto out;
10712 	}
10713 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
10714 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
10715 
10716 	/* Update Queue Pairs Mapping for currently enabled UPs */
10717 	ctxt.seid = vsi->seid;
10718 	ctxt.pf_num = hw->pf_id;
10719 	ctxt.vf_num = 0;
10720 	ctxt.uplink_seid = vsi->uplink_seid;
10721 	ctxt.info = vsi->info;
10722 	i40e_get_cap(hw);
10723 	ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
10724 	if (ret)
10725 		goto out;
10726 
10727 	/* Update the VSI after updating the VSI queue-mapping information */
10728 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
10729 	if (ret) {
10730 		PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
10731 			hw->aq.asq_last_status);
10732 		goto out;
10733 	}
10734 	/* update the local VSI info with updated queue map */
10735 	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
10736 					sizeof(vsi->info.tc_mapping));
10737 	rte_memcpy(&vsi->info.queue_mapping,
10738 			&ctxt.info.queue_mapping,
10739 		sizeof(vsi->info.queue_mapping));
10740 	vsi->info.mapping_flags = ctxt.info.mapping_flags;
10741 	vsi->info.valid_sections = 0;
10742 
10743 	/* query and update current VSI BW information */
10744 	ret = i40e_vsi_get_bw_config(vsi);
10745 	if (ret) {
10746 		PMD_INIT_LOG(ERR,
10747 			 "Failed updating vsi bw info, err %s aq_err %s",
10748 			 i40e_stat_str(hw, ret),
10749 			 i40e_aq_str(hw, hw->aq.asq_last_status));
10750 		goto out;
10751 	}
10752 
10753 	vsi->enabled_tc = tc_map;
10754 
10755 out:
10756 	return ret;
10757 }
10758 
10759 /*
10760  * i40e_dcb_hw_configure - program the dcb setting to hw
10761  * @pf: pf the configuration is taken on
10762  * @new_cfg: new configuration
10763  * @tc_map: enabled TC bitmap
10764  *
10765  * Returns 0 on success, negative value on failure
10766  */
10767 static enum i40e_status_code
10768 i40e_dcb_hw_configure(struct i40e_pf *pf,
10769 		      struct i40e_dcbx_config *new_cfg,
10770 		      uint8_t tc_map)
10771 {
10772 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10773 	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
10774 	struct i40e_vsi *main_vsi = pf->main_vsi;
10775 	struct i40e_vsi_list *vsi_list;
10776 	enum i40e_status_code ret;
10777 	int i;
10778 	uint32_t val;
10779 
10780 	/* Use the FW API only if FW >= v4.4 */
10781 	if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
10782 	      (hw->aq.fw_maj_ver >= 5))) {
10783 		PMD_INIT_LOG(ERR,
10784 			"FW < v4.4, can not use FW LLDP API to configure DCB");
10785 		return I40E_ERR_FIRMWARE_API_VERSION;
10786 	}
10787 
10788 	/* Check if need reconfiguration */
10789 	if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
10790 		PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
10791 		return I40E_SUCCESS;
10792 	}
10793 
10794 	/* Copy the new config to the current config */
10795 	*old_cfg = *new_cfg;
10796 	old_cfg->etsrec = old_cfg->etscfg;
10797 	ret = i40e_set_dcb_config(hw);
10798 	if (ret) {
10799 		PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
10800 			 i40e_stat_str(hw, ret),
10801 			 i40e_aq_str(hw, hw->aq.asq_last_status));
10802 		return ret;
10803 	}
10804 	/* set receive Arbiter to RR mode and ETS scheme by default */
10805 	for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
10806 		val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
10807 		val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
10808 			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
10809 			 I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
10810 		val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
10811 			I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
10812 			 I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
10813 		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
10814 			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
10815 		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
10816 			 I40E_PRTDCB_RETSTCC_ETSTC_MASK;
10817 		I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
10818 	}
10819 	/* get local mib to check whether it is configured correctly */
10820 	/* IEEE mode */
10821 	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
10822 	/* Get Local DCB Config */
10823 	i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
10824 				     &hw->local_dcbx_config);
10825 
10826 	/* if a VEB has been created, its TC setting needs to be updated first */
10827 	if (main_vsi->veb) {
10828 		ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
10829 		if (ret)
10830 			PMD_INIT_LOG(WARNING,
10831 				 "Failed configuring TC for VEB seid=%d",
10832 				 main_vsi->veb->seid);
10833 	}
10834 	/* Update each VSI */
10835 	i40e_vsi_config_tc(main_vsi, tc_map);
10836 	if (main_vsi->veb) {
10837 		TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
10838 			/* Besides the main VSI and VMDQ VSIs, only enable the
10839 			 * default TC for other VSIs
10840 			 */
10841 			if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
10842 				ret = i40e_vsi_config_tc(vsi_list->vsi,
10843 							 tc_map);
10844 			else
10845 				ret = i40e_vsi_config_tc(vsi_list->vsi,
10846 							 I40E_DEFAULT_TCMAP);
10847 			if (ret)
10848 				PMD_INIT_LOG(WARNING,
10849 					"Failed configuring TC for VSI seid=%d",
10850 					vsi_list->vsi->seid);
10851 			/* continue */
10852 		}
10853 	}
10854 	return I40E_SUCCESS;
10855 }
10856 
10857 /*
10858  * i40e_dcb_init_configure - initial dcb config
10859  * @dev: device being configured
10860  * @sw_dcb: indicate whether dcb is sw configured or hw offload
10861  *
10862  * Returns 0 on success, negative value on failure
10863  */
10864 int
10865 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
10866 {
10867 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10868 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10869 	int i, ret = 0;
10870 
10871 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
10872 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10873 		return -ENOTSUP;
10874 	}
10875 
10876 	/* DCB initialization:
10877 	 * Update DCB configuration from the Firmware and configure
10878 	 * LLDP MIB change event.
10879 	 */
10880 	if (sw_dcb == TRUE) {
10881 		/* Stopping LLDP is necessary for DPDK, but it will cause
10882 		 * DCB init to fail. For i40e_init_dcb(), the prerequisite
10883 		 * for successful DCB initialization is that LLDP is
10884 		 * enabled, so LLDP must be started before DCB init and
10885 		 * stopped after initialization.
10886 		 */
10887 		ret = i40e_aq_start_lldp(hw, true, NULL);
10888 		if (ret != I40E_SUCCESS)
10889 			PMD_INIT_LOG(DEBUG, "Failed to start lldp");
10890 
10891 		ret = i40e_init_dcb(hw, true);
10892 		/* If the LLDP agent is stopped, the expected return value
10893 		 * from i40e_init_dcb() is failure with I40E_AQ_RC_EPERM
10894 		 * adminq status; otherwise it should return success.
10895 		 */
10896 		if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
10897 		    hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
10898 			memset(&hw->local_dcbx_config, 0,
10899 				sizeof(struct i40e_dcbx_config));
10900 			/* set dcb default configuration */
10901 			hw->local_dcbx_config.etscfg.willing = 0;
10902 			hw->local_dcbx_config.etscfg.maxtcs = 0;
10903 			hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
10904 			hw->local_dcbx_config.etscfg.tsatable[0] =
10905 						I40E_IEEE_TSA_ETS;
10906 			/* all UPs mapping to TC0 */
10907 			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10908 				hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
10909 			hw->local_dcbx_config.etsrec =
10910 				hw->local_dcbx_config.etscfg;
10911 			hw->local_dcbx_config.pfc.willing = 0;
10912 			hw->local_dcbx_config.pfc.pfccap =
10913 						I40E_MAX_TRAFFIC_CLASS;
10914 			/* FW needs one App to configure HW */
10915 			hw->local_dcbx_config.numapps = 1;
10916 			hw->local_dcbx_config.app[0].selector =
10917 						I40E_APP_SEL_ETHTYPE;
10918 			hw->local_dcbx_config.app[0].priority = 3;
10919 			hw->local_dcbx_config.app[0].protocolid =
10920 						I40E_APP_PROTOID_FCOE;
10921 			ret = i40e_set_dcb_config(hw);
10922 			if (ret) {
10923 				PMD_INIT_LOG(ERR,
10924 					"default dcb config fails. err = %d, aq_err = %d.",
10925 					ret, hw->aq.asq_last_status);
10926 				return -ENOSYS;
10927 			}
10928 		} else {
10929 			PMD_INIT_LOG(ERR,
10930 				"DCB initialization in FW fails, err = %d, aq_err = %d.",
10931 				ret, hw->aq.asq_last_status);
10932 			return -ENOTSUP;
10933 		}
10934 
10935 		if (i40e_need_stop_lldp(dev)) {
10936 			ret = i40e_aq_stop_lldp(hw, true, true, NULL);
10937 			if (ret != I40E_SUCCESS)
10938 				PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
10939 		}
10940 	} else {
10941 		ret = i40e_aq_start_lldp(hw, true, NULL);
10942 		if (ret != I40E_SUCCESS)
10943 			PMD_INIT_LOG(DEBUG, "Failed to start lldp");
10944 
10945 		ret = i40e_init_dcb(hw, true);
10946 		if (!ret) {
10947 			if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
10948 				PMD_INIT_LOG(ERR,
10949 					"HW doesn't support DCBX offload.");
10950 				return -ENOTSUP;
10951 			}
10952 		} else {
10953 			PMD_INIT_LOG(ERR,
10954 				"DCBX configuration failed, err = %d, aq_err = %d.",
10955 				ret, hw->aq.asq_last_status);
10956 			return -ENOTSUP;
10957 		}
10958 	}
10959 	return 0;
10960 }
10961 
10962 /*
10963  * i40e_dcb_setup - setup dcb related config
10964  * @dev: device being configured
10965  *
10966  * Returns 0 on success, negative value on failure
10967  */
10968 static int
10969 i40e_dcb_setup(struct rte_eth_dev *dev)
10970 {
10971 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10972 	struct i40e_dcbx_config dcb_cfg;
10973 	uint8_t tc_map = 0;
10974 	int ret = 0;
10975 
10976 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
10977 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10978 		return -ENOTSUP;
10979 	}
10980 
10981 	if (pf->vf_num != 0)
10982 		PMD_INIT_LOG(DEBUG, " DCB only works on pf and vmdq vsis.");
10983 
10984 	ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
10985 	if (ret) {
10986 		PMD_INIT_LOG(ERR, "invalid dcb config");
10987 		return -EINVAL;
10988 	}
10989 	ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
10990 	if (ret) {
10991 		PMD_INIT_LOG(ERR, "dcb sw configure fails");
10992 		return -ENOSYS;
10993 	}
10994 
10995 	return 0;
10996 }
10997 
10998 static int
10999 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11000 		      struct rte_eth_dcb_info *dcb_info)
11001 {
11002 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11003 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11004 	struct i40e_vsi *vsi = pf->main_vsi;
11005 	struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11006 	uint16_t bsf, tc_mapping;
11007 	int i, j = 0;
11008 
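	/*
	 * enabled_tc is a contiguous low-order bitmap (built with
	 * RTE_LEN2MASK), so adding one and taking the bit scan yields
	 * the number of enabled TCs, e.g. 0x07 + 1 = 0x08 -> 3 TCs.
	 */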
11009 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
11010 		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11011 	else
11012 		dcb_info->nb_tcs = 1;
11013 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11014 		dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11015 	for (i = 0; i < dcb_info->nb_tcs; i++)
11016 		dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11017 
11018 	/* get queue mapping if vmdq is disabled */
11019 	if (!pf->nb_cfg_vmdq_vsi) {
11020 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11021 			if (!(vsi->enabled_tc & (1 << i)))
11022 				continue;
11023 			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11024 			dcb_info->tc_queue.tc_rxq[j][i].base =
11025 				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11026 				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11027 			dcb_info->tc_queue.tc_txq[j][i].base =
11028 				dcb_info->tc_queue.tc_rxq[j][i].base;
11029 			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11030 				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11031 			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11032 			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11033 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11034 		}
11035 		return 0;
11036 	}
11037 
11038 	/* get queue mapping if vmdq is enabled */
11039 	do {
11040 		vsi = pf->vmdq[j].vsi;
11041 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11042 			if (!(vsi->enabled_tc & (1 << i)))
11043 				continue;
11044 			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11045 			dcb_info->tc_queue.tc_rxq[j][i].base =
11046 				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11047 				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11048 			dcb_info->tc_queue.tc_txq[j][i].base =
11049 				dcb_info->tc_queue.tc_rxq[j][i].base;
11050 			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11051 				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11052 			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11053 			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11054 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11055 		}
11056 		j++;
11057 	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, RTE_ETH_MAX_VMDQ_POOL));
11058 	return 0;
11059 }
11060 
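/*
 * i40e_dev_rx_queue_intr_enable - unmask the Rx interrupt of one queue
 *
 * The queue's MSI-X vector is looked up in the interrupt handle; vector 0
 * (I40E_MISC_VEC_ID) is controlled through PFINT_DYN_CTL0, all others
 * through their PFINT_DYN_CTLN register.  Writing INTENA|CLEARPBA re-arms
 * the vector so the next packet raises an event.  The public entry point
 * is rte_eth_dev_rx_intr_enable().
 */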
11061 static int
11062 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11063 {
11064 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11065 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
11066 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11067 	uint16_t msix_intr;
11068 
11069 	msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
11070 	if (msix_intr == I40E_MISC_VEC_ID)
11071 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11072 			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
11073 			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11074 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11075 	else
11076 		I40E_WRITE_REG(hw,
11077 			       I40E_PFINT_DYN_CTLN(msix_intr -
11078 						   I40E_RX_VEC_START),
11079 			       I40E_PFINT_DYN_CTLN_INTENA_MASK |
11080 			       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11081 			       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11082 
11083 	I40E_WRITE_FLUSH(hw);
11084 	rte_intr_ack(pci_dev->intr_handle);
11085 
11086 	return 0;
11087 }
11088 
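/*
 * i40e_dev_rx_queue_intr_disable - mask the Rx interrupt of one queue
 *
 * Mirror of the enable path: the DYN_CTL register is written with only
 * the ITR_INDX field set, i.e. with INTENA clear, which masks the vector.
 */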
11089 static int
11090 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11091 {
11092 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11093 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
11094 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11095 	uint16_t msix_intr;
11096 
11097 	msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
11098 	if (msix_intr == I40E_MISC_VEC_ID)
11099 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11100 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11101 	else
11102 		I40E_WRITE_REG(hw,
11103 			       I40E_PFINT_DYN_CTLN(msix_intr -
11104 						   I40E_RX_VEC_START),
11105 			       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11106 	I40E_WRITE_FLUSH(hw);
11107 
11108 	return 0;
11109 }
11110 
11111 /**
11112  * Check whether a register offset is valid to read on the given MAC type.
11113  * The register ranges below exist on X722 only:
11114  * 0x2b800--0x2bb00
11115  * 0x38700--0x38a00
11116  * 0x3d800--0x3db00
11117  * 0x208e00--0x209000
11118  * 0x20be00--0x20c000
11119  * 0x263c00--0x264000
11120  * 0x265c00--0x266000
11121  */
11122 static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
11123 {
11124 	if ((type != I40E_MAC_X722) &&
11125 	    ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
11126 	     (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
11127 	     (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
11128 	     (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
11129 	     (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
11130 	     (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
11131 	     (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
11132 		return 0;
11133 	else
11134 		return 1;
11135 }
11136 
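/*
 * i40e_get_regs - dump device registers (.get_reg)
 *
 * When regs->data is NULL only the required buffer geometry is returned,
 * which is the ethdev convention for rte_eth_dev_get_reg_info() size
 * queries.  Otherwise the dump runs in two passes: registers listed in
 * i40e_regs_adminq[] must go through i40e_read_rx_ctl() (AQ assisted),
 * while i40e_regs_others[] entries are plain MMIO reads, with X722-only
 * ranges zero-filled on other MAC types via i40e_valid_regs().  The
 * output buffer is indexed by dword offset (reg_offset >> 2).
 */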
11137 static int i40e_get_regs(struct rte_eth_dev *dev,
11138 			 struct rte_dev_reg_info *regs)
11139 {
11140 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11141 	uint32_t *ptr_data = regs->data;
11142 	uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11143 	const struct i40e_reg_info *reg_info;
11144 
11145 	if (ptr_data == NULL) {
11146 		regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11147 		regs->width = sizeof(uint32_t);
11148 		return 0;
11149 	}
11150 
11151 	/* The first few registers have to be read using AQ operations */
11152 	reg_idx = 0;
11153 	while (i40e_regs_adminq[reg_idx].name) {
11154 		reg_info = &i40e_regs_adminq[reg_idx++];
11155 		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11156 			for (arr_idx2 = 0;
11157 					arr_idx2 <= reg_info->count2;
11158 					arr_idx2++) {
11159 				reg_offset = arr_idx * reg_info->stride1 +
11160 					arr_idx2 * reg_info->stride2;
11161 				reg_offset += reg_info->base_addr;
11162 				ptr_data[reg_offset >> 2] =
11163 					i40e_read_rx_ctl(hw, reg_offset);
11164 			}
11165 	}
11166 
11167 	/* The remaining registers can be read using primitives */
11168 	reg_idx = 0;
11169 	while (i40e_regs_others[reg_idx].name) {
11170 		reg_info = &i40e_regs_others[reg_idx++];
11171 		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11172 			for (arr_idx2 = 0;
11173 					arr_idx2 <= reg_info->count2;
11174 					arr_idx2++) {
11175 				reg_offset = arr_idx * reg_info->stride1 +
11176 					arr_idx2 * reg_info->stride2;
11177 				reg_offset += reg_info->base_addr;
11178 				if (!i40e_valid_regs(hw->mac.type, reg_offset))
11179 					ptr_data[reg_offset >> 2] = 0;
11180 				else
11181 					ptr_data[reg_offset >> 2] =
11182 						I40E_READ_REG(hw, reg_offset);
11183 			}
11184 	}
11185 
11186 	return 0;
11187 }
11188 
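/*
 * i40e_get_eeprom_length - report NVM size in bytes (.get_eeprom_length)
 *
 * hw->nvm.sr_size counts 16-bit shadow RAM words, hence the shift by one
 * to convert words to bytes for rte_eth_dev_get_eeprom_length().
 */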
11189 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11190 {
11191 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11192 
11193 	/* Convert word count to byte count */
11194 	return hw->nvm.sr_size << 1;
11195 }
11196 
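/*
 * i40e_get_eeprom - read a slice of the NVM shadow RAM (.get_eeprom)
 *
 * The byte-based offset/length of the request are converted to 16-bit NVM
 * words before i40e_read_nvm_buffer() does the actual read.  A minimal
 * caller sketch, assuming a valid port_id and a buffer `buf` at least as
 * large as the reported length (all names local to this example):
 *
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *	struct rte_dev_eeprom_info info = {
 *		.data = buf, .offset = 0, .length = len,
 *	};
 *	if (len > 0 && rte_eth_dev_get_eeprom(port_id, &info) == 0)
 *		;	/* buf now holds the shadow RAM contents */
 */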
11197 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11198 			   struct rte_dev_eeprom_info *eeprom)
11199 {
11200 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11201 	uint16_t *data = eeprom->data;
11202 	uint16_t offset, length, cnt_words;
11203 	int ret_code;
11204 
11205 	offset = eeprom->offset >> 1;
11206 	length = eeprom->length >> 1;
11207 	cnt_words = length;
11208 
11209 	if (offset > hw->nvm.sr_size ||
11210 		offset + length > hw->nvm.sr_size) {
11211 		PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11212 		return -EINVAL;
11213 	}
11214 
11215 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11216 
11217 	ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11218 	if (ret_code != I40E_SUCCESS || cnt_words != length) {
11219 		PMD_DRV_LOG(ERR, "EEPROM read failed.");
11220 		return -EIO;
11221 	}
11222 
11223 	return 0;
11224 }
11225 
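/*
 * i40e_get_module_info - identify the plugged module (.get_module_info)
 *
 * Requires AQ PHY access support in firmware.  For SFP modules the
 * SFF-8472 compliance and address-mode bytes decide between an SFF-8079
 * (256B) and SFF-8472 (512B) layout; QSFP+ picks SFF-8436 vs SFF-8636
 * from the revision byte, and QSFP28 is always SFF-8636.  The result
 * tells callers of rte_eth_dev_get_module_info() how large an EEPROM
 * dump to request.
 */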
11226 static int i40e_get_module_info(struct rte_eth_dev *dev,
11227 				struct rte_eth_dev_module_info *modinfo)
11228 {
11229 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11230 	uint32_t sff8472_comp = 0;
11231 	uint32_t sff8472_swap = 0;
11232 	uint32_t sff8636_rev = 0;
11233 	i40e_status status;
11234 	uint32_t type = 0;
11235 
11236 	/* Check if firmware supports reading module EEPROM. */
11237 	if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
11238 		PMD_DRV_LOG(ERR,
11239 			    "Module EEPROM memory read not supported. "
11240 			    "Please update the NVM image.");
11241 		return -EINVAL;
11242 	}
11243 
11244 	status = i40e_update_link_info(hw);
11245 	if (status)
11246 		return -EIO;
11247 
11248 	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
11249 		PMD_DRV_LOG(ERR,
11250 			    "Cannot read module EEPROM memory. "
11251 			    "No module connected.");
11252 		return -EINVAL;
11253 	}
11254 
11255 	type = hw->phy.link_info.module_type[0];
11256 
11257 	switch (type) {
11258 	case I40E_MODULE_TYPE_SFP:
11259 		status = i40e_aq_get_phy_register(hw,
11260 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11261 				I40E_I2C_EEPROM_DEV_ADDR, 1,
11262 				I40E_MODULE_SFF_8472_COMP,
11263 				&sff8472_comp, NULL);
11264 		if (status)
11265 			return -EIO;
11266 
11267 		status = i40e_aq_get_phy_register(hw,
11268 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11269 				I40E_I2C_EEPROM_DEV_ADDR, 1,
11270 				I40E_MODULE_SFF_8472_SWAP,
11271 				&sff8472_swap, NULL);
11272 		if (status)
11273 			return -EIO;
11274 
11275 		/* Check if the module requires address swap to access
11276 		 * the other EEPROM memory page.
11277 		 */
11278 		if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
11279 			PMD_DRV_LOG(WARNING,
11280 				    "Module address swap to access "
11281 				    "page 0xA2 is not supported.");
11282 			modinfo->type = RTE_ETH_MODULE_SFF_8079;
11283 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11284 		} else if (sff8472_comp == 0x00) {
11285 			/* Module is not SFF-8472 compliant */
11286 			modinfo->type = RTE_ETH_MODULE_SFF_8079;
11287 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11288 		} else {
11289 			modinfo->type = RTE_ETH_MODULE_SFF_8472;
11290 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11291 		}
11292 		break;
11293 	case I40E_MODULE_TYPE_QSFP_PLUS:
11294 		/* Read from memory page 0. */
11295 		status = i40e_aq_get_phy_register(hw,
11296 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11297 				0, 1,
11298 				I40E_MODULE_REVISION_ADDR,
11299 				&sff8636_rev, NULL);
11300 		if (status)
11301 			return -EIO;
11302 		/* Determine revision compliance byte */
11303 		if (sff8636_rev > 0x02) {
11304 			/* Module is SFF-8636 compliant */
11305 			modinfo->type = RTE_ETH_MODULE_SFF_8636;
11306 			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11307 		} else {
11308 			modinfo->type = RTE_ETH_MODULE_SFF_8436;
11309 			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11310 		}
11311 		break;
11312 	case I40E_MODULE_TYPE_QSFP28:
11313 		modinfo->type = RTE_ETH_MODULE_SFF_8636;
11314 		modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11315 		break;
11316 	default:
11317 		PMD_DRV_LOG(ERR, "Module type unrecognized");
11318 		return -EINVAL;
11319 	}
11320 	return 0;
11321 }
11322 
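/*
 * i40e_get_module_eeprom - dump module EEPROM bytes (.get_module_eeprom)
 *
 * Bytes are fetched one at a time through the AQ PHY register interface.
 * For SFP, offsets past the first 256 bytes switch the I2C device address
 * from 0xA0 to 0xA2; for QSFP the offset is folded into successive memory
 * pages instead.  Exposed via rte_eth_dev_get_module_eeprom().
 */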
11323 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11324 				  struct rte_dev_eeprom_info *info)
11325 {
11326 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11327 	bool is_sfp = false;
11328 	i40e_status status;
11329 	uint8_t *data;
11330 	uint32_t value = 0;
11331 	uint32_t i;
11332 
11333 	if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11334 		is_sfp = true;
11335 
11336 	data = info->data;
11337 	for (i = 0; i < info->length; i++) {
11338 		u32 offset = i + info->offset;
11339 		u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11340 
11341 		/* Check if we need to access the other memory page */
11342 		if (is_sfp) {
11343 			if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11344 				offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11345 				addr = I40E_I2C_EEPROM_DEV_ADDR2;
11346 			}
11347 		} else {
11348 			while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11349 				/* Compute memory page number and offset. */
11350 				offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11351 				addr++;
11352 			}
11353 		}
11354 		status = i40e_aq_get_phy_register(hw,
11355 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11356 				addr, 1, offset, &value, NULL);
11357 		if (status)
11358 			return -EIO;
11359 		data[i] = (uint8_t)value;
11360 	}
11361 	return 0;
11362 }
11363 
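/*
 * i40e_set_default_mac_addr - replace the port MAC (.mac_addr_set)
 *
 * The MAC filter for the old default address on the main VSI is swapped
 * for one matching the new address, after which the address is committed
 * through the admin queue as a locally administered address (LAA) with
 * wake-on-LAN preserved.  Used by rte_eth_dev_default_mac_addr_set().
 */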
11364 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11365 				     struct rte_ether_addr *mac_addr)
11366 {
11367 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11368 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11369 	struct i40e_vsi *vsi = pf->main_vsi;
11370 	struct i40e_mac_filter_info mac_filter;
11371 	struct i40e_mac_filter *f;
11372 	int ret;
11373 
11374 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
11375 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11376 		return -EINVAL;
11377 	}
11378 
11379 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
11380 		if (rte_is_same_ether_addr(&pf->dev_addr,
11381 						&f->mac_info.mac_addr))
11382 			break;
11383 	}
11384 
11385 	if (f == NULL) {
11386 		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11387 		return -EIO;
11388 	}
11389 
11390 	mac_filter = f->mac_info;
11391 	ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11392 	if (ret != I40E_SUCCESS) {
11393 		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11394 		return -EIO;
11395 	}
11396 	memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11397 	ret = i40e_vsi_add_mac(vsi, &mac_filter);
11398 	if (ret != I40E_SUCCESS) {
11399 		PMD_DRV_LOG(ERR, "Failed to add mac filter");
11400 		return -EIO;
11401 	}
11402 	memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11403 
11404 	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11405 					mac_addr->addr_bytes, NULL);
11406 	if (ret != I40E_SUCCESS) {
11407 		PMD_DRV_LOG(ERR, "Failed to change mac");
11408 		return -EIO;
11409 	}
11410 
11411 	return 0;
11412 }
11413 
11414 static int
11415 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
11416 {
11417 	/* The MTU cannot be changed while the port is started */
11418 	if (dev->data->dev_started != 0) {
11419 		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11420 			    dev->data->port_id);
11421 		return -EBUSY;
11422 	}
11423 
11424 	return 0;
11425 }
11426 
11427 /* Restore ethertype filter */
11428 static void
11429 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11430 {
11431 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11432 	struct i40e_ethertype_filter_list
11433 		*ethertype_list = &pf->ethertype.ethertype_list;
11434 	struct i40e_ethertype_filter *f;
11435 	struct i40e_control_filter_stats stats = {0}; /* zeroed in case the list is empty */
11436 	uint16_t flags;
11437 
11438 	TAILQ_FOREACH(f, ethertype_list, rules) {
11439 		flags = 0;
11440 		if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11441 			flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11442 		if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11443 			flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11444 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11445 
11446 		memset(&stats, 0, sizeof(stats));
11447 		i40e_aq_add_rem_control_packet_filter(hw,
11448 					    f->input.mac_addr.addr_bytes,
11449 					    f->input.ether_type,
11450 					    flags, pf->main_vsi->seid,
11451 					    f->queue, 1, &stats, NULL);
11452 	}
11453 	PMD_DRV_LOG(INFO, "Ethertype filter:"
11454 		    " mac_etype_used = %u, etype_used = %u,"
11455 		    " mac_etype_free = %u, etype_free = %u",
11456 		    stats.mac_etype_used, stats.etype_used,
11457 		    stats.mac_etype_free, stats.etype_free);
11458 }
11459 
11460 /* Restore tunnel filter */
11461 static void
11462 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11463 {
11464 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11465 	struct i40e_vsi *vsi;
11466 	struct i40e_pf_vf *vf;
11467 	struct i40e_tunnel_filter_list
11468 		*tunnel_list = &pf->tunnel.tunnel_list;
11469 	struct i40e_tunnel_filter *f;
11470 	struct i40e_aqc_cloud_filters_element_bb cld_filter;
11471 	bool big_buffer = false;
11472 
11473 	TAILQ_FOREACH(f, tunnel_list, rules) {
11474 		if (!f->is_to_vf)
11475 			vsi = pf->main_vsi;
11476 		else {
11477 			vf = &pf->vfs[f->vf_id];
11478 			vsi = vf->vsi;
11479 		}
11480 		memset(&cld_filter, 0, sizeof(cld_filter));
11481 		rte_ether_addr_copy((struct rte_ether_addr *)
11482 				&f->input.outer_mac,
11483 			(struct rte_ether_addr *)&cld_filter.element.outer_mac);
11484 		rte_ether_addr_copy((struct rte_ether_addr *)
11485 				&f->input.inner_mac,
11486 			(struct rte_ether_addr *)&cld_filter.element.inner_mac);
11487 		cld_filter.element.inner_vlan = f->input.inner_vlan;
11488 		cld_filter.element.flags = f->input.flags;
11489 		cld_filter.element.tenant_id = f->input.tenant_id;
11490 		cld_filter.element.queue_number = f->queue;
11491 		rte_memcpy(cld_filter.general_fields,
11492 			   f->input.general_fields,
11493 			   sizeof(f->input.general_fields));
11494 
11495 		/* Recompute per filter so a previous big-buffer
11496 		 * filter does not leak into this iteration.
11497 		 */
11498 		big_buffer =
11499 			((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11500 			  I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11501 			((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11502 			  I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11503 			((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11504 			  I40E_AQC_ADD_CLOUD_FILTER_0X10);
11505 
11506 		if (big_buffer)
11507 			i40e_aq_add_cloud_filters_bb(hw,
11508 					vsi->seid, &cld_filter, 1);
11509 		else
11510 			i40e_aq_add_cloud_filters(hw, vsi->seid,
11511 						  &cld_filter.element, 1);
11512 	}
11513 }
11514 
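/*
 * i40e_filter_restore - replay all software-tracked filters into hardware,
 * e.g. when the port is (re)started after a reset and the hardware tables
 * are empty while the driver's lists still hold the rules.
 */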
11515 static void
11516 i40e_filter_restore(struct i40e_pf *pf)
11517 {
11518 	i40e_ethertype_filter_restore(pf);
11519 	i40e_tunnel_filter_restore(pf);
11520 	i40e_fdir_filter_restore(pf);
11521 	(void)i40e_hash_filter_restore(pf);
11522 }
11523 
11524 bool
11525 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11526 {
11527 	if (strcmp(dev->device->driver->name, drv->driver.name))
11528 		return false;
11529 
11530 	return true;
11531 }
11532 
11533 bool
11534 is_i40e_supported(struct rte_eth_dev *dev)
11535 {
11536 	return is_device_supported(dev, &rte_i40e_pmd);
11537 }
11538 
11539 struct i40e_customized_pctype*
11540 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11541 {
11542 	int i;
11543 
11544 	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11545 		if (pf->customized_pctype[i].index == index)
11546 			return &pf->customized_pctype[i];
11547 	}
11548 	return NULL;
11549 }
11550 
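/*
 * i40e_update_customized_pctype - sync customized pctypes with a DDP package
 *
 * The package metadata is queried for its packet-classifier types; each
 * pctype's protocol chain is flattened into an underscore-joined name
 * (e.g. "GTPU_IPV4") which selects the matching I40E_CUSTOMIZED_* slot.
 * On package add the slot records the hardware pctype value and becomes
 * valid; on delete it is reset to I40E_FILTER_PCTYPE_INVALID.
 */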
11551 static int
11552 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11553 			      uint32_t pkg_size, uint32_t proto_num,
11554 			      struct rte_pmd_i40e_proto_info *proto,
11555 			      enum rte_pmd_i40e_package_op op)
11556 {
11557 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11558 	uint32_t pctype_num;
11559 	struct rte_pmd_i40e_ptype_info *pctype;
11560 	uint32_t buff_size;
11561 	struct i40e_customized_pctype *new_pctype = NULL;
11562 	uint8_t proto_id;
11563 	uint8_t pctype_value;
11564 	char name[64];
11565 	uint32_t i, j, n;
11566 	int ret;
11567 
11568 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11569 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11570 		PMD_DRV_LOG(ERR, "Unsupported operation.");
11571 		return -1;
11572 	}
11573 
11574 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11575 				(uint8_t *)&pctype_num, sizeof(pctype_num),
11576 				RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11577 	if (ret) {
11578 		PMD_DRV_LOG(ERR, "Failed to get pctype number");
11579 		return -1;
11580 	}
11581 	if (!pctype_num) {
11582 		PMD_DRV_LOG(INFO, "No new pctype added");
11583 		return -1;
11584 	}
11585 
11586 	buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
11587 	pctype = rte_zmalloc("new_pctype", buff_size, 0);
11588 	if (!pctype) {
11589 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
11590 		return -1;
11591 	}
11592 	/* get information about new pctype list */
11593 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11594 					(uint8_t *)pctype, buff_size,
11595 					RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11596 	if (ret) {
11597 		PMD_DRV_LOG(ERR, "Failed to get pctype list");
11598 		rte_free(pctype);
11599 		return -1;
11600 	}
11601 
11602 	/* Update customized pctype. */
11603 	for (i = 0; i < pctype_num; i++) {
11604 		pctype_value = pctype[i].ptype_id;
11605 		memset(name, 0, sizeof(name));
11606 		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11607 			proto_id = pctype[i].protocols[j];
11608 			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11609 				continue;
11610 			for (n = 0; n < proto_num; n++) {
11611 				if (proto[n].proto_id != proto_id)
11612 					continue;
11613 				strlcat(name, proto[n].name, sizeof(name));
11614 				strlcat(name, "_", sizeof(name));
11615 				break;
11616 			}
11617 		}
11618 		if (name[0]) name[strlen(name) - 1] = '\0'; /* strip trailing '_' */
11619 		PMD_DRV_LOG(INFO, "name = %s", name);
11620 		if (!strcmp(name, "GTPC"))
11621 			new_pctype =
11622 				i40e_find_customized_pctype(pf,
11623 						      I40E_CUSTOMIZED_GTPC);
11624 		else if (!strcmp(name, "GTPU_IPV4"))
11625 			new_pctype =
11626 				i40e_find_customized_pctype(pf,
11627 						   I40E_CUSTOMIZED_GTPU_IPV4);
11628 		else if (!strcmp(name, "GTPU_IPV6"))
11629 			new_pctype =
11630 				i40e_find_customized_pctype(pf,
11631 						   I40E_CUSTOMIZED_GTPU_IPV6);
11632 		else if (!strcmp(name, "GTPU"))
11633 			new_pctype =
11634 				i40e_find_customized_pctype(pf,
11635 						      I40E_CUSTOMIZED_GTPU);
11636 		else if (!strcmp(name, "IPV4_L2TPV3"))
11637 			new_pctype =
11638 				i40e_find_customized_pctype(pf,
11639 						I40E_CUSTOMIZED_IPV4_L2TPV3);
11640 		else if (!strcmp(name, "IPV6_L2TPV3"))
11641 			new_pctype =
11642 				i40e_find_customized_pctype(pf,
11643 						I40E_CUSTOMIZED_IPV6_L2TPV3);
11644 		else if (!strcmp(name, "IPV4_ESP"))
11645 			new_pctype =
11646 				i40e_find_customized_pctype(pf,
11647 						I40E_CUSTOMIZED_ESP_IPV4);
11648 		else if (!strcmp(name, "IPV6_ESP"))
11649 			new_pctype =
11650 				i40e_find_customized_pctype(pf,
11651 						I40E_CUSTOMIZED_ESP_IPV6);
11652 		else if (!strcmp(name, "IPV4_UDP_ESP"))
11653 			new_pctype =
11654 				i40e_find_customized_pctype(pf,
11655 						I40E_CUSTOMIZED_ESP_IPV4_UDP);
11656 		else if (!strcmp(name, "IPV6_UDP_ESP"))
11657 			new_pctype =
11658 				i40e_find_customized_pctype(pf,
11659 						I40E_CUSTOMIZED_ESP_IPV6_UDP);
11660 		else if (!strcmp(name, "IPV4_AH"))
11661 			new_pctype =
11662 				i40e_find_customized_pctype(pf,
11663 						I40E_CUSTOMIZED_AH_IPV4);
11664 		else if (!strcmp(name, "IPV6_AH"))
11665 			new_pctype =
11666 				i40e_find_customized_pctype(pf,
11667 						I40E_CUSTOMIZED_AH_IPV6);
11668 		if (new_pctype) {
11669 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
11670 				new_pctype->pctype = pctype_value;
11671 				new_pctype->valid = true;
11672 			} else {
11673 				new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
11674 				new_pctype->valid = false;
11675 			}
11676 		}
11677 	}
11678 
11679 	rte_free(pctype);
11680 	return 0;
11681 }
11682 
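/*
 * i40e_update_customized_ptype - rebuild the SW ptype mapping for a DDP
 * package.  On delete the per-port mapping is simply reset; on add each
 * new hardware ptype's protocol chain is translated into RTE_PTYPE_* bits
 * (tracking inner vs. outer headers via the in_tunnel flag) and pushed
 * through rte_pmd_i40e_ptype_mapping_update().
 */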
11683 static int
11684 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
11685 			     uint32_t pkg_size, uint32_t proto_num,
11686 			     struct rte_pmd_i40e_proto_info *proto,
11687 			     enum rte_pmd_i40e_package_op op)
11688 {
11689 	struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
11690 	uint16_t port_id = dev->data->port_id;
11691 	uint32_t ptype_num;
11692 	struct rte_pmd_i40e_ptype_info *ptype;
11693 	uint32_t buff_size;
11694 	uint8_t proto_id;
11695 	char name[RTE_PMD_I40E_DDP_NAME_SIZE];
11696 	uint32_t i, j, n;
11697 	bool in_tunnel;
11698 	int ret;
11699 
11700 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11701 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11702 		PMD_DRV_LOG(ERR, "Unsupported operation.");
11703 		return -1;
11704 	}
11705 
11706 	if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
11707 		rte_pmd_i40e_ptype_mapping_reset(port_id);
11708 		return 0;
11709 	}
11710 
11711 	/* get information about new ptype num */
11712 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11713 				(uint8_t *)&ptype_num, sizeof(ptype_num),
11714 				RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
11715 	if (ret) {
11716 		PMD_DRV_LOG(ERR, "Failed to get ptype number");
11717 		return ret;
11718 	}
11719 	if (!ptype_num) {
11720 		PMD_DRV_LOG(INFO, "No new ptype added");
11721 		return -1;
11722 	}
11723 
11724 	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11725 	ptype = rte_zmalloc("new_ptype", buff_size, 0);
11726 	if (!ptype) {
11727 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
11728 		return -1;
11729 	}
11730 
11731 	/* get information about new ptype list */
11732 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11733 					(uint8_t *)ptype, buff_size,
11734 					RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
11735 	if (ret) {
11736 		PMD_DRV_LOG(ERR, "Failed to get ptype list");
11737 		rte_free(ptype);
11738 		return ret;
11739 	}
11740 
11741 	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
11742 	ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
11743 	if (!ptype_mapping) {
11744 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
11745 		rte_free(ptype);
11746 		return -1;
11747 	}
11748 
11749 	/* Update ptype mapping table. */
11750 	for (i = 0; i < ptype_num; i++) {
11751 		ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
11752 		ptype_mapping[i].sw_ptype = 0;
11753 		in_tunnel = false;
11754 		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11755 			proto_id = ptype[i].protocols[j];
11756 			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11757 				continue;
11758 			for (n = 0; n < proto_num; n++) {
11759 				if (proto[n].proto_id != proto_id)
11760 					continue;
11761 				memset(name, 0, sizeof(name));
11762 				strlcpy(name, proto[n].name, sizeof(name));
11763 				PMD_DRV_LOG(INFO, "name = %s", name);
11764 				if (!strncasecmp(name, "PPPOE", 5))
11765 					ptype_mapping[i].sw_ptype |=
11766 						RTE_PTYPE_L2_ETHER_PPPOE;
11767 				else if (!strncasecmp(name, "IPV4FRAG", 8) &&
11768 					 !in_tunnel) {
11769 					ptype_mapping[i].sw_ptype |=
11770 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11771 					ptype_mapping[i].sw_ptype |=
11772 						RTE_PTYPE_L4_FRAG;
11773 				} else if (!strncasecmp(name, "IPV4FRAG", 8) &&
11774 					   in_tunnel) {
11775 					ptype_mapping[i].sw_ptype |=
11776 					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11777 					ptype_mapping[i].sw_ptype |=
11778 						RTE_PTYPE_INNER_L4_FRAG;
11779 				} else if (!strncasecmp(name, "OIPV4", 5)) {
11780 					ptype_mapping[i].sw_ptype |=
11781 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11782 					in_tunnel = true;
11783 				} else if (!strncasecmp(name, "IPV4", 4) &&
11784 					   !in_tunnel)
11785 					ptype_mapping[i].sw_ptype |=
11786 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11787 				else if (!strncasecmp(name, "IPV4", 4) &&
11788 					 in_tunnel)
11789 					ptype_mapping[i].sw_ptype |=
11790 					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11791 				else if (!strncasecmp(name, "IPV6FRAG", 8) &&
11792 					 !in_tunnel) {
11793 					ptype_mapping[i].sw_ptype |=
11794 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11795 					ptype_mapping[i].sw_ptype |=
11796 						RTE_PTYPE_L4_FRAG;
11797 				} else if (!strncasecmp(name, "IPV6FRAG", 8) &&
11798 					   in_tunnel) {
11799 					ptype_mapping[i].sw_ptype |=
11800 					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11801 					ptype_mapping[i].sw_ptype |=
11802 						RTE_PTYPE_INNER_L4_FRAG;
11803 				} else if (!strncasecmp(name, "OIPV6", 5)) {
11804 					ptype_mapping[i].sw_ptype |=
11805 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11806 					in_tunnel = true;
11807 				} else if (!strncasecmp(name, "IPV6", 4) &&
11808 					   !in_tunnel)
11809 					ptype_mapping[i].sw_ptype |=
11810 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11811 				else if (!strncasecmp(name, "IPV6", 4) &&
11812 					 in_tunnel)
11813 					ptype_mapping[i].sw_ptype |=
11814 					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11815 				else if (!strncasecmp(name, "UDP", 3) &&
11816 					 !in_tunnel)
11817 					ptype_mapping[i].sw_ptype |=
11818 						RTE_PTYPE_L4_UDP;
11819 				else if (!strncasecmp(name, "UDP", 3) &&
11820 					 in_tunnel)
11821 					ptype_mapping[i].sw_ptype |=
11822 						RTE_PTYPE_INNER_L4_UDP;
11823 				else if (!strncasecmp(name, "TCP", 3) &&
11824 					 !in_tunnel)
11825 					ptype_mapping[i].sw_ptype |=
11826 						RTE_PTYPE_L4_TCP;
11827 				else if (!strncasecmp(name, "TCP", 3) &&
11828 					 in_tunnel)
11829 					ptype_mapping[i].sw_ptype |=
11830 						RTE_PTYPE_INNER_L4_TCP;
11831 				else if (!strncasecmp(name, "SCTP", 4) &&
11832 					 !in_tunnel)
11833 					ptype_mapping[i].sw_ptype |=
11834 						RTE_PTYPE_L4_SCTP;
11835 				else if (!strncasecmp(name, "SCTP", 4) &&
11836 					 in_tunnel)
11837 					ptype_mapping[i].sw_ptype |=
11838 						RTE_PTYPE_INNER_L4_SCTP;
11839 				else if ((!strncasecmp(name, "ICMP", 4) ||
11840 					  !strncasecmp(name, "ICMPV6", 6)) &&
11841 					 !in_tunnel)
11842 					ptype_mapping[i].sw_ptype |=
11843 						RTE_PTYPE_L4_ICMP;
11844 				else if ((!strncasecmp(name, "ICMP", 4) ||
11845 					  !strncasecmp(name, "ICMPV6", 6)) &&
11846 					 in_tunnel)
11847 					ptype_mapping[i].sw_ptype |=
11848 						RTE_PTYPE_INNER_L4_ICMP;
11849 				else if (!strncasecmp(name, "GTPC", 4)) {
11850 					ptype_mapping[i].sw_ptype |=
11851 						RTE_PTYPE_TUNNEL_GTPC;
11852 					in_tunnel = true;
11853 				} else if (!strncasecmp(name, "GTPU", 4)) {
11854 					ptype_mapping[i].sw_ptype |=
11855 						RTE_PTYPE_TUNNEL_GTPU;
11856 					in_tunnel = true;
11857 				} else if (!strncasecmp(name, "ESP", 3)) {
11858 					ptype_mapping[i].sw_ptype |=
11859 						RTE_PTYPE_TUNNEL_ESP;
11860 					in_tunnel = true;
11861 				} else if (!strncasecmp(name, "GRENAT", 6)) {
11862 					ptype_mapping[i].sw_ptype |=
11863 						RTE_PTYPE_TUNNEL_GRENAT;
11864 					in_tunnel = true;
11865 				} else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
11866 					   !strncasecmp(name, "L2TPV2", 6) ||
11867 					   !strncasecmp(name, "L2TPV3", 6)) {
11868 					ptype_mapping[i].sw_ptype |=
11869 						RTE_PTYPE_TUNNEL_L2TP;
11870 					in_tunnel = true;
11871 				}
11872 
11873 				break;
11874 			}
11875 		}
11876 	}
11877 
11878 	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
11879 						ptype_num, 0);
11880 	if (ret)
11881 		PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
11882 
11883 	rte_free(ptype_mapping);
11884 	rte_free(ptype);
11885 	return ret;
11886 }
11887 
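/*
 * i40e_update_customized_info - entry point called when a DDP package is
 * written or removed.  It pulls the package's protocol table, updates the
 * PF's GTP/ESP support flags, and then refreshes the customized pctype
 * and ptype tables via the two helpers above.
 */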
11888 void
11889 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
11890 			    uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
11891 {
11892 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11893 	uint32_t proto_num;
11894 	struct rte_pmd_i40e_proto_info *proto;
11895 	uint32_t buff_size;
11896 	uint32_t i;
11897 	int ret;
11898 
11899 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11900 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11901 		PMD_DRV_LOG(ERR, "Unsupported operation.");
11902 		return;
11903 	}
11904 
11905 	/* get information about protocol number */
11906 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11907 				       (uint8_t *)&proto_num, sizeof(proto_num),
11908 				       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
11909 	if (ret) {
11910 		PMD_DRV_LOG(ERR, "Failed to get protocol number");
11911 		return;
11912 	}
11913 	if (!proto_num) {
11914 		PMD_DRV_LOG(INFO, "No new protocol added");
11915 		return;
11916 	}
11917 
11918 	buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
11919 	proto = rte_zmalloc("new_proto", buff_size, 0);
11920 	if (!proto) {
11921 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
11922 		return;
11923 	}
11924 
11925 	/* get information about protocol list */
11926 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11927 					(uint8_t *)proto, buff_size,
11928 					RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
11929 	if (ret) {
11930 		PMD_DRV_LOG(ERR, "Failed to get protocol list");
11931 		rte_free(proto);
11932 		return;
11933 	}
11934 
11935 	/* Check if GTP is supported. */
11936 	for (i = 0; i < proto_num; i++) {
11937 		if (!strncmp(proto[i].name, "GTP", 3)) {
11938 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
11939 				pf->gtp_support = true;
11940 			else
11941 				pf->gtp_support = false;
11942 			break;
11943 		}
11944 	}
11945 
11946 	/* Check if ESP is supported. */
11947 	for (i = 0; i < proto_num; i++) {
11948 		if (!strncmp(proto[i].name, "ESP", 3)) {
11949 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
11950 				pf->esp_support = true;
11951 			else
11952 				pf->esp_support = false;
11953 			break;
11954 		}
11955 	}
11956 
11957 	/* Update customized pctype info */
11958 	ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
11959 					    proto_num, proto, op);
11960 	if (ret)
11961 		PMD_DRV_LOG(INFO, "No pctype is updated.");
11962 
11963 	/* Update customized ptype info */
11964 	ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
11965 					   proto_num, proto, op);
11966 	if (ret)
11967 		PMD_DRV_LOG(INFO, "No ptype is updated.");
11968 
11969 	rte_free(proto);
11970 }
11971 
11972 /* Create a QinQ cloud filter
11973  *
11974  * The Fortville NIC has limited resources for tunnel filters,
11975  * so we can only reuse existing filters.
11976  *
11977  * In step 1 we define which Field Vector fields can be used for
11978  * filter types.
11979  * As we do not have the inner tag defined as a field,
11980  * we have to define it first, by reusing one of L1 entries.
11981  *
11982  * In step 2 we are replacing one of existing filter types with
11983  * a new one for QinQ.
11984  * As we are reusing L1 and replacing L2, some of the default filter
11985  * types will disappear, depending on which L1 and L2 entries we reuse.
11986  *
11987  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
11988  *
11989  * 1.	Create L1 filter of outer vlan (12b) which will be in use
11990  *		later when we define the cloud filter.
11991  *	a.	Valid_flags.replace_cloud = 0
11992  *	b.	Old_filter = 10 (Stag_Inner_Vlan)
11993  *	c.	New_filter = 0x10
11994  *	d.	TR bit = 0xff (optional, not used here)
11995  *	e.	Buffer – 2 entries:
11996  *		i.	Byte 0 = 8 (outer vlan FV index).
11997  *			Byte 1 = 0 (rsv)
11998  *			Byte 2-3 = 0x0fff
11999  *		ii.	Byte 0 = 37 (inner vlan FV index).
12000  *			Byte 1 = 0 (rsv)
12001  *			Byte 2-3 = 0x0fff
12002  *
12003  * Step 2:
12004  * 2.	Create cloud filter using two L1 filters entries: stag and
12005  *		new filter(outer vlan+ inner vlan)
12006  *	a.	Valid_flags.replace_cloud = 1
12007  *	b.	Old_filter = 1 (instead of outer IP)
12008  *	c.	New_filter = 0x10
12009  *	d.	Buffer – 2 entries:
12010  *		i.	Byte 0 = 0x80 | 7 (valid | Stag).
12011  *			Byte 1-3 = 0 (rsv)
12012  *		ii.	Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
12013  *			Byte 9-11 = 0 (rsv)
12014  */
12015 static int
12016 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
12017 {
12018 	int ret = -ENOTSUP;
12019 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
12020 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
12021 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12022 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
12023 
12024 	if (pf->support_multi_driver) {
12025 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
12026 		return ret;
12027 	}
12028 
12029 	/* Init */
12030 	memset(&filter_replace, 0,
12031 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12032 	memset(&filter_replace_buf, 0,
12033 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12034 
12035 	/* create L1 filter */
12036 	filter_replace.old_filter_type =
12037 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
12038 	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12039 	filter_replace.tr_bit = 0;
12040 
12041 	/* Prepare the buffer, 2 entries */
12042 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
12043 	filter_replace_buf.data[0] |=
12044 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12045 	/* Field Vector 12b mask */
12046 	filter_replace_buf.data[2] = 0xff;
12047 	filter_replace_buf.data[3] = 0x0f;
12048 	filter_replace_buf.data[4] =
12049 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
12050 	filter_replace_buf.data[4] |=
12051 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12052 	/* Field Vector 12b mask */
12053 	filter_replace_buf.data[6] = 0xff;
12054 	filter_replace_buf.data[7] = 0x0f;
12055 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12056 			&filter_replace_buf);
12057 	if (ret != I40E_SUCCESS)
12058 		return ret;
12059 
12060 	if (filter_replace.old_filter_type !=
12061 	    filter_replace.new_filter_type)
12062 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
12063 			    " original: 0x%x, new: 0x%x",
12064 			    dev->device->name,
12065 			    filter_replace.old_filter_type,
12066 			    filter_replace.new_filter_type);
12067 
12068 	/* Apply the second L2 cloud filter */
12069 	memset(&filter_replace, 0,
12070 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12071 	memset(&filter_replace_buf, 0,
12072 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12073 
12074 	/* create L2 filter, input for L2 filter will be L1 filter  */
12075 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
12076 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
12077 	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12078 
12079 	/* Prepare the buffer, 2 entries */
12080 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
12081 	filter_replace_buf.data[0] |=
12082 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12083 	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12084 	filter_replace_buf.data[4] |=
12085 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12086 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12087 			&filter_replace_buf);
12088 	if (!ret && (filter_replace.old_filter_type !=
12089 		     filter_replace.new_filter_type))
12090 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
12091 			    " original: 0x%x, new: 0x%x",
12092 			    dev->device->name,
12093 			    filter_replace.old_filter_type,
12094 			    filter_replace.new_filter_type);
12095 
12096 	return ret;
12097 }
12098 
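/*
 * i40e_set_mac_max_frame - program the MAC-level max frame size
 *
 * Polls the link registers for up to MAX_REPEAT_TIME * CHECK_INTERVAL ms
 * and, once the link reports up, issues the AQ set_mac_config command
 * with the new size; on timeout it only logs an error, as the write is
 * not applicable while the link is down.
 */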
12099 static void
12100 i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size)
12101 {
12102 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12103 	uint32_t rep_cnt = MAX_REPEAT_TIME;
12104 	struct rte_eth_link link;
12105 	enum i40e_status_code status;
12106 
12107 	do {
12108 		update_link_reg(hw, &link);
12109 		if (link.link_status)
12110 			break;
12111 
12112 		rte_delay_ms(CHECK_INTERVAL);
12113 	} while (--rep_cnt);
12114 
12115 	if (link.link_status) {
12116 		status = i40e_aq_set_mac_config(hw, size, true, 0, false, NULL);
12117 		if (status != I40E_SUCCESS)
12118 			PMD_DRV_LOG(ERR, "Failed to set max frame size at port level");
12119 	} else {
12120 		PMD_DRV_LOG(ERR, "Set max frame size at port level not applicable on link down");
12121 	}
12122 }
12123 
12124 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
12125 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
12126 #ifdef RTE_ETHDEV_DEBUG_RX
12127 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_rx, rx, DEBUG);
12128 #endif
12129 #ifdef RTE_ETHDEV_DEBUG_TX
12130 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_tx, tx, DEBUG);
12131 #endif
12132 
12133 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12134 			      ETH_I40E_FLOATING_VEB_ARG "=1"
12135 			      ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
12136 			      ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
12137 			      ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
			      ETH_I40E_VF_MSG_CFG "=<string>");
12138