/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#ifndef _ICE_ETHDEV_H_
#define _ICE_ETHDEV_H_

#include <rte_kvargs.h>
#include <rte_time.h>

#include <ethdev_driver.h>

#include "base/ice_common.h"
#include "base/ice_adminq_cmd.h"
#include "base/ice_flow.h"

#define ICE_ADMINQ_LEN               32
#define ICE_SBIOQ_LEN                32
#define ICE_MAILBOXQ_LEN             32
#define ICE_SBQ_LEN                  64
#define ICE_ADMINQ_BUF_SZ            4096
#define ICE_SBIOQ_BUF_SZ             4096
#define ICE_MAILBOXQ_BUF_SZ          4096
/* Number of queues per TC should be one of 1, 2, 4, 8, 16, 32, 64, 128, 256 */
#define ICE_MAX_Q_PER_TC         256
#define ICE_NUM_DESC_DEFAULT     512
#define ICE_BUF_SIZE_MIN         1024
#define ICE_FRAME_SIZE_MAX       9728
#define ICE_QUEUE_BASE_ADDR_UNIT 128
/* number of VSIs and queue default setting */
#define ICE_MAX_QP_NUM_PER_VF    16
#define ICE_DEFAULT_QP_NUM_FDIR  1
#define ICE_UINT32_BIT_SIZE      (CHAR_BIT * sizeof(uint32_t))
#define ICE_VFTA_SIZE            (4096 / ICE_UINT32_BIT_SIZE)
/* Maximum number of MAC addresses */
#define ICE_NUM_MACADDR_MAX       64
/* Maximum number of VFs */
#define ICE_MAX_VF               128
#define ICE_MAX_INTR_QUEUE_NUM   256

#define ICE_MISC_VEC_ID          RTE_INTR_VEC_ZERO_OFFSET
#define ICE_RX_VEC_ID            RTE_INTR_VEC_RXTX_OFFSET

#define ICE_MAX_PKT_TYPE  1024

/* DDP package search path */
#define ICE_PKG_FILE_DEFAULT "/lib/firmware/intel/ice/ddp/ice.pkg"
#define ICE_PKG_FILE_UPDATES "/lib/firmware/updates/intel/ice/ddp/ice.pkg"
#define ICE_PKG_FILE_SEARCH_PATH_DEFAULT "/lib/firmware/intel/ice/ddp/"
#define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/"
#define ICE_MAX_PKG_FILENAME_SIZE   256

#define MAX_ACL_NORMAL_ENTRIES    256

/**
 * vlan_id is a 12 bit number.
 * The VFTA array is actually a 4096-bit array, i.e. 128 32-bit elements.
 * 2^5 = 32. The value of the lower 5 bits specifies the bit within a 32-bit
 * element. The value of the upper 7 bits specifies the VFTA array index.
 */
#define ICE_VFTA_BIT(vlan_id)    (1 << ((vlan_id) & 0x1F))
#define ICE_VFTA_IDX(vlan_id)    ((vlan_id) >> 5)
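/*
 * Illustrative example: for VLAN ID 291 (0x123), ICE_VFTA_IDX(291) is
 * 291 >> 5 = 9 and ICE_VFTA_BIT(291) is 1 << (291 & 0x1F) = 1 << 3, so
 * VLAN 291 maps to bit 3 of VFTA element 9.
 */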

/* Default TC traffic in case DCB is not enabled */
#define ICE_DEFAULT_TCMAP        0x1
#define ICE_FDIR_QUEUE_ID        0

/* Always assign pool 0 to main VSI, VMDQ will start from 1 */
#define ICE_VMDQ_POOL_BASE       1

#define ICE_DEFAULT_RX_FREE_THRESH  32
#define ICE_DEFAULT_RX_PTHRESH      8
#define ICE_DEFAULT_RX_HTHRESH      8
#define ICE_DEFAULT_RX_WTHRESH      0

#define ICE_DEFAULT_TX_FREE_THRESH  32
#define ICE_DEFAULT_TX_PTHRESH      32
#define ICE_DEFAULT_TX_HTHRESH      0
#define ICE_DEFAULT_TX_WTHRESH      0
#define ICE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define ICE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define ICE_4_BIT_MASK   RTE_LEN2MASK(ICE_4_BIT_WIDTH, uint8_t)
#define ICE_8_BIT_WIDTH  CHAR_BIT
#define ICE_8_BIT_MASK   UINT8_MAX
#define ICE_16_BIT_WIDTH (CHAR_BIT * 2)
#define ICE_16_BIT_MASK  UINT16_MAX
#define ICE_32_BIT_WIDTH (CHAR_BIT * 4)
#define ICE_32_BIT_MASK  UINT32_MAX
#define ICE_40_BIT_WIDTH (CHAR_BIT * 5)
#define ICE_40_BIT_MASK  RTE_LEN2MASK(ICE_40_BIT_WIDTH, uint64_t)
#define ICE_48_BIT_WIDTH (CHAR_BIT * 6)
#define ICE_48_BIT_MASK  RTE_LEN2MASK(ICE_48_BIT_WIDTH, uint64_t)
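/*
 * Illustrative note, assuming the standard RTE_LEN2MASK() semantics from
 * rte_common.h (a mask with the low n bits set): ICE_40_BIT_MASK evaluates
 * to 0xFFFFFFFFFFULL and ICE_48_BIT_MASK to 0xFFFFFFFFFFFFULL.
 */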

#define ICE_FLAG_RSS                   BIT_ULL(0)
#define ICE_FLAG_DCB                   BIT_ULL(1)
#define ICE_FLAG_VMDQ                  BIT_ULL(2)
#define ICE_FLAG_SRIOV                 BIT_ULL(3)
#define ICE_FLAG_HEADER_SPLIT_DISABLED BIT_ULL(4)
#define ICE_FLAG_HEADER_SPLIT_ENABLED  BIT_ULL(5)
#define ICE_FLAG_FDIR                  BIT_ULL(6)
#define ICE_FLAG_VXLAN                 BIT_ULL(7)
#define ICE_FLAG_RSS_AQ_CAPABLE        BIT_ULL(8)
#define ICE_FLAG_VF_MAC_BY_PF          BIT_ULL(9)
#define ICE_FLAG_ALL  (ICE_FLAG_RSS | \
		       ICE_FLAG_DCB | \
		       ICE_FLAG_VMDQ | \
		       ICE_FLAG_SRIOV | \
		       ICE_FLAG_HEADER_SPLIT_DISABLED | \
		       ICE_FLAG_HEADER_SPLIT_ENABLED | \
		       ICE_FLAG_FDIR | \
		       ICE_FLAG_VXLAN | \
		       ICE_FLAG_RSS_AQ_CAPABLE | \
		       ICE_FLAG_VF_MAC_BY_PF)

#define ICE_RSS_OFFLOAD_ALL ( \
	RTE_ETH_RSS_IPV4 | \
	RTE_ETH_RSS_FRAG_IPV4 | \
	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
	RTE_ETH_RSS_IPV6 | \
	RTE_ETH_RSS_FRAG_IPV6 | \
	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
	RTE_ETH_RSS_L2_PAYLOAD)

/**
 * The overhead from MTU to max frame size.
 * Considering a QinQ packet, the VLAN tag needs to be counted twice.
 */
#define ICE_ETH_OVERHEAD \
	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN * 2)
#define ICE_ETH_MAX_LEN (RTE_ETHER_MTU + ICE_ETH_OVERHEAD)
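/*
 * Worked example, assuming the standard rte_ether.h values
 * RTE_ETHER_HDR_LEN = 14, RTE_ETHER_CRC_LEN = 4, RTE_VLAN_HLEN = 4 and
 * RTE_ETHER_MTU = 1500: ICE_ETH_OVERHEAD = 14 + 4 + 2 * 4 = 26 bytes,
 * so ICE_ETH_MAX_LEN = 1500 + 26 = 1526 bytes.
 */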

#define ICE_RXTX_BYTES_HIGH(bytes) ((bytes) & ~ICE_40_BIT_MASK)
#define ICE_RXTX_BYTES_LOW(bytes) ((bytes) & ICE_40_BIT_MASK)
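/*
 * Illustrative example: the two macros split a 64-bit byte count at the
 * 40-bit boundary. For bytes = 0x0123456789AB, ICE_RXTX_BYTES_LOW() keeps
 * 0x23456789AB and ICE_RXTX_BYTES_HIGH() keeps 0x010000000000.
 */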

/* Max number of flexible descriptor rxdid */
#define ICE_FLEX_DESC_RXDID_MAX_NUM 64

#define ICE_I2C_EEPROM_DEV_ADDR		0xA0
#define ICE_I2C_EEPROM_DEV_ADDR2	0xA2
#define ICE_MODULE_TYPE_SFP		0x03
#define ICE_MODULE_TYPE_QSFP_PLUS	0x0D
#define ICE_MODULE_TYPE_QSFP28		0x11
#define ICE_MODULE_SFF_ADDR_MODE	0x04
#define ICE_MODULE_SFF_DIAG_CAPAB	0x40
#define ICE_MODULE_REVISION_ADDR	0x01
#define ICE_MODULE_SFF_8472_COMP	0x5E
#define ICE_MODULE_SFF_8472_SWAP	0x5C
#define ICE_MODULE_QSFP_MAX_LEN		640

/* EEPROM Standards for plug-in modules */
#define ICE_MODULE_SFF_8079		0x1
#define ICE_MODULE_SFF_8079_LEN		256
#define ICE_MODULE_SFF_8472		0x2
#define ICE_MODULE_SFF_8472_LEN		512
#define ICE_MODULE_SFF_8636		0x3
#define ICE_MODULE_SFF_8636_LEN		256
#define ICE_MODULE_SFF_8636_MAX_LEN     640
#define ICE_MODULE_SFF_8436		0x4
#define ICE_MODULE_SFF_8436_LEN		256
#define ICE_MODULE_SFF_8436_MAX_LEN     640


/* Per-channel register definitions */
#define GLTSYN_AUX_OUT(_chan, _idx)     (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
#define GLTSYN_CLKO(_chan, _idx)        (GLTSYN_CLKO_0(_idx) + ((_chan) * 8))
#define GLTSYN_TGT_L(_chan, _idx)       (GLTSYN_TGT_L_0(_idx) + ((_chan) * 16))
#define GLTSYN_TGT_H(_chan, _idx)       (GLTSYN_TGT_H_0(_idx) + ((_chan) * 16))

/* DDP package type */
enum ice_pkg_type {
	ICE_PKG_TYPE_UNKNOWN,
	ICE_PKG_TYPE_OS_DEFAULT,
	ICE_PKG_TYPE_COMMS,
};

enum pps_type {
	PPS_NONE,
	PPS_PIN,
	PPS_MAX,
};

struct ice_adapter;

/**
 * MAC filter structure
 */
struct ice_mac_filter_info {
	struct rte_ether_addr mac_addr;
};

TAILQ_HEAD(ice_mac_filter_list, ice_mac_filter);

/* MAC filter list structure */
struct ice_mac_filter {
	TAILQ_ENTRY(ice_mac_filter) next;
	struct ice_mac_filter_info mac_info;
};

struct ice_vlan {
	uint16_t tpid;
	uint16_t vid;
};

#define ICE_VLAN(tpid, vid) \
	((struct ice_vlan){ tpid, vid })
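/*
 * Usage sketch, assuming the standard RTE_ETHER_TYPE_VLAN (0x8100) TPID from
 * rte_ether.h:
 *
 *	struct ice_vlan v = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 100);
 *
 * builds an ice_vlan describing VLAN 100 with the 802.1Q TPID.
 */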

/**
 * VLAN filter structure
 */
struct ice_vlan_filter_info {
	struct ice_vlan vlan;
};

TAILQ_HEAD(ice_vlan_filter_list, ice_vlan_filter);

/* VLAN filter list structure */
struct ice_vlan_filter {
	TAILQ_ENTRY(ice_vlan_filter) next;
	struct ice_vlan_filter_info vlan_info;
};

struct pool_entry {
	LIST_ENTRY(pool_entry) next;
	uint16_t base;
	uint16_t len;
};

LIST_HEAD(res_list, pool_entry);

struct ice_res_pool_info {
	uint32_t base;              /* Resource start index */
	uint32_t num_alloc;         /* Allocated resource number */
	uint32_t num_free;          /* Total available resource number */
	struct res_list alloc_list; /* Allocated resource list */
	struct res_list free_list;  /* Available resource list */
};

TAILQ_HEAD(ice_vsi_list_head, ice_vsi_list);

struct ice_vsi;

/* VSI list structure */
struct ice_vsi_list {
	TAILQ_ENTRY(ice_vsi_list) list;
	struct ice_vsi *vsi;
};

struct ice_rx_queue;
struct ice_tx_queue;

/**
 * Structure that defines a VSI, associated with an adapter.
 */
struct ice_vsi {
	struct ice_adapter *adapter; /* Backreference to associated adapter */
	struct ice_aqc_vsi_props info; /* VSI properties */
	/**
	 * When the driver is loaded, only a default main VSI exists. When a
	 * new VSI needs to be added, HW needs to know the layout in which the
	 * VSIs are organized. Besides that, a VSI is only an element and
	 * cannot switch packets by itself; a VEB component must be added to
	 * perform switching. So a new VSI needs to specify its uplink VSI
	 * (parent VSI) before it is created. The uplink VSI checks whether it
	 * already has a VEB to switch packets; if not, it tries to create one.
	 * The uplink VSI then moves the new VSI into its sib_vsi_list to
	 * manage all the downlink VSIs.
	 *  sib_vsi_list: the VSI list that shares the same uplink VSI.
	 *  parent_vsi  : the uplink VSI. It's NULL for the main VSI.
	 *  veb         : the VEB associated with the VSI.
	 */
	struct ice_vsi_list sib_vsi_list; /* sibling vsi list */
	struct ice_vsi *parent_vsi;
	enum ice_vsi_type type; /* VSI types */
	uint16_t vlan_num;       /* Total VLAN number */
	uint16_t mac_num;        /* Total mac number */
	struct ice_mac_filter_list mac_list; /* macvlan filter list */
	struct ice_vlan_filter_list vlan_list; /* vlan filter list */
	uint16_t nb_qps;         /* Number of queue pairs VSI can occupy */
	uint16_t nb_used_qps;    /* Number of queue pairs VSI uses */
	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
	uint16_t base_queue;     /* The first queue index of this VSI */
	uint16_t vsi_id;         /* Hardware Id */
	uint16_t idx;            /* vsi_handle: SW index in hw->vsi_ctx */
	/* VF number to which the VSI connects, valid when VSI is VF type */
	uint8_t vf_num;
	uint16_t msix_intr; /* The MSI-X interrupt bound to the VSI */
	uint16_t nb_msix;   /* The max number of MSI-X vectors */
	uint8_t enabled_tc; /* The enabled traffic classes */
	uint8_t vlan_anti_spoof_on; /* VLAN anti-spoofing enabled */
	uint8_t vlan_filter_on; /* VLAN filtering enabled */
	/* information about rss configuration */
	u32 rss_key_size;
	u32 rss_lut_size;
	uint8_t *rss_lut;
	uint8_t *rss_key;
	struct ice_eth_stats eth_stats_offset;
	struct ice_eth_stats eth_stats;
	bool offset_loaded;
	uint64_t old_rx_bytes;
	uint64_t old_tx_bytes;
};

enum proto_xtr_type {
	PROTO_XTR_NONE,
	PROTO_XTR_VLAN,
	PROTO_XTR_IPV4,
	PROTO_XTR_IPV6,
	PROTO_XTR_IPV6_FLOW,
	PROTO_XTR_TCP,
	PROTO_XTR_IP_OFFSET,
	PROTO_XTR_MAX /* The last one */
};

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_VXLAN,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};

struct rte_flow;
TAILQ_HEAD(ice_flow_list, rte_flow);

struct ice_flow_parser_node;
TAILQ_HEAD(ice_parser_list, ice_flow_parser_node);

struct ice_fdir_filter_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type tunnel_type;

	struct ice_fdir_counter *counter; /* flow specific counter context */
	struct rte_flow_action_count act_count;

	uint64_t input_set_o; /* used for non-tunnel or tunnel outer fields */
	uint64_t input_set_i; /* only for tunnel inner fields */
	uint32_t mark_flag;

	struct ice_parser_profile *prof;
	bool parser_ena;
	u8 *pkt_buf;
	u8 pkt_len;
};

#define ICE_MAX_FDIR_FILTER_NUM		(1024 * 16)

struct ice_fdir_fltr_pattern {
	enum ice_fltr_ptype flow_type;

	union {
		struct ice_fdir_v4 v4;
		struct ice_fdir_v6 v6;
	} ip, mask;

	struct ice_fdir_udp_gtp gtpu_data;
	struct ice_fdir_udp_gtp gtpu_mask;

	struct ice_fdir_extra ext_data;
	struct ice_fdir_extra ext_mask;

	enum ice_fdir_tunnel_type tunnel_type;
};

#define ICE_FDIR_COUNTER_DEFAULT_POOL_SIZE	1
#define ICE_FDIR_COUNTER_MAX_POOL_SIZE		32
#define ICE_FDIR_COUNTERS_PER_BLOCK		256
#define ICE_FDIR_COUNTER_INDEX(base_idx) \
				((base_idx) * ICE_FDIR_COUNTERS_PER_BLOCK)
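/*
 * Illustrative example: with ICE_FDIR_COUNTERS_PER_BLOCK = 256,
 * ICE_FDIR_COUNTER_INDEX(2) = 2 * 256 = 512, the first counter index of the
 * third counter block.
 */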
struct ice_fdir_counter_pool;

struct ice_fdir_counter {
	TAILQ_ENTRY(ice_fdir_counter) next;
	struct ice_fdir_counter_pool *pool;
	uint8_t shared;
	uint32_t ref_cnt;
	uint32_t id;
	uint64_t hits;
	uint64_t bytes;
	uint32_t hw_index;
};

TAILQ_HEAD(ice_fdir_counter_list, ice_fdir_counter);

struct ice_fdir_counter_pool {
	TAILQ_ENTRY(ice_fdir_counter_pool) next;
	struct ice_fdir_counter_list counter_list;
	struct ice_fdir_counter counters[0];
};

TAILQ_HEAD(ice_fdir_counter_pool_list, ice_fdir_counter_pool);

struct ice_fdir_counter_pool_container {
	struct ice_fdir_counter_pool_list pool_list;
	struct ice_fdir_counter_pool *pools[ICE_FDIR_COUNTER_MAX_POOL_SIZE];
	uint8_t index_free;
};

/**
 * A structure used to define fields of FDIR related info.
 */
struct ice_fdir_info {
	struct ice_vsi *fdir_vsi;     /* pointer to fdir VSI structure */
	struct ice_tx_queue *txq;
	struct ice_rx_queue *rxq;
	void *prg_pkt;                 /* memory for fdir program packet */
	uint64_t dma_addr;             /* physical address of packet memory */
	const struct rte_memzone *mz;
	struct ice_fdir_filter_conf conf;

	struct ice_fdir_filter_conf **hash_map;
	struct rte_hash *hash_table;

	struct ice_fdir_counter_pool_container counter;
};

#define ICE_HASH_GTPU_CTX_EH_IP		0
#define ICE_HASH_GTPU_CTX_EH_IP_UDP	1
#define ICE_HASH_GTPU_CTX_EH_IP_TCP	2
#define ICE_HASH_GTPU_CTX_UP_IP		3
#define ICE_HASH_GTPU_CTX_UP_IP_UDP	4
#define ICE_HASH_GTPU_CTX_UP_IP_TCP	5
#define ICE_HASH_GTPU_CTX_DW_IP		6
#define ICE_HASH_GTPU_CTX_DW_IP_UDP	7
#define ICE_HASH_GTPU_CTX_DW_IP_TCP	8
#define ICE_HASH_GTPU_CTX_MAX		9

struct ice_hash_gtpu_ctx {
	struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX];
};

struct ice_hash_ctx {
	struct ice_hash_gtpu_ctx gtpu4;
	struct ice_hash_gtpu_ctx gtpu6;
};

struct ice_acl_conf {
	struct ice_fdir_fltr input;
	uint64_t input_set;
};

/**
 * A structure used to define fields of ACL related info.
 */
struct ice_acl_info {
	struct ice_acl_conf conf;
	struct rte_bitmap *slots;
	uint64_t hw_entry_id[MAX_ACL_NORMAL_ENTRIES];
};

struct ice_pf {
	struct ice_adapter *adapter; /* The adapter this PF is associated with */
	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
	/* Next free software VSI index.
	 * To keep it simple, the index is not recycled;
	 * we assume the indexes are more than enough.
	 */
	uint16_t next_vsi_idx;
	uint16_t vsis_allocated;
	uint16_t vsis_unallocated;
	struct ice_res_pool_info qp_pool;    /* Queue pair pool */
	struct ice_res_pool_info msix_pool;  /* MSIX interrupt pool */
	struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
	struct rte_ether_addr dev_addr; /* PF device mac address */
	uint64_t flags; /* PF feature flags */
	uint16_t hash_lut_size; /* The size of hash lookup table */
	uint16_t lan_nb_qp_max;
	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
	uint16_t base_queue; /* The base queue pair index in the device */
	uint8_t *proto_xtr; /* Protocol extraction type for all queues */
	uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
	uint16_t fdir_qp_offset;
	struct ice_fdir_info fdir; /* flow director info */
	struct ice_acl_info acl; /* ACL info */
	struct ice_hash_ctx hash_ctx;
	uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
	uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
	struct ice_hw_port_stats stats_offset;
	struct ice_hw_port_stats stats;
	/* internal packet statistics; they should be excluded from the total */
	struct ice_eth_stats internal_stats_offset;
	struct ice_eth_stats internal_stats;
	bool offset_loaded;
	bool adapter_stopped;
	struct ice_flow_list flow_list;
	rte_spinlock_t flow_ops_lock;
	struct ice_parser_list rss_parser_list;
	struct ice_parser_list perm_parser_list;
	struct ice_parser_list dist_parser_list;
	bool init_link_up;
	uint64_t old_rx_bytes;
	uint64_t old_tx_bytes;
	uint64_t supported_rxdid; /* bitmap for supported RXDID */
	uint64_t rss_hf;
};

#define ICE_MAX_QUEUE_NUM  2048
#define ICE_MAX_PIN_NUM   4

/**
 * Cache devargs parse result.
 */
struct ice_devargs {
	int rx_low_latency;
	int safe_mode_support;
	uint8_t proto_xtr_dflt;
	int pipe_mode_support;
	uint8_t proto_xtr[ICE_MAX_QUEUE_NUM];
	uint8_t pin_idx;
	uint8_t pps_out_ena;
};

/**
 * Structure to store fdir fv entry.
 */
struct ice_fdir_prof_info {
	struct ice_parser_profile prof;
	u64 fdir_actived_cnt;
};

/**
 * Structure to store rss fv entry.
 */
struct ice_rss_prof_info {
	struct ice_parser_profile prof;
	bool symm;
};

/**
 * Structure to store private data for each PF/VF instance.
 */
struct ice_adapter {
	/* Common for both PF and VF */
	struct ice_hw hw;
	struct ice_pf pf;
	bool rx_bulk_alloc_allowed;
	bool rx_vec_allowed;
	bool tx_vec_allowed;
	bool tx_simple_allowed;
	/* ptype mapping table */
	uint32_t ptype_tbl[ICE_MAX_PKT_TYPE] __rte_cache_min_aligned;
	bool is_safe_mode;
	struct ice_devargs devargs;
	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
	uint16_t fdir_ref_cnt;
	/* For PTP */
	struct rte_timecounter systime_tc;
	struct rte_timecounter rx_tstamp_tc;
	struct rte_timecounter tx_tstamp_tc;
	bool ptp_ena;
	uint64_t time_hw;
	uint32_t hw_time_high; /* high 32 bits of timestamp */
	uint32_t hw_time_low; /* low 32 bits of timestamp */
	uint64_t hw_time_update; /* SW time when the HW record was updated */
	struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
	struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
	/* True if DCF state of the associated PF is on */
	bool dcf_state_on;
	struct ice_parser *psr;
#ifdef RTE_ARCH_X86
	bool rx_use_avx2;
	bool rx_use_avx512;
	bool tx_use_avx2;
	bool tx_use_avx512;
	bool rx_vec_offload_support;
#endif
};

struct ice_vsi_vlan_pvid_info {
	uint16_t on;		/* Enable or disable pvid */
	union {
		uint16_t pvid;	/* Valid when 'on' is set; the PVID to use */
		struct {
			/* Valid when 'on' is cleared. 'tagged' will reject
			 * tagged packets, while 'untagged' will reject
			 * untagged packets.
			 */
			uint8_t tagged;
			uint8_t untagged;
		} reject;
	} config;
};

#define ICE_DEV_TO_PCI(eth_dev) \
	RTE_DEV_TO_PCI((eth_dev)->device)

/* ICE_DEV_PRIVATE_TO */
#define ICE_DEV_PRIVATE_TO_PF(adapter) \
	(&((struct ice_adapter *)adapter)->pf)
#define ICE_DEV_PRIVATE_TO_HW(adapter) \
	(&((struct ice_adapter *)adapter)->hw)
#define ICE_DEV_PRIVATE_TO_ADAPTER(adapter) \
	((struct ice_adapter *)adapter)
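/*
 * Usage sketch (illustrative, assuming "dev" is the struct rte_eth_dev *
 * passed to an ethdev op):
 *
 *	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 *	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 */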

/* ICE_VSI_TO */
#define ICE_VSI_TO_HW(vsi) \
	(&(((struct ice_vsi *)vsi)->adapter->hw))
#define ICE_VSI_TO_PF(vsi) \
	(&(((struct ice_vsi *)vsi)->adapter->pf))

/* ICE_PF_TO */
#define ICE_PF_TO_HW(pf) \
	(&(((struct ice_pf *)pf)->adapter->hw))
#define ICE_PF_TO_ADAPTER(pf) \
	((struct ice_adapter *)(pf)->adapter)
#define ICE_PF_TO_ETH_DEV(pf) \
	(((struct ice_pf *)pf)->adapter->eth_dev)

int
ice_load_pkg(struct ice_adapter *adapter, bool use_dsn, uint64_t dsn);
struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type);
int
ice_release_vsi(struct ice_vsi *vsi);
void ice_vsi_enable_queues_intr(struct ice_vsi *vsi);
void ice_vsi_disable_queues_intr(struct ice_vsi *vsi);
void ice_vsi_queues_bind_intr(struct ice_vsi *vsi);
int ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
			 struct ice_rss_hash_cfg *cfg);
int ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
			 struct ice_rss_hash_cfg *cfg);

static inline int
ice_align_floor(int n)
{
	if (n == 0)
		return 0;
	return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n));
}
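/*
 * Illustrative examples: ice_align_floor() rounds down to the nearest power
 * of two, e.g. ice_align_floor(300) == 256, ice_align_floor(512) == 512 and
 * ice_align_floor(0) == 0.
 */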

#define ICE_PHY_TYPE_SUPPORT_50G(phy_type) \
	(((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_CR2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_SR2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_LR2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_KR2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50G_LAUI2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_CP) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_SR) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_FR) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_LR) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI1))

#define ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type) \
	(((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CR4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_SR4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_LR4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_KR4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100G_CAUI4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100G_AUI4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CP2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_SR2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_DR))

#define ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type) \
	(((phy_type) & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) || \
	((phy_type) & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC) || \
	((phy_type) & ICE_PHY_TYPE_HIGH_100G_CAUI2) || \
	((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC) || \
	((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2))

#endif /* _ICE_ETHDEV_H_ */