/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#ifndef _ICE_ETHDEV_H_
#define _ICE_ETHDEV_H_

#include <rte_kvargs.h>

#include <rte_ethdev_driver.h>

#include "base/ice_common.h"
#include "base/ice_adminq_cmd.h"

#define ICE_VLAN_TAG_SIZE        4

#define ICE_ADMINQ_LEN               32
#define ICE_SBIOQ_LEN                32
#define ICE_MAILBOXQ_LEN             32
#define ICE_ADMINQ_BUF_SZ            4096
#define ICE_SBIOQ_BUF_SZ             4096
#define ICE_MAILBOXQ_BUF_SZ          4096
/* Number of queues per TC should be one of 1, 2, 4, 8, 16, 32, 64 */
#define ICE_MAX_Q_PER_TC         64
#define ICE_NUM_DESC_DEFAULT     512
#define ICE_BUF_SIZE_MIN         1024
#define ICE_FRAME_SIZE_MAX       9728
#define ICE_QUEUE_BASE_ADDR_UNIT 128
/* number of VSIs and queue default setting */
#define ICE_MAX_QP_NUM_PER_VF    16
#define ICE_DEFAULT_QP_NUM_FDIR  1
#define ICE_UINT32_BIT_SIZE      (CHAR_BIT * sizeof(uint32_t))
#define ICE_VFTA_SIZE            (4096 / ICE_UINT32_BIT_SIZE)
/* Maximum number of MAC addresses */
#define ICE_NUM_MACADDR_MAX       64
/* Maximum number of VFs */
#define ICE_MAX_VF               128
#define ICE_MAX_INTR_QUEUE_NUM   256

#define ICE_MISC_VEC_ID          RTE_INTR_VEC_ZERO_OFFSET
#define ICE_RX_VEC_ID            RTE_INTR_VEC_RXTX_OFFSET

#define ICE_MAX_PKT_TYPE  1024

/**
 * vlan_id is a 12 bit number.
 * The VFTA array is actually a 4096 bit array, i.e. 128 32-bit elements.
 * 2^5 = 32. The value of the lower 5 bits specifies the bit within a 32-bit
 * element, and the value of the higher 7 bits specifies the VFTA array index.
 */
#define ICE_VFTA_BIT(vlan_id)    (1 << ((vlan_id) & 0x1F))
#define ICE_VFTA_IDX(vlan_id)    ((vlan_id) >> 5)
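
/*
 * Illustrative example (not in the original header): for vlan_id 100,
 * ICE_VFTA_IDX(100) == 3 (100 >> 5) and ICE_VFTA_BIT(100) == (1 << 4)
 * (100 & 0x1F == 4), so the filter bit lives in word 3 of the VFTA
 * bitmap, at bit position 4.
 */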

/* Default TC traffic in case DCB is not enabled */
#define ICE_DEFAULT_TCMAP        0x1
#define ICE_FDIR_QUEUE_ID        0

/* Always assign pool 0 to main VSI, VMDQ will start from 1 */
#define ICE_VMDQ_POOL_BASE       1

#define ICE_DEFAULT_RX_FREE_THRESH  32
#define ICE_DEFAULT_RX_PTHRESH      8
#define ICE_DEFAULT_RX_HTHRESH      8
#define ICE_DEFAULT_RX_WTHRESH      0

#define ICE_DEFAULT_TX_FREE_THRESH  32
#define ICE_DEFAULT_TX_PTHRESH      32
#define ICE_DEFAULT_TX_HTHRESH      0
#define ICE_DEFAULT_TX_WTHRESH      0
#define ICE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define ICE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define ICE_4_BIT_MASK   RTE_LEN2MASK(ICE_4_BIT_WIDTH, uint8_t)
#define ICE_8_BIT_WIDTH  CHAR_BIT
#define ICE_8_BIT_MASK   UINT8_MAX
#define ICE_16_BIT_WIDTH (CHAR_BIT * 2)
#define ICE_16_BIT_MASK  UINT16_MAX
#define ICE_32_BIT_WIDTH (CHAR_BIT * 4)
#define ICE_32_BIT_MASK  UINT32_MAX
#define ICE_40_BIT_WIDTH (CHAR_BIT * 5)
#define ICE_40_BIT_MASK  RTE_LEN2MASK(ICE_40_BIT_WIDTH, uint64_t)
#define ICE_48_BIT_WIDTH (CHAR_BIT * 6)
#define ICE_48_BIT_MASK  RTE_LEN2MASK(ICE_48_BIT_WIDTH, uint64_t)

#define ICE_FLAG_RSS                   BIT_ULL(0)
#define ICE_FLAG_DCB                   BIT_ULL(1)
#define ICE_FLAG_VMDQ                  BIT_ULL(2)
#define ICE_FLAG_SRIOV                 BIT_ULL(3)
#define ICE_FLAG_HEADER_SPLIT_DISABLED BIT_ULL(4)
#define ICE_FLAG_HEADER_SPLIT_ENABLED  BIT_ULL(5)
#define ICE_FLAG_FDIR                  BIT_ULL(6)
#define ICE_FLAG_VXLAN                 BIT_ULL(7)
#define ICE_FLAG_RSS_AQ_CAPABLE        BIT_ULL(8)
#define ICE_FLAG_VF_MAC_BY_PF          BIT_ULL(9)
#define ICE_FLAG_ALL  (ICE_FLAG_RSS | \
		       ICE_FLAG_DCB | \
		       ICE_FLAG_VMDQ | \
		       ICE_FLAG_SRIOV | \
		       ICE_FLAG_HEADER_SPLIT_DISABLED | \
		       ICE_FLAG_HEADER_SPLIT_ENABLED | \
		       ICE_FLAG_FDIR | \
		       ICE_FLAG_VXLAN | \
		       ICE_FLAG_RSS_AQ_CAPABLE | \
		       ICE_FLAG_VF_MAC_BY_PF)

#define ICE_RSS_OFFLOAD_ALL ( \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_NONFRAG_IPV6_SCTP | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_L2_PAYLOAD)

/**
 * The overhead from MTU to max frame size.
 * Considering a QinQ packet, the VLAN tag needs to be counted twice.
 */
#define ICE_ETH_OVERHEAD \
	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2)
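
/*
 * Worked example (illustrative): 14 bytes Ethernet header + 4 bytes CRC +
 * 2 * 4 bytes VLAN tags = 26 bytes of overhead, so an MTU of 1500
 * corresponds to a maximum frame size of 1526 bytes.
 */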

/* DDP package type */
enum ice_pkg_type {
	ICE_PKG_TYPE_UNKNOWN,
	ICE_PKG_TYPE_OS_DEFAULT,
	ICE_PKG_TYPE_COMMS,
};

struct ice_adapter;

/**
 * MAC filter structure
 */
struct ice_mac_filter_info {
	struct rte_ether_addr mac_addr;
};

TAILQ_HEAD(ice_mac_filter_list, ice_mac_filter);

/* MAC filter list structure */
struct ice_mac_filter {
	TAILQ_ENTRY(ice_mac_filter) next;
	struct ice_mac_filter_info mac_info;
};

/**
 * VLAN filter structure
 */
struct ice_vlan_filter_info {
	uint16_t vlan_id;
};

TAILQ_HEAD(ice_vlan_filter_list, ice_vlan_filter);

/* VLAN filter list structure */
struct ice_vlan_filter {
	TAILQ_ENTRY(ice_vlan_filter) next;
	struct ice_vlan_filter_info vlan_info;
};

struct pool_entry {
	LIST_ENTRY(pool_entry) next;
	uint16_t base;
	uint16_t len;
};

LIST_HEAD(res_list, pool_entry);

struct ice_res_pool_info {
	uint32_t base;              /* Resource start index */
	uint32_t num_alloc;         /* Allocated resource number */
	uint32_t num_free;          /* Total available resource number */
	struct res_list alloc_list; /* Allocated resource list */
	struct res_list free_list;  /* Available resource list */
};

TAILQ_HEAD(ice_vsi_list_head, ice_vsi_list);

struct ice_vsi;

/* VSI list structure */
struct ice_vsi_list {
	TAILQ_ENTRY(ice_vsi_list) list;
	struct ice_vsi *vsi;
};

struct ice_rx_queue;
struct ice_tx_queue;

/**
 * Structure that defines a VSI, associated with an adapter.
 */
struct ice_vsi {
	struct ice_adapter *adapter; /* Backreference to associated adapter */
	struct ice_aqc_vsi_props info; /* VSI properties */
	/**
	 * When the driver is loaded, only a default main VSI exists. When a
	 * new VSI needs to be added, the HW needs to know how the VSIs are
	 * organized. Besides that, a VSI is only an element and cannot switch
	 * packets by itself; a VEB component must be added to perform the
	 * switching. So a new VSI needs to specify its uplink VSI (parent
	 * VSI) before it is created. The uplink VSI checks whether it already
	 * has a VEB to switch packets; if not, it tries to create one. The
	 * uplink VSI then moves the new VSI into its sib_vsi_list to manage
	 * all of its downlink VSIs.
	 *  sib_vsi_list: the VSI list that shares the same uplink VSI.
	 *  parent_vsi  : the uplink VSI. It's NULL for the main VSI.
	 *  veb         : the VEB associated with the VSI.
	 */
	struct ice_vsi_list sib_vsi_list; /* sibling vsi list */
	struct ice_vsi *parent_vsi;
	enum ice_vsi_type type; /* VSI types */
	uint16_t vlan_num;       /* Total VLAN number */
	uint16_t mac_num;        /* Total mac number */
	struct ice_mac_filter_list mac_list; /* macvlan filter list */
	struct ice_vlan_filter_list vlan_list; /* vlan filter list */
	uint16_t nb_qps;         /* Number of queue pairs VSI can occupy */
	uint16_t nb_used_qps;    /* Number of queue pairs VSI uses */
	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
	uint16_t base_queue;     /* The first queue index of this VSI */
	uint16_t vsi_id;         /* Hardware Id */
	uint16_t idx;            /* vsi_handle: SW index in hw->vsi_ctx */
	/* VF number to which the VSI connects, valid when VSI is VF type */
	uint8_t vf_num;
	uint16_t msix_intr; /* The MSIX interrupt bound to this VSI */
	uint16_t nb_msix;   /* The max number of MSIX vectors */
	uint8_t enabled_tc; /* The enabled traffic classes */
	uint8_t vlan_anti_spoof_on; /* Whether VLAN anti-spoofing is enabled */
	uint8_t vlan_filter_on; /* Whether VLAN filtering is enabled */
	/* information about rss configuration */
	u32 rss_key_size;
	u32 rss_lut_size;
	uint8_t *rss_lut;
	uint8_t *rss_key;
	struct ice_eth_stats eth_stats_offset;
	struct ice_eth_stats eth_stats;
	bool offset_loaded;
};

enum proto_xtr_type {
	PROTO_XTR_NONE,
	PROTO_XTR_VLAN,
	PROTO_XTR_IPV4,
	PROTO_XTR_IPV6,
	PROTO_XTR_IPV6_FLOW,
	PROTO_XTR_TCP,
};

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_VXLAN,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};

struct rte_flow;
TAILQ_HEAD(ice_flow_list, rte_flow);

struct ice_flow_parser_node;
TAILQ_HEAD(ice_parser_list, ice_flow_parser_node);

struct ice_fdir_filter_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type tunnel_type;

	struct ice_fdir_counter *counter; /* flow specific counter context */
	struct rte_flow_action_count act_count;

	uint64_t input_set;
};

#define ICE_MAX_FDIR_FILTER_NUM		(1024 * 16)

struct ice_fdir_fltr_pattern {
	enum ice_fltr_ptype flow_type;

	union {
		struct ice_fdir_v4 v4;
		struct ice_fdir_v6 v6;
	} ip, mask;

	struct ice_fdir_udp_gtp gtpu_data;
	struct ice_fdir_udp_gtp gtpu_mask;

	struct ice_fdir_extra ext_data;
	struct ice_fdir_extra ext_mask;

	enum ice_fdir_tunnel_type tunnel_type;
};

#define ICE_FDIR_COUNTER_DEFAULT_POOL_SIZE	1
#define ICE_FDIR_COUNTER_MAX_POOL_SIZE		32
#define ICE_FDIR_COUNTERS_PER_BLOCK		256
#define ICE_FDIR_COUNTER_INDEX(base_idx) \
				((base_idx) * ICE_FDIR_COUNTERS_PER_BLOCK)
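
/*
 * Illustrative example (not in the original header): each counter block
 * holds 256 counters, so ICE_FDIR_COUNTER_INDEX(2) == 512, i.e. the first
 * counter index of the third block.
 */
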
struct ice_fdir_counter_pool;

struct ice_fdir_counter {
	TAILQ_ENTRY(ice_fdir_counter) next;
	struct ice_fdir_counter_pool *pool;
	uint8_t shared;
	uint32_t ref_cnt;
	uint32_t id;
	uint64_t hits;
	uint64_t bytes;
	uint32_t hw_index;
};

TAILQ_HEAD(ice_fdir_counter_list, ice_fdir_counter);

struct ice_fdir_counter_pool {
	TAILQ_ENTRY(ice_fdir_counter_pool) next;
	struct ice_fdir_counter_list counter_list;
	struct ice_fdir_counter counters[0];
};

TAILQ_HEAD(ice_fdir_counter_pool_list, ice_fdir_counter_pool);

struct ice_fdir_counter_pool_container {
	struct ice_fdir_counter_pool_list pool_list;
	struct ice_fdir_counter_pool *pools[ICE_FDIR_COUNTER_MAX_POOL_SIZE];
	uint8_t index_free;
};

/**
 *  A structure used to hold Flow Director (FDIR) related information.
 */
struct ice_fdir_info {
	struct ice_vsi *fdir_vsi;     /* pointer to fdir VSI structure */
	struct ice_tx_queue *txq;
	struct ice_rx_queue *rxq;
	void *prg_pkt;                 /* memory for fdir program packet */
	uint64_t dma_addr;             /* physical address of packet memory */
	const struct rte_memzone *mz;
	struct ice_fdir_filter_conf conf;

	struct ice_fdir_filter_conf **hash_map;
	struct rte_hash *hash_table;

	struct ice_fdir_counter_pool_container counter;
};

struct ice_pf {
	struct ice_adapter *adapter; /* The adapter this PF is associated with */
	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
	/* Next free software VSI index.
	 * To save effort, indexes are not recycled; we assume there are
	 * more than enough of them.
	 */
	uint16_t next_vsi_idx;
	uint16_t vsis_allocated;
	uint16_t vsis_unallocated;
	struct ice_res_pool_info qp_pool;    /* Queue pair pool */
	struct ice_res_pool_info msix_pool;  /* MSIX interrupt pool */
	struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
	struct rte_ether_addr dev_addr; /* PF device mac address */
	uint64_t flags; /* PF feature flags */
	uint16_t hash_lut_size; /* The size of hash lookup table */
	uint16_t lan_nb_qp_max;
	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
	uint16_t base_queue; /* The base queue pair index in the device */
	uint8_t *proto_xtr; /* Protocol extraction type for all queues */
	uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
	uint16_t fdir_qp_offset;
	struct ice_fdir_info fdir; /* flow director info */
	uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
	uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
	struct ice_hw_port_stats stats_offset;
	struct ice_hw_port_stats stats;
	/* internal packet statistics; these should be excluded from the total */
	struct ice_eth_stats internal_stats_offset;
	struct ice_eth_stats internal_stats;
	bool offset_loaded;
	bool adapter_stopped;
	struct ice_flow_list flow_list;
	struct ice_parser_list rss_parser_list;
	struct ice_parser_list perm_parser_list;
	struct ice_parser_list dist_parser_list;
	bool init_link_up;
};

#define ICE_MAX_QUEUE_NUM  2048

/**
 * Cache devargs parse result.
 */
struct ice_devargs {
	int safe_mode_support;
	uint8_t proto_xtr_dflt;
	int pipe_mode_support;
	int flow_mark_support;
	uint8_t proto_xtr[ICE_MAX_QUEUE_NUM];
};
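
/*
 * Note (illustrative, hedged): these fields cache the result of parsing
 * device arguments in the driver's .c code, e.g. the safe-mode and
 * pipeline-mode toggles and the per-queue protocol extraction selection;
 * the exact devargs key strings are defined there, not in this header.
 */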

/**
 * Structure to store private data for each PF/VF instance.
 */
struct ice_adapter {
	/* Common for both PF and VF */
	struct ice_hw hw;
	struct rte_eth_dev *eth_dev;
	struct ice_pf pf;
	bool rx_bulk_alloc_allowed;
	bool rx_vec_allowed;
	bool tx_vec_allowed;
	bool tx_simple_allowed;
	/* ptype mapping table */
	uint32_t ptype_tbl[ICE_MAX_PKT_TYPE] __rte_cache_min_aligned;
	bool is_safe_mode;
	struct ice_devargs devargs;
	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
};

struct ice_vsi_vlan_pvid_info {
	uint16_t on;		/* Enable or disable pvid */
	union {
		uint16_t pvid;	/* PVID to set; valid when 'on' is set */
		struct {
			/* Valid in case 'on' is cleared. 'tagged' will reject
			 * tagged packets, while 'untagged' will reject
			 * untagged packets.
			 */
			uint8_t tagged;
			uint8_t untagged;
		} reject;
	} config;
};

#define ICE_DEV_TO_PCI(eth_dev) \
	RTE_DEV_TO_PCI((eth_dev)->device)

/* ICE_DEV_PRIVATE_TO */
#define ICE_DEV_PRIVATE_TO_PF(adapter) \
	(&((struct ice_adapter *)adapter)->pf)
#define ICE_DEV_PRIVATE_TO_HW(adapter) \
	(&((struct ice_adapter *)adapter)->hw)
#define ICE_DEV_PRIVATE_TO_ADAPTER(adapter) \
	((struct ice_adapter *)adapter)

/* ICE_VSI_TO */
#define ICE_VSI_TO_HW(vsi) \
	(&(((struct ice_vsi *)vsi)->adapter->hw))
#define ICE_VSI_TO_PF(vsi) \
	(&(((struct ice_vsi *)vsi)->adapter->pf))
#define ICE_VSI_TO_ETH_DEV(vsi) \
	(((struct ice_vsi *)vsi)->adapter->eth_dev)

/* ICE_PF_TO */
#define ICE_PF_TO_HW(pf) \
	(&(((struct ice_pf *)pf)->adapter->hw))
#define ICE_PF_TO_ADAPTER(pf) \
	((struct ice_adapter *)(pf)->adapter)
#define ICE_PF_TO_ETH_DEV(pf) \
	(((struct ice_pf *)pf)->adapter->eth_dev)
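
/*
 * Illustrative usage sketch (not in the original header): inside a driver
 * routine that already holds a PF or VSI pointer, the shared structures are
 * typically reached via these helpers, e.g.
 *
 *	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 *	struct ice_adapter *ad = ICE_PF_TO_ADAPTER(pf);
 */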

struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type);
int
ice_release_vsi(struct ice_vsi *vsi);
void ice_vsi_enable_queues_intr(struct ice_vsi *vsi);
void ice_vsi_disable_queues_intr(struct ice_vsi *vsi);
void ice_vsi_queues_bind_intr(struct ice_vsi *vsi);

static inline int
ice_align_floor(int n)
{
	if (n == 0)
		return 0;
	return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n));
}
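
/*
 * Illustrative examples (not in the original header): ice_align_floor()
 * rounds down to the nearest power of two, so ice_align_floor(48) == 32
 * and ice_align_floor(64) == 64; 0 is returned for n == 0.
 */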

#define ICE_PHY_TYPE_SUPPORT_50G(phy_type) \
	(((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_CR2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_SR2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_LR2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_KR2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50G_LAUI2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_CP) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_SR) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_FR) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_LR) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC) || \
	((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI1))

#define ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type) \
	(((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CR4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_SR4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_LR4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_KR4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100G_CAUI4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100G_AUI4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CP2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_SR2) || \
	((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_DR))

#define ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type) \
	(((phy_type) & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) || \
	((phy_type) & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC) || \
	((phy_type) & ICE_PHY_TYPE_HIGH_100G_CAUI2) || \
	((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC) || \
	((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2))

#endif /* _ICE_ETHDEV_H_ */