1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5 
6 #ifndef RTE_PMD_MLX5_PRM_H_
7 #define RTE_PMD_MLX5_PRM_H_
8 
9 #include <unistd.h>
10 
11 #include <rte_vect.h>
12 #include <rte_byteorder.h>
13 
14 #include <mlx5_glue.h>
15 #include "mlx5_autoconf.h"
16 
17 /* RSS hash key size. */
18 #define MLX5_RSS_HASH_KEY_LEN 40
19 
20 /* Get CQE owner bit. */
21 #define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK)
22 
23 /* Get CQE format. */
24 #define MLX5_CQE_FORMAT(op_own) (((op_own) & MLX5E_CQE_FORMAT_MASK) >> 2)
25 
26 /* Get CQE opcode. */
27 #define MLX5_CQE_OPCODE(op_own) (((op_own) & 0xf0) >> 4)
28 
29 /* Get CQE solicited event. */
30 #define MLX5_CQE_SE(op_own) (((op_own) >> 1) & 1)
31 
32 /* Invalidate a CQE. */
33 #define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4)
34 
35 /* Hardware index widths. */
36 #define MLX5_CQ_INDEX_WIDTH 24
37 #define MLX5_WQ_INDEX_WIDTH 16
38 
39 /* WQE Segment sizes in bytes. */
40 #define MLX5_WSEG_SIZE 16u
41 #define MLX5_WQE_CSEG_SIZE sizeof(struct mlx5_wqe_cseg)
42 #define MLX5_WQE_DSEG_SIZE sizeof(struct mlx5_wqe_dseg)
43 #define MLX5_WQE_ESEG_SIZE sizeof(struct mlx5_wqe_eseg)
44 
45 /* WQE/WQEBB size in bytes. */
46 #define MLX5_WQE_SIZE sizeof(struct mlx5_wqe)
47 
48 /*
49  * Max size of a WQE session.
50  * The absolute maximum size is 63 (MLX5_DSEG_MAX) segments,
51  * since the WQE size field in the Control Segment is 6 bits wide.
52  */
53 #define MLX5_WQE_SIZE_MAX (60 * MLX5_WSEG_SIZE)
54 
55 /*
56  * Default minimum number of Tx queues for inlining packets.
57  * If there are fewer queues than specified, we assume there are
58  * not enough CPU resources (cycles) to perform inlining,
59  * the PCIe throughput is not expected to be the bottleneck, and
60  * inlining is disabled.
61  */
62 #define MLX5_INLINE_MAX_TXQS 8u
63 #define MLX5_INLINE_MAX_TXQS_BLUEFIELD 16u
64 
65 /*
66  * Default packet length threshold to be inlined with
67  * enhanced MPW. If the packet length exceeds the threshold
68  * the data are not inlined. Should be aligned to a WQEBB
69  * boundary, accounting for the title Control and Ethernet
70  * segments.
71  */
72 #define MLX5_EMPW_DEF_INLINE_LEN (4u * MLX5_WQE_SIZE + \
73 				  MLX5_DSEG_MIN_INLINE_SIZE)
74 /*
75  * Maximal inline data length sent with enhanced MPW.
76  * Based on the maximal WQE size.
77  */
78 #define MLX5_EMPW_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \
79 				  MLX5_WQE_CSEG_SIZE - \
80 				  MLX5_WQE_ESEG_SIZE - \
81 				  MLX5_WQE_DSEG_SIZE + \
82 				  MLX5_DSEG_MIN_INLINE_SIZE)
83 /*
84  * Minimal number of packets to be sent with EMPW.
85  * This limits the minimal required size of a sent EMPW.
86  * If there are not enough resources to build the minimal
87  * EMPW the sending loop exits.
88  */
89 #define MLX5_EMPW_MIN_PACKETS (2u + 3u * 4u)
90 /*
91  * Maximal number of packets to be sent with EMPW.
92  * This value should not exceed MLX5_TX_COMP_THRESH,
93  * otherwise there might be up to MLX5_EMPW_MAX_PACKETS mbufs
94  * without a CQE generation request; multiplied by
95  * MLX5_TX_COMP_MAX_CQE this may cause significant latency
96  * in the tx burst routine at the moment of freeing multiple mbufs.
97  */
98 #define MLX5_EMPW_MAX_PACKETS MLX5_TX_COMP_THRESH
99 #define MLX5_MPW_MAX_PACKETS 6
100 #define MLX5_MPW_INLINE_MAX_PACKETS 6
101 
102 /*
103  * Default packet length threshold to be inlined with
104  * ordinary SEND. Inlining saves the MR key search
105  * and an extra PCIe data fetch transaction, but consumes
106  * CPU cycles.
107  */
108 #define MLX5_SEND_DEF_INLINE_LEN (5U * MLX5_WQE_SIZE + \
109 				  MLX5_ESEG_MIN_INLINE_SIZE - \
110 				  MLX5_WQE_CSEG_SIZE - \
111 				  MLX5_WQE_ESEG_SIZE - \
112 				  MLX5_WQE_DSEG_SIZE)
113 /*
114  * Maximal inline data length sent with ordinary SEND.
115  * Based on the maximal WQE size.
116  */
117 #define MLX5_SEND_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \
118 				  MLX5_WQE_CSEG_SIZE - \
119 				  MLX5_WQE_ESEG_SIZE - \
120 				  MLX5_WQE_DSEG_SIZE + \
121 				  MLX5_ESEG_MIN_INLINE_SIZE)
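
/*
 * A worked example of the thresholds above, assuming the 16-byte segment and
 * 64-byte WQE layouts defined later in this file:
 *   MLX5_EMPW_DEF_INLINE_LEN = 4 * 64 + 12           = 268 bytes
 *   MLX5_EMPW_MAX_INLINE_LEN = 60 * 16 - 3 * 16 + 12 = 924 bytes
 *   MLX5_SEND_DEF_INLINE_LEN = 5 * 64 + 18 - 3 * 16  = 290 bytes
 *   MLX5_SEND_MAX_INLINE_LEN = 60 * 16 - 3 * 16 + 18 = 930 bytes
 */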
122 
123 /* May be missing in mlx5dv.h, define here if needed. */
124 #ifndef HAVE_MLX5_OPCODE_ENHANCED_MPSW
125 #define MLX5_OPCODE_ENHANCED_MPSW 0x29u
126 #endif
127 
128 #ifndef HAVE_MLX5_OPCODE_SEND_EN
129 #define MLX5_OPCODE_SEND_EN 0x17u
130 #endif
131 
132 #ifndef HAVE_MLX5_OPCODE_WAIT
133 #define MLX5_OPCODE_WAIT 0x0fu
134 #endif
135 
136 #ifndef HAVE_MLX5_OPCODE_ACCESS_ASO
137 #define MLX5_OPCODE_ACCESS_ASO 0x2du
138 #endif
139 
140 /* CQE value to inform that VLAN is stripped. */
141 #define MLX5_CQE_VLAN_STRIPPED (1u << 0)
142 
143 /* IPv4 options. */
144 #define MLX5_CQE_RX_IP_EXT_OPTS_PACKET (1u << 1)
145 
146 /* IPv6 packet. */
147 #define MLX5_CQE_RX_IPV6_PACKET (1u << 2)
148 
149 /* IPv4 packet. */
150 #define MLX5_CQE_RX_IPV4_PACKET (1u << 3)
151 
152 /* TCP packet. */
153 #define MLX5_CQE_RX_TCP_PACKET (1u << 4)
154 
155 /* UDP packet. */
156 #define MLX5_CQE_RX_UDP_PACKET (1u << 5)
157 
158 /* IP is fragmented. */
159 #define MLX5_CQE_RX_IP_FRAG_PACKET (1u << 7)
160 
161 /* L2 header is valid. */
162 #define MLX5_CQE_RX_L2_HDR_VALID (1u << 8)
163 
164 /* L3 header is valid. */
165 #define MLX5_CQE_RX_L3_HDR_VALID (1u << 9)
166 
167 /* L4 header is valid. */
168 #define MLX5_CQE_RX_L4_HDR_VALID (1u << 10)
169 
170 /* Outer packet, 0 IPv4, 1 IPv6. */
171 #define MLX5_CQE_RX_OUTER_PACKET (1u << 1)
172 
173 /* Tunnel packet bit in the CQE. */
174 #define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)
175 
176 /* Mask for LRO push flag in the CQE lro_tcppsh_abort_dupack field. */
177 #define MLX5_CQE_LRO_PUSH_MASK 0x40
178 
179 /* Mask for L4 type in the CQE hdr_type_etc field. */
180 #define MLX5_CQE_L4_TYPE_MASK 0x70
181 
182 /* The bit index of L4 type in CQE hdr_type_etc field. */
183 #define MLX5_CQE_L4_TYPE_SHIFT 0x4
184 
185 /* L4 type to indicate TCP packet without acknowledgment. */
186 #define MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK 0x3
187 
188 /* L4 type to indicate TCP packet with acknowledgment. */
189 #define MLX5_L4_HDR_TYPE_TCP_WITH_ACL 0x4
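
/*
 * A minimal usage sketch: classifying the L4 type reported in the CQE
 * hdr_type_etc field (already converted to host byte order by the caller)
 * with the mask and shift above, e.g. to detect pure TCP ACKs during LRO
 * processing. The helper name and parameter are illustrative only.
 */
static inline int
mlx5_cqe_l4_is_tcp_empty_ack(uint16_t hdr_type_etc)
{
	uint16_t l4_type = (hdr_type_etc & MLX5_CQE_L4_TYPE_MASK) >>
			   MLX5_CQE_L4_TYPE_SHIFT;

	return l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK;
}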
190 
191 /* Inner L3 checksum offload (Tunneled packets only). */
192 #define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4)
193 
194 /* Inner L4 checksum offload (Tunneled packets only). */
195 #define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)
196 
197 /* Outer L4 type is TCP. */
198 #define MLX5_ETH_WQE_L4_OUTER_TCP  (0u << 5)
199 
200 /* Outer L4 type is UDP. */
201 #define MLX5_ETH_WQE_L4_OUTER_UDP  (1u << 5)
202 
203 /* Outer L3 type is IPV4. */
204 #define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4)
205 
206 /* Outer L3 type is IPV6. */
207 #define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4)
208 
209 /* Inner L4 type is TCP. */
210 #define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1)
211 
212 /* Inner L4 type is UDP. */
213 #define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1)
214 
215 /* Inner L3 type is IPV4. */
216 #define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0)
217 
218 /* Inner L3 type is IPV6. */
219 #define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)
220 
221 /* VLAN insertion flag. */
222 #define MLX5_ETH_WQE_VLAN_INSERT (1u << 31)
223 
224 /* Data inline segment flag. */
225 #define MLX5_ETH_WQE_DATA_INLINE (1u << 31)
226 
227 /* Is flow mark valid. */
228 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
229 #define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
230 #else
231 #define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff)
232 #endif
233 
234 /* INVALID is used by packets matching no flow rules. */
235 #define MLX5_FLOW_MARK_INVALID 0
236 
237 /* Maximum allowed value to mark a packet. */
238 #define MLX5_FLOW_MARK_MAX 0xfffff0
239 
240 /* Default mark value used when none is provided. */
241 #define MLX5_FLOW_MARK_DEFAULT 0xffffff
242 
243 /* Default mark mask for metadata legacy mode. */
244 #define MLX5_FLOW_MARK_MASK 0xffffff
245 
246 /* Byte length mask when mark is enabled in miniCQE. */
247 #define MLX5_LEN_WITH_MARK_MASK 0xffffff00
248 
249 /* Maximum number of DS in WQE. Limited by 6-bit field. */
250 #define MLX5_DSEG_MAX 63
251 
252 /* The completion mode offset in the WQE control segment line 2. */
253 #define MLX5_COMP_MODE_OFFSET 2
254 
255 /* Amount of data bytes in minimal inline data segment. */
256 #define MLX5_DSEG_MIN_INLINE_SIZE 12u
257 
258 /* Amount of data bytes in minimal inline eth segment. */
259 #define MLX5_ESEG_MIN_INLINE_SIZE 18u
260 
261 /* Amount of data bytes after eth data segment. */
262 #define MLX5_ESEG_EXTRA_DATA_SIZE 32u
263 
264 /* The maximum log value of segments per RQ WQE. */
265 #define MLX5_MAX_LOG_RQ_SEGS 5u
266 
267 /* The alignment needed for WQ buffer. */
268 #define MLX5_WQE_BUF_ALIGNMENT rte_mem_page_size()
269 
270 /* The alignment needed for CQ buffer. */
271 #define MLX5_CQE_BUF_ALIGNMENT rte_mem_page_size()
272 
273 /* Completion mode. */
274 enum mlx5_completion_mode {
275 	MLX5_COMP_ONLY_ERR = 0x0,
276 	MLX5_COMP_ONLY_FIRST_ERR = 0x1,
277 	MLX5_COMP_ALWAYS = 0x2,
278 	MLX5_COMP_CQE_AND_EQE = 0x3,
279 };
280 
281 /* MPW mode. */
282 enum mlx5_mpw_mode {
283 	MLX5_MPW_DISABLED,
284 	MLX5_MPW,
285 	MLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a MPWv2. */
286 };
287 
288 /* WQE Control segment. */
289 struct mlx5_wqe_cseg {
290 	uint32_t opcode;
291 	uint32_t sq_ds;
292 	uint32_t flags;
293 	uint32_t misc;
294 } __rte_packed __rte_aligned(MLX5_WSEG_SIZE);
295 
296 /*
297  * WQE CSEG opcode field size is 32 bits, divided:
298  * Bits 31:24 OPC_MOD
299  * Bits 23:8 wqe_index
300  * Bits 7:0 OPCODE
301  */
302 #define WQE_CSEG_OPC_MOD_OFFSET		24
303 #define WQE_CSEG_WQE_INDEX_OFFSET	 8
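
/*
 * A minimal sketch of filling a Control Segment for a single SEND WQE
 * according to the field layout above. The helper name is illustrative;
 * "wqe_index", "sqn" (SQ/QP number) and "ds_count" stand for values the PMD
 * derives from its Tx queue state, and MLX5_OPCODE_SEND comes from mlx5dv.h.
 */
static inline void
mlx5_cseg_fill_send(struct mlx5_wqe_cseg *cs, uint16_t wqe_index,
		    uint32_t sqn, uint8_t ds_count)
{
	/* Bits 23:8 carry the WQE index, bits 7:0 the opcode, OPC_MOD is 0. */
	cs->opcode = rte_cpu_to_be_32((wqe_index << WQE_CSEG_WQE_INDEX_OFFSET) |
				      MLX5_OPCODE_SEND);
	/* SQ number in bits 31:8, number of data segments in the low bits. */
	cs->sq_ds = rte_cpu_to_be_32((sqn << 8) | ds_count);
	/* Request a CQE only for the first errored WQE. */
	cs->flags = rte_cpu_to_be_32(MLX5_COMP_ONLY_FIRST_ERR <<
				     MLX5_COMP_MODE_OFFSET);
	cs->misc = 0;
}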
304 
305 /* Header of a data segment. Minimal-size Data Segment. */
306 struct mlx5_wqe_dseg {
307 	uint32_t bcount;
308 	union {
309 		uint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];
310 		struct {
311 			uint32_t lkey;
312 			uint64_t pbuf;
313 		} __rte_packed;
314 	};
315 } __rte_packed;
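
/*
 * A minimal sketch of pointing a Data Segment at an external buffer.
 * "lkey" is assumed to be the big-endian memory key of the region covering
 * "buf"; the helper name and parameters are illustrative only.
 */
static inline void
mlx5_dseg_fill_ptr(struct mlx5_wqe_dseg *ds, const void *buf,
		   uint32_t len, uint32_t lkey)
{
	ds->bcount = rte_cpu_to_be_32(len);
	ds->lkey = lkey;
	ds->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
}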
316 
317 /* Subset of struct WQE Ethernet Segment. */
318 struct mlx5_wqe_eseg {
319 	union {
320 		struct {
321 			uint32_t swp_offs;
322 			uint8_t	cs_flags;
323 			uint8_t	swp_flags;
324 			uint16_t mss;
325 			uint32_t metadata;
326 			uint16_t inline_hdr_sz;
327 			union {
328 				uint16_t inline_data;
329 				uint16_t vlan_tag;
330 			};
331 		} __rte_packed;
332 		struct {
333 			uint32_t offsets;
334 			uint32_t flags;
335 			uint32_t flow_metadata;
336 			uint32_t inline_hdr;
337 		} __rte_packed;
338 	};
339 } __rte_packed;
340 
341 struct mlx5_wqe_qseg {
342 	uint32_t reserved0;
343 	uint32_t reserved1;
344 	uint32_t max_index;
345 	uint32_t qpn_cqn;
346 } __rte_packed;
347 
348 /* The title WQEBB, header of WQE. */
349 struct mlx5_wqe {
350 	union {
351 		struct mlx5_wqe_cseg cseg;
352 		uint32_t ctrl[4];
353 	};
354 	struct mlx5_wqe_eseg eseg;
355 	union {
356 		struct mlx5_wqe_dseg dseg[2];
357 		uint8_t data[MLX5_ESEG_EXTRA_DATA_SIZE];
358 	};
359 } __rte_packed;
360 
361 /* WQE for Multi-Packet RQ. */
362 struct mlx5_wqe_mprq {
363 	struct mlx5_wqe_srq_next_seg next_seg;
364 	struct mlx5_wqe_data_seg dseg;
365 };
366 
367 #define MLX5_MPRQ_LEN_MASK 0x000ffff
368 #define MLX5_MPRQ_LEN_SHIFT 0
369 #define MLX5_MPRQ_STRIDE_NUM_MASK 0x3fff0000
370 #define MLX5_MPRQ_STRIDE_NUM_SHIFT 16
371 #define MLX5_MPRQ_FILLER_MASK 0x80000000
372 #define MLX5_MPRQ_FILLER_SHIFT 31
373 
374 #define MLX5_MPRQ_STRIDE_SHIFT_BYTE 2
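
/*
 * A minimal sketch of decoding the 32-bit byte_cnt word of a Multi-Packet RQ
 * CQE (already converted to host byte order) into its packed sub-fields with
 * the masks above. The helper name is illustrative only.
 */
static inline void
mlx5_mprq_byte_cnt_decode(uint32_t byte_cnt, uint32_t *len,
			  uint32_t *strd_cnt, int *filler)
{
	*len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
	*strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
		    MLX5_MPRQ_STRIDE_NUM_SHIFT;
	*filler = !!(byte_cnt & MLX5_MPRQ_FILLER_MASK);
}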
375 
376 /* CQ element structure - should be equal to the cache line size */
377 struct mlx5_cqe {
378 #if (RTE_CACHE_LINE_SIZE == 128)
379 	uint8_t padding[64];
380 #endif
381 	uint8_t pkt_info;
382 	uint8_t rsvd0;
383 	uint16_t wqe_id;
384 	uint8_t lro_tcppsh_abort_dupack;
385 	uint8_t lro_min_ttl;
386 	uint16_t lro_tcp_win;
387 	uint32_t lro_ack_seq_num;
388 	uint32_t rx_hash_res;
389 	uint8_t rx_hash_type;
390 	uint8_t rsvd1[3];
391 	uint16_t csum;
392 	uint8_t rsvd2[6];
393 	uint16_t hdr_type_etc;
394 	uint16_t vlan_info;
395 	uint8_t lro_num_seg;
396 	uint8_t rsvd3[3];
397 	uint32_t flow_table_metadata;
398 	uint8_t rsvd4[4];
399 	uint32_t byte_cnt;
400 	uint64_t timestamp;
401 	uint32_t sop_drop_qpn;
402 	uint16_t wqe_counter;
403 	uint8_t rsvd5;
404 	uint8_t op_own;
405 };
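
/*
 * A minimal sketch of the ownership test a CQ polling loop performs with the
 * op_own helpers defined earlier. "cqe_n" is assumed to be the CQ size (a
 * power of two) and "ci" the software consumer index; MLX5_CQE_INVALID comes
 * from mlx5dv.h. Returns non-zero when the CQE is valid and owned by SW.
 */
static inline int
mlx5_cqe_sw_owned(const volatile struct mlx5_cqe *cqe,
		  uint16_t cqe_n, uint16_t ci)
{
	const uint8_t op_own = cqe->op_own;

	/* The owner bit toggles each time the consumer index wraps the CQ. */
	if (MLX5_CQE_OWNER(op_own) != !!(ci & cqe_n))
		return 0;
	/* An invalid opcode means the CQE has not been written by HW yet. */
	return MLX5_CQE_OPCODE(op_own) != MLX5_CQE_INVALID;
}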
406 
407 struct mlx5_cqe_ts {
408 	uint64_t timestamp;
409 	uint32_t sop_drop_qpn;
410 	uint16_t wqe_counter;
411 	uint8_t rsvd5;
412 	uint8_t op_own;
413 };
414 
415 /* MMO metadata segment */
416 
417 #define	MLX5_OPCODE_MMO	0x2f
418 #define	MLX5_OPC_MOD_MMO_REGEX 0x4
419 
420 struct mlx5_wqe_metadata_seg {
421 	uint32_t mmo_control_31_0; /* mmo_control_63_32 is in ctrl_seg.imm */
422 	uint32_t lkey;
423 	uint64_t addr;
424 };
425 
426 struct mlx5_ifc_regexp_mmo_control_bits {
427 	uint8_t reserved_at_31[0x2];
428 	uint8_t le[0x1];
429 	uint8_t reserved_at_28[0x1];
430 	uint8_t subset_id_0[0xc];
431 	uint8_t reserved_at_16[0x4];
432 	uint8_t subset_id_1[0xc];
433 	uint8_t ctrl[0x4];
434 	uint8_t subset_id_2[0xc];
435 	uint8_t reserved_at_16_1[0x4];
436 	uint8_t subset_id_3[0xc];
437 };
438 
439 struct mlx5_ifc_regexp_metadata_bits {
440 	uint8_t rof_version[0x10];
441 	uint8_t latency_count[0x10];
442 	uint8_t instruction_count[0x10];
443 	uint8_t primary_thread_count[0x10];
444 	uint8_t match_count[0x8];
445 	uint8_t detected_match_count[0x8];
446 	uint8_t status[0x10];
447 	uint8_t job_id[0x20];
448 	uint8_t reserved[0x80];
449 };
450 
451 struct mlx5_ifc_regexp_match_tuple_bits {
452 	uint8_t length[0x10];
453 	uint8_t start_ptr[0x10];
454 	uint8_t rule_id[0x20];
455 };
456 
457 /* Adding direct verbs to data-path. */
458 
459 /* CQ sequence number mask. */
460 #define MLX5_CQ_SQN_MASK 0x3
461 
462 /* CQ sequence number offset. */
463 #define MLX5_CQ_SQN_OFFSET 28
464 
465 /* CQ doorbell index mask. */
466 #define MLX5_CI_MASK 0xffffff
467 
468 /* CQ doorbell offset. */
469 #define MLX5_CQ_ARM_DB 1
470 
471 /* CQ doorbell offset. */
472 #define MLX5_CQ_DOORBELL 0x20
473 
474 /* CQE format value. */
475 #define MLX5_COMPRESSED 0x3
476 
477 /* CQ doorbell cmd types. */
478 #define MLX5_CQ_DBR_CMD_SOL_ONLY (1 << 24)
479 #define MLX5_CQ_DBR_CMD_ALL (0 << 24)
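
/*
 * A minimal sketch of composing the CQ arm doorbell value from the masks
 * above: the sequence number and consumer index form the high word and the
 * CQ number the low word. Writing the value to the doorbell record and UAR
 * is omitted; the helper name and parameters are illustrative only.
 */
static inline uint64_t
mlx5_cq_arm_db_value(uint32_t sn, uint32_t ci, uint32_t cqn)
{
	uint32_t db_hi = ((sn & MLX5_CQ_SQN_MASK) << MLX5_CQ_SQN_OFFSET) |
			 (ci & MLX5_CI_MASK);

	return ((uint64_t)db_hi << 32) | cqn;
}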
480 
481 /* Action type of header modification. */
482 enum {
483 	MLX5_MODIFICATION_TYPE_SET = 0x1,
484 	MLX5_MODIFICATION_TYPE_ADD = 0x2,
485 	MLX5_MODIFICATION_TYPE_COPY = 0x3,
486 };
487 
488 /* The field of packet to be modified. */
489 enum mlx5_modification_field {
490 	MLX5_MODI_OUT_NONE = -1,
491 	MLX5_MODI_OUT_SMAC_47_16 = 1,
492 	MLX5_MODI_OUT_SMAC_15_0,
493 	MLX5_MODI_OUT_ETHERTYPE,
494 	MLX5_MODI_OUT_DMAC_47_16,
495 	MLX5_MODI_OUT_DMAC_15_0,
496 	MLX5_MODI_OUT_IP_DSCP,
497 	MLX5_MODI_OUT_TCP_FLAGS,
498 	MLX5_MODI_OUT_TCP_SPORT,
499 	MLX5_MODI_OUT_TCP_DPORT,
500 	MLX5_MODI_OUT_IPV4_TTL,
501 	MLX5_MODI_OUT_UDP_SPORT,
502 	MLX5_MODI_OUT_UDP_DPORT,
503 	MLX5_MODI_OUT_SIPV6_127_96,
504 	MLX5_MODI_OUT_SIPV6_95_64,
505 	MLX5_MODI_OUT_SIPV6_63_32,
506 	MLX5_MODI_OUT_SIPV6_31_0,
507 	MLX5_MODI_OUT_DIPV6_127_96,
508 	MLX5_MODI_OUT_DIPV6_95_64,
509 	MLX5_MODI_OUT_DIPV6_63_32,
510 	MLX5_MODI_OUT_DIPV6_31_0,
511 	MLX5_MODI_OUT_SIPV4,
512 	MLX5_MODI_OUT_DIPV4,
513 	MLX5_MODI_OUT_FIRST_VID,
514 	MLX5_MODI_IN_SMAC_47_16 = 0x31,
515 	MLX5_MODI_IN_SMAC_15_0,
516 	MLX5_MODI_IN_ETHERTYPE,
517 	MLX5_MODI_IN_DMAC_47_16,
518 	MLX5_MODI_IN_DMAC_15_0,
519 	MLX5_MODI_IN_IP_DSCP,
520 	MLX5_MODI_IN_TCP_FLAGS,
521 	MLX5_MODI_IN_TCP_SPORT,
522 	MLX5_MODI_IN_TCP_DPORT,
523 	MLX5_MODI_IN_IPV4_TTL,
524 	MLX5_MODI_IN_UDP_SPORT,
525 	MLX5_MODI_IN_UDP_DPORT,
526 	MLX5_MODI_IN_SIPV6_127_96,
527 	MLX5_MODI_IN_SIPV6_95_64,
528 	MLX5_MODI_IN_SIPV6_63_32,
529 	MLX5_MODI_IN_SIPV6_31_0,
530 	MLX5_MODI_IN_DIPV6_127_96,
531 	MLX5_MODI_IN_DIPV6_95_64,
532 	MLX5_MODI_IN_DIPV6_63_32,
533 	MLX5_MODI_IN_DIPV6_31_0,
534 	MLX5_MODI_IN_SIPV4,
535 	MLX5_MODI_IN_DIPV4,
536 	MLX5_MODI_OUT_IPV6_HOPLIMIT,
537 	MLX5_MODI_IN_IPV6_HOPLIMIT,
538 	MLX5_MODI_META_DATA_REG_A,
539 	MLX5_MODI_META_DATA_REG_B = 0x50,
540 	MLX5_MODI_META_REG_C_0,
541 	MLX5_MODI_META_REG_C_1,
542 	MLX5_MODI_META_REG_C_2,
543 	MLX5_MODI_META_REG_C_3,
544 	MLX5_MODI_META_REG_C_4,
545 	MLX5_MODI_META_REG_C_5,
546 	MLX5_MODI_META_REG_C_6,
547 	MLX5_MODI_META_REG_C_7,
548 	MLX5_MODI_OUT_TCP_SEQ_NUM,
549 	MLX5_MODI_IN_TCP_SEQ_NUM,
550 	MLX5_MODI_OUT_TCP_ACK_NUM,
551 	MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
552 };
553 
554 /* Total number of metadata reg_c's. */
555 #define MLX5_MREG_C_NUM (MLX5_MODI_META_REG_C_7 - MLX5_MODI_META_REG_C_0 + 1)
556 
557 enum modify_reg {
558 	REG_NON = 0,
559 	REG_A,
560 	REG_B,
561 	REG_C_0,
562 	REG_C_1,
563 	REG_C_2,
564 	REG_C_3,
565 	REG_C_4,
566 	REG_C_5,
567 	REG_C_6,
568 	REG_C_7,
569 };
570 
571 /* Modification sub command. */
572 struct mlx5_modification_cmd {
573 	union {
574 		uint32_t data0;
575 		struct {
576 			unsigned int length:5;
577 			unsigned int rsvd0:3;
578 			unsigned int offset:5;
579 			unsigned int rsvd1:3;
580 			unsigned int field:12;
581 			unsigned int action_type:4;
582 		};
583 	};
584 	union {
585 		uint32_t data1;
586 		uint8_t data[4];
587 		struct {
588 			unsigned int rsvd2:8;
589 			unsigned int dst_offset:5;
590 			unsigned int rsvd3:3;
591 			unsigned int dst_field:12;
592 			unsigned int rsvd4:4;
593 		};
594 	};
595 };
596 
597 typedef uint32_t u32;
598 typedef uint16_t u16;
599 typedef uint8_t u8;
600 
601 #define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
602 #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
603 #define __mlx5_bit_off(typ, fld) ((unsigned int)(unsigned long) \
604 				  (&(__mlx5_nullp(typ)->fld)))
605 #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - \
606 				    (__mlx5_bit_off(typ, fld) & 0x1f))
607 #define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
608 #define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
609 #define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << \
610 				  __mlx5_dw_bit_off(typ, fld))
611 #define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
612 #define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
613 #define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - \
614 				    (__mlx5_bit_off(typ, fld) & 0xf))
615 #define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
616 #define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << \
617 				  __mlx5_16_bit_off(typ, fld))
618 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
619 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
620 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
621 #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
622 
623 /* Insert a value into a structure field. */
624 #define MLX5_SET(typ, p, fld, v) \
625 	do { \
626 		u32 _v = v; \
627 		*((rte_be32_t *)(p) + __mlx5_dw_off(typ, fld)) = \
628 		rte_cpu_to_be_32((rte_be_to_cpu_32(*((u32 *)(p) + \
629 				  __mlx5_dw_off(typ, fld))) & \
630 				  (~__mlx5_dw_mask(typ, fld))) | \
631 				 (((_v) & __mlx5_mask(typ, fld)) << \
632 				   __mlx5_dw_bit_off(typ, fld))); \
633 	} while (0)
634 
635 #define MLX5_SET64(typ, p, fld, v) \
636 	do { \
637 		MLX5_ASSERT(__mlx5_bit_sz(typ, fld) == 64); \
638 		*((rte_be64_t *)(p) + __mlx5_64_off(typ, fld)) = \
639 			rte_cpu_to_be_64(v); \
640 	} while (0)
641 
642 #define MLX5_SET16(typ, p, fld, v) \
643 	do { \
644 		u16 _v = v; \
645 		*((rte_be16_t *)(p) + __mlx5_16_off(typ, fld)) = \
646 		rte_cpu_to_be_16((rte_be_to_cpu_16(*((rte_be16_t *)(p) + \
647 				  __mlx5_16_off(typ, fld))) & \
648 				  (~__mlx5_16_mask(typ, fld))) | \
649 				 (((_v) & __mlx5_mask16(typ, fld)) << \
650 				  __mlx5_16_bit_off(typ, fld))); \
651 	} while (0)
652 
653 #define MLX5_GET_VOLATILE(typ, p, fld) \
654 	((rte_be_to_cpu_32(*((volatile __be32 *)(p) +\
655 	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
656 	__mlx5_mask(typ, fld))
657 #define MLX5_GET(typ, p, fld) \
658 	((rte_be_to_cpu_32(*((rte_be32_t *)(p) +\
659 	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
660 	__mlx5_mask(typ, fld))
661 #define MLX5_GET16(typ, p, fld) \
662 	((rte_be_to_cpu_16(*((rte_be16_t *)(p) + \
663 	  __mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
664 	 __mlx5_mask16(typ, fld))
665 #define MLX5_GET64(typ, p, fld) rte_be_to_cpu_64(*((rte_be64_t *)(p) + \
666 						   __mlx5_64_off(typ, fld)))
667 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
668 
669 struct mlx5_ifc_fte_match_set_misc_bits {
670 	u8 gre_c_present[0x1];
671 	u8 reserved_at_1[0x1];
672 	u8 gre_k_present[0x1];
673 	u8 gre_s_present[0x1];
674 	u8 source_vhci_port[0x4];
675 	u8 source_sqn[0x18];
676 	u8 reserved_at_20[0x10];
677 	u8 source_port[0x10];
678 	u8 outer_second_prio[0x3];
679 	u8 outer_second_cfi[0x1];
680 	u8 outer_second_vid[0xc];
681 	u8 inner_second_prio[0x3];
682 	u8 inner_second_cfi[0x1];
683 	u8 inner_second_vid[0xc];
684 	u8 outer_second_cvlan_tag[0x1];
685 	u8 inner_second_cvlan_tag[0x1];
686 	u8 outer_second_svlan_tag[0x1];
687 	u8 inner_second_svlan_tag[0x1];
688 	u8 reserved_at_64[0xc];
689 	u8 gre_protocol[0x10];
690 	u8 gre_key_h[0x18];
691 	u8 gre_key_l[0x8];
692 	u8 vxlan_vni[0x18];
693 	u8 reserved_at_b8[0x8];
694 	u8 geneve_vni[0x18];
695 	u8 reserved_at_e4[0x7];
696 	u8 geneve_oam[0x1];
697 	u8 reserved_at_e0[0xc];
698 	u8 outer_ipv6_flow_label[0x14];
699 	u8 reserved_at_100[0xc];
700 	u8 inner_ipv6_flow_label[0x14];
701 	u8 reserved_at_120[0xa];
702 	u8 geneve_opt_len[0x6];
703 	u8 geneve_protocol_type[0x10];
704 	u8 reserved_at_140[0xc0];
705 };
706 
707 struct mlx5_ifc_ipv4_layout_bits {
708 	u8 reserved_at_0[0x60];
709 	u8 ipv4[0x20];
710 };
711 
712 struct mlx5_ifc_ipv6_layout_bits {
713 	u8 ipv6[16][0x8];
714 };
715 
716 union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
717 	struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
718 	struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
719 	u8 reserved_at_0[0x80];
720 };
721 
722 struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
723 	u8 smac_47_16[0x20];
724 	u8 smac_15_0[0x10];
725 	u8 ethertype[0x10];
726 	u8 dmac_47_16[0x20];
727 	u8 dmac_15_0[0x10];
728 	u8 first_prio[0x3];
729 	u8 first_cfi[0x1];
730 	u8 first_vid[0xc];
731 	u8 ip_protocol[0x8];
732 	u8 ip_dscp[0x6];
733 	u8 ip_ecn[0x2];
734 	u8 cvlan_tag[0x1];
735 	u8 svlan_tag[0x1];
736 	u8 frag[0x1];
737 	u8 ip_version[0x4];
738 	u8 tcp_flags[0x9];
739 	u8 tcp_sport[0x10];
740 	u8 tcp_dport[0x10];
741 	u8 reserved_at_c0[0x18];
742 	u8 ip_ttl_hoplimit[0x8];
743 	u8 udp_sport[0x10];
744 	u8 udp_dport[0x10];
745 	union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
746 	union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
747 };
748 
749 struct mlx5_ifc_fte_match_mpls_bits {
750 	u8 mpls_label[0x14];
751 	u8 mpls_exp[0x3];
752 	u8 mpls_s_bos[0x1];
753 	u8 mpls_ttl[0x8];
754 };
755 
756 struct mlx5_ifc_fte_match_set_misc2_bits {
757 	struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls;
758 	struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;
759 	struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;
760 	struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;
761 	u8 metadata_reg_c_7[0x20];
762 	u8 metadata_reg_c_6[0x20];
763 	u8 metadata_reg_c_5[0x20];
764 	u8 metadata_reg_c_4[0x20];
765 	u8 metadata_reg_c_3[0x20];
766 	u8 metadata_reg_c_2[0x20];
767 	u8 metadata_reg_c_1[0x20];
768 	u8 metadata_reg_c_0[0x20];
769 	u8 metadata_reg_a[0x20];
770 	u8 metadata_reg_b[0x20];
771 	u8 reserved_at_1c0[0x40];
772 };
773 
774 struct mlx5_ifc_fte_match_set_misc3_bits {
775 	u8 inner_tcp_seq_num[0x20];
776 	u8 outer_tcp_seq_num[0x20];
777 	u8 inner_tcp_ack_num[0x20];
778 	u8 outer_tcp_ack_num[0x20];
779 	u8 reserved_at_auto1[0x8];
780 	u8 outer_vxlan_gpe_vni[0x18];
781 	u8 outer_vxlan_gpe_next_protocol[0x8];
782 	u8 outer_vxlan_gpe_flags[0x8];
783 	u8 reserved_at_a8[0x10];
784 	u8 icmp_header_data[0x20];
785 	u8 icmpv6_header_data[0x20];
786 	u8 icmp_type[0x8];
787 	u8 icmp_code[0x8];
788 	u8 icmpv6_type[0x8];
789 	u8 icmpv6_code[0x8];
790 	u8 reserved_at_120[0x20];
791 	u8 gtpu_teid[0x20];
792 	u8 gtpu_msg_type[0x08];
793 	u8 gtpu_msg_flags[0x08];
794 	u8 reserved_at_170[0x90];
795 };
796 
797 struct mlx5_ifc_fte_match_set_misc4_bits {
798 	u8 prog_sample_field_value_0[0x20];
799 	u8 prog_sample_field_id_0[0x20];
800 	u8 prog_sample_field_value_1[0x20];
801 	u8 prog_sample_field_id_1[0x20];
802 	u8 prog_sample_field_value_2[0x20];
803 	u8 prog_sample_field_id_2[0x20];
804 	u8 prog_sample_field_value_3[0x20];
805 	u8 prog_sample_field_id_3[0x20];
806 	u8 reserved_at_100[0x100];
807 };
808 
809 /* Flow matcher. */
810 struct mlx5_ifc_fte_match_param_bits {
811 	struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
812 	struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
813 	struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
814 	struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
815 	struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
816 	struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4;
817 };
818 
819 enum {
820 	MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT,
821 	MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT,
822 	MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT,
823 	MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT,
824 	MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT,
825 	MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT,
826 };
827 
828 enum {
829 	MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
830 	MLX5_CMD_OP_CREATE_MKEY = 0x200,
831 	MLX5_CMD_OP_CREATE_CQ = 0x400,
832 	MLX5_CMD_OP_CREATE_QP = 0x500,
833 	MLX5_CMD_OP_RST2INIT_QP = 0x502,
834 	MLX5_CMD_OP_INIT2RTR_QP = 0x503,
835 	MLX5_CMD_OP_RTR2RTS_QP = 0x504,
836 	MLX5_CMD_OP_RTS2RTS_QP = 0x505,
837 	MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
838 	MLX5_CMD_OP_QP_2ERR = 0x507,
839 	MLX5_CMD_OP_QP_2RST = 0x50A,
840 	MLX5_CMD_OP_QUERY_QP = 0x50B,
841 	MLX5_CMD_OP_SQD2RTS_QP = 0x50C,
842 	MLX5_CMD_OP_INIT2INIT_QP = 0x50E,
843 	MLX5_CMD_OP_SUSPEND_QP = 0x50F,
844 	MLX5_CMD_OP_RESUME_QP = 0x510,
845 	MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
846 	MLX5_CMD_OP_ACCESS_REGISTER = 0x805,
847 	MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
848 	MLX5_CMD_OP_CREATE_TIR = 0x900,
849 	MLX5_CMD_OP_MODIFY_TIR = 0x901,
850 	MLX5_CMD_OP_CREATE_SQ = 0X904,
851 	MLX5_CMD_OP_MODIFY_SQ = 0X905,
852 	MLX5_CMD_OP_CREATE_RQ = 0x908,
853 	MLX5_CMD_OP_MODIFY_RQ = 0x909,
854 	MLX5_CMD_OP_CREATE_TIS = 0x912,
855 	MLX5_CMD_OP_QUERY_TIS = 0x915,
856 	MLX5_CMD_OP_CREATE_RQT = 0x916,
857 	MLX5_CMD_OP_MODIFY_RQT = 0x917,
858 	MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
859 	MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
860 	MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
861 	MLX5_CMD_OP_MODIFY_GENERAL_OBJECT = 0xa01,
862 	MLX5_CMD_OP_QUERY_GENERAL_OBJECT = 0xa02,
863 	MLX5_CMD_SET_REGEX_PARAMS = 0xb04,
864 	MLX5_CMD_QUERY_REGEX_PARAMS = 0xb05,
865 	MLX5_CMD_SET_REGEX_REGISTERS = 0xb06,
866 	MLX5_CMD_QUERY_REGEX_REGISTERS = 0xb07,
867 	MLX5_CMD_OP_ACCESS_REGISTER_USER = 0xb0c,
868 };
869 
870 enum {
871 	MLX5_MKC_ACCESS_MODE_MTT   = 0x1,
872 	MLX5_MKC_ACCESS_MODE_KLM   = 0x2,
873 	MLX5_MKC_ACCESS_MODE_KLM_FBS = 0x3,
874 };
875 
876 #define MLX5_ADAPTER_PAGE_SHIFT 12
877 #define MLX5_LOG_RQ_STRIDE_SHIFT 4
878 /**
879  * The batch counter dcs id starts from 0x800000 while the non-batch counter
880  * starts from 0. Currently the counter is indexed by the pool index and the
881  * offset of the counter in the pool counters_raw array, so the counter index
882  * is the same for batch and non-batch counters. Adding the 0x800000 batch
883  * counter offset to the batch counter index helps indicate whether the
884  * counter index comes from the batch or the non-batch container pool.
885  */
886 #define MLX5_CNT_BATCH_OFFSET 0x800000
887 
888 /* The counter batch query requires the ID to be aligned to 4. */
889 #define MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT 4
890 
891 /* Flow counters. */
892 struct mlx5_ifc_alloc_flow_counter_out_bits {
893 	u8         status[0x8];
894 	u8         reserved_at_8[0x18];
895 	u8         syndrome[0x20];
896 	u8         flow_counter_id[0x20];
897 	u8         reserved_at_60[0x20];
898 };
899 
900 struct mlx5_ifc_alloc_flow_counter_in_bits {
901 	u8         opcode[0x10];
902 	u8         reserved_at_10[0x10];
903 	u8         reserved_at_20[0x10];
904 	u8         op_mod[0x10];
905 	u8         flow_counter_id[0x20];
906 	u8         reserved_at_40[0x18];
907 	u8         flow_counter_bulk[0x8];
908 };
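
/*
 * A minimal sketch of preparing an ALLOC_FLOW_COUNTER command mailbox with
 * the accessor macros above and reading the result back. "in" and "out" are
 * assumed to be zeroed arrays of MLX5_ST_SZ_DW(alloc_flow_counter_in/out)
 * dwords; actual submission through the DevX API is omitted.
 */
static inline uint32_t
mlx5_flow_counter_alloc_cmd(uint32_t *in, uint32_t *out)
{
	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	/* 0 requests a single counter instead of a bulk. */
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, 0);
	/* ... execute the command through DevX, then parse the response ... */
	return MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
}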
909 
910 struct mlx5_ifc_dealloc_flow_counter_out_bits {
911 	u8         status[0x8];
912 	u8         reserved_at_8[0x18];
913 	u8         syndrome[0x20];
914 	u8         reserved_at_40[0x40];
915 };
916 
917 struct mlx5_ifc_dealloc_flow_counter_in_bits {
918 	u8         opcode[0x10];
919 	u8         reserved_at_10[0x10];
920 	u8         reserved_at_20[0x10];
921 	u8         op_mod[0x10];
922 	u8         flow_counter_id[0x20];
923 	u8         reserved_at_60[0x20];
924 };
925 
926 struct mlx5_ifc_traffic_counter_bits {
927 	u8         packets[0x40];
928 	u8         octets[0x40];
929 };
930 
931 struct mlx5_ifc_query_flow_counter_out_bits {
932 	u8         status[0x8];
933 	u8         reserved_at_8[0x18];
934 	u8         syndrome[0x20];
935 	u8         reserved_at_40[0x40];
936 	struct mlx5_ifc_traffic_counter_bits flow_statistics[];
937 };
938 
939 struct mlx5_ifc_query_flow_counter_in_bits {
940 	u8         opcode[0x10];
941 	u8         reserved_at_10[0x10];
942 	u8         reserved_at_20[0x10];
943 	u8         op_mod[0x10];
944 	u8         reserved_at_40[0x20];
945 	u8         mkey[0x20];
946 	u8         address[0x40];
947 	u8         clear[0x1];
948 	u8         dump_to_memory[0x1];
949 	u8         num_of_counters[0x1e];
950 	u8         flow_counter_id[0x20];
951 };
952 
953 #define MLX5_MAX_KLM_BYTE_COUNT 0x80000000u
954 #define MLX5_MIN_KLM_FIXED_BUFFER_SIZE 0x1000u
955 
956 
957 struct mlx5_ifc_klm_bits {
958 	u8         byte_count[0x20];
959 	u8         mkey[0x20];
960 	u8         address[0x40];
961 };
962 
963 struct mlx5_ifc_mkc_bits {
964 	u8         reserved_at_0[0x1];
965 	u8         free[0x1];
966 	u8         reserved_at_2[0x1];
967 	u8         access_mode_4_2[0x3];
968 	u8         reserved_at_6[0x7];
969 	u8         relaxed_ordering_write[0x1];
970 	u8         reserved_at_e[0x1];
971 	u8         small_fence_on_rdma_read_response[0x1];
972 	u8         umr_en[0x1];
973 	u8         a[0x1];
974 	u8         rw[0x1];
975 	u8         rr[0x1];
976 	u8         lw[0x1];
977 	u8         lr[0x1];
978 	u8         access_mode_1_0[0x2];
979 	u8         reserved_at_18[0x8];
980 
981 	u8         qpn[0x18];
982 	u8         mkey_7_0[0x8];
983 
984 	u8         reserved_at_40[0x20];
985 
986 	u8         length64[0x1];
987 	u8         bsf_en[0x1];
988 	u8         sync_umr[0x1];
989 	u8         reserved_at_63[0x2];
990 	u8         expected_sigerr_count[0x1];
991 	u8         reserved_at_66[0x1];
992 	u8         en_rinval[0x1];
993 	u8         pd[0x18];
994 
995 	u8         start_addr[0x40];
996 
997 	u8         len[0x40];
998 
999 	u8         bsf_octword_size[0x20];
1000 
1001 	u8         reserved_at_120[0x80];
1002 
1003 	u8         translations_octword_size[0x20];
1004 
1005 	u8         reserved_at_1c0[0x19];
1006 	u8		   relaxed_ordering_read[0x1];
1007 	u8		   reserved_at_1da[0x1];
1008 	u8         log_page_size[0x5];
1009 
1010 	u8         reserved_at_1e0[0x20];
1011 };
1012 
1013 struct mlx5_ifc_create_mkey_out_bits {
1014 	u8         status[0x8];
1015 	u8         reserved_at_8[0x18];
1016 
1017 	u8         syndrome[0x20];
1018 
1019 	u8         reserved_at_40[0x8];
1020 	u8         mkey_index[0x18];
1021 
1022 	u8         reserved_at_60[0x20];
1023 };
1024 
1025 struct mlx5_ifc_create_mkey_in_bits {
1026 	u8         opcode[0x10];
1027 	u8         reserved_at_10[0x10];
1028 
1029 	u8         reserved_at_20[0x10];
1030 	u8         op_mod[0x10];
1031 
1032 	u8         reserved_at_40[0x20];
1033 
1034 	u8         pg_access[0x1];
1035 	u8         reserved_at_61[0x1f];
1036 
1037 	struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
1038 
1039 	u8         reserved_at_280[0x80];
1040 
1041 	u8         translations_octword_actual_size[0x20];
1042 
1043 	u8         mkey_umem_id[0x20];
1044 
1045 	u8         mkey_umem_offset[0x40];
1046 
1047 	u8         reserved_at_380[0x500];
1048 
1049 	u8         klm_pas_mtt[][0x20];
1050 };
1051 
1052 enum {
1053 	MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0 << 1,
1054 	MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS = 0x1 << 1,
1055 	MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP = 0xc << 1,
1056 	MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE = 0x7 << 1,
1057 	MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION = 0x13 << 1,
1058 };
1059 
1060 #define MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q \
1061 			(1ULL << MLX5_GENERAL_OBJ_TYPE_VIRTQ)
1062 #define MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS \
1063 			(1ULL << MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS)
1064 #define MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE \
1065 			(1ULL << MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH)
1066 #define MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_HIT_ASO \
1067 			(1ULL << MLX5_GENERAL_OBJ_TYPE_FLOW_HIT_ASO)
1068 
1069 enum {
1070 	MLX5_HCA_CAP_OPMOD_GET_MAX   = 0,
1071 	MLX5_HCA_CAP_OPMOD_GET_CUR   = 1,
1072 };
1073 
1074 enum {
1075 	MLX5_CAP_INLINE_MODE_L2,
1076 	MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
1077 	MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
1078 };
1079 
1080 enum {
1081 	MLX5_INLINE_MODE_NONE,
1082 	MLX5_INLINE_MODE_L2,
1083 	MLX5_INLINE_MODE_IP,
1084 	MLX5_INLINE_MODE_TCP_UDP,
1085 	MLX5_INLINE_MODE_RESERVED4,
1086 	MLX5_INLINE_MODE_INNER_L2,
1087 	MLX5_INLINE_MODE_INNER_IP,
1088 	MLX5_INLINE_MODE_INNER_TCP_UDP,
1089 };
1090 
1091 /* HCA bit masks indicating which Flex parser protocols are already enabled. */
1092 #define MLX5_HCA_FLEX_IPV4_OVER_VXLAN_ENABLED (1UL << 0)
1093 #define MLX5_HCA_FLEX_IPV6_OVER_VXLAN_ENABLED (1UL << 1)
1094 #define MLX5_HCA_FLEX_IPV6_OVER_IP_ENABLED (1UL << 2)
1095 #define MLX5_HCA_FLEX_GENEVE_ENABLED (1UL << 3)
1096 #define MLX5_HCA_FLEX_CW_MPLS_OVER_GRE_ENABLED (1UL << 4)
1097 #define MLX5_HCA_FLEX_CW_MPLS_OVER_UDP_ENABLED (1UL << 5)
1098 #define MLX5_HCA_FLEX_P_BIT_VXLAN_GPE_ENABLED (1UL << 6)
1099 #define MLX5_HCA_FLEX_VXLAN_GPE_ENABLED (1UL << 7)
1100 #define MLX5_HCA_FLEX_ICMP_ENABLED (1UL << 8)
1101 #define MLX5_HCA_FLEX_ICMPV6_ENABLED (1UL << 9)
1102 
1103 struct mlx5_ifc_cmd_hca_cap_bits {
1104 	u8 reserved_at_0[0x30];
1105 	u8 vhca_id[0x10];
1106 	u8 reserved_at_40[0x40];
1107 	u8 log_max_srq_sz[0x8];
1108 	u8 log_max_qp_sz[0x8];
1109 	u8 reserved_at_90[0x9];
1110 	u8 wqe_index_ignore_cap[0x1];
1111 	u8 dynamic_qp_allocation[0x1];
1112 	u8 log_max_qp[0x5];
1113 	u8 regexp[0x1];
1114 	u8 reserved_at_a1[0x3];
1115 	u8 regexp_num_of_engines[0x4];
1116 	u8 reserved_at_a8[0x3];
1117 	u8 log_max_srq[0x5];
1118 	u8 reserved_at_b0[0x3];
1119 	u8 regexp_log_crspace_size[0x5];
1120 	u8 reserved_at_b8[0x3];
1121 	u8 scatter_fcs_w_decap_disable[0x1];
1122 	u8 reserved_at_bc[0x4];
1123 	u8 reserved_at_c0[0x8];
1124 	u8 log_max_cq_sz[0x8];
1125 	u8 reserved_at_d0[0xb];
1126 	u8 log_max_cq[0x5];
1127 	u8 log_max_eq_sz[0x8];
1128 	u8 relaxed_ordering_write[0x1];
1129 	u8 relaxed_ordering_read[0x1];
1130 	u8 access_register_user[0x1];
1131 	u8 log_max_mkey[0x5];
1132 	u8 reserved_at_f0[0x8];
1133 	u8 dump_fill_mkey[0x1];
1134 	u8 reserved_at_f9[0x3];
1135 	u8 log_max_eq[0x4];
1136 	u8 max_indirection[0x8];
1137 	u8 fixed_buffer_size[0x1];
1138 	u8 log_max_mrw_sz[0x7];
1139 	u8 force_teardown[0x1];
1140 	u8 reserved_at_111[0x1];
1141 	u8 log_max_bsf_list_size[0x6];
1142 	u8 umr_extended_translation_offset[0x1];
1143 	u8 null_mkey[0x1];
1144 	u8 log_max_klm_list_size[0x6];
1145 	u8 non_wire_sq[0x1];
1146 	u8 reserved_at_121[0x9];
1147 	u8 log_max_ra_req_dc[0x6];
1148 	u8 reserved_at_130[0x3];
1149 	u8 log_max_static_sq_wq[0x5];
1150 	u8 reserved_at_138[0x2];
1151 	u8 log_max_ra_res_dc[0x6];
1152 	u8 reserved_at_140[0xa];
1153 	u8 log_max_ra_req_qp[0x6];
1154 	u8 reserved_at_150[0xa];
1155 	u8 log_max_ra_res_qp[0x6];
1156 	u8 end_pad[0x1];
1157 	u8 cc_query_allowed[0x1];
1158 	u8 cc_modify_allowed[0x1];
1159 	u8 start_pad[0x1];
1160 	u8 cache_line_128byte[0x1];
1161 	u8 reserved_at_165[0xa];
1162 	u8 qcam_reg[0x1];
1163 	u8 gid_table_size[0x10];
1164 	u8 out_of_seq_cnt[0x1];
1165 	u8 vport_counters[0x1];
1166 	u8 retransmission_q_counters[0x1];
1167 	u8 debug[0x1];
1168 	u8 modify_rq_counter_set_id[0x1];
1169 	u8 rq_delay_drop[0x1];
1170 	u8 max_qp_cnt[0xa];
1171 	u8 pkey_table_size[0x10];
1172 	u8 vport_group_manager[0x1];
1173 	u8 vhca_group_manager[0x1];
1174 	u8 ib_virt[0x1];
1175 	u8 eth_virt[0x1];
1176 	u8 vnic_env_queue_counters[0x1];
1177 	u8 ets[0x1];
1178 	u8 nic_flow_table[0x1];
1179 	u8 eswitch_manager[0x1];
1180 	u8 device_memory[0x1];
1181 	u8 mcam_reg[0x1];
1182 	u8 pcam_reg[0x1];
1183 	u8 local_ca_ack_delay[0x5];
1184 	u8 port_module_event[0x1];
1185 	u8 enhanced_error_q_counters[0x1];
1186 	u8 ports_check[0x1];
1187 	u8 reserved_at_1b3[0x1];
1188 	u8 disable_link_up[0x1];
1189 	u8 beacon_led[0x1];
1190 	u8 port_type[0x2];
1191 	u8 num_ports[0x8];
1192 	u8 reserved_at_1c0[0x1];
1193 	u8 pps[0x1];
1194 	u8 pps_modify[0x1];
1195 	u8 log_max_msg[0x5];
1196 	u8 reserved_at_1c8[0x4];
1197 	u8 max_tc[0x4];
1198 	u8 temp_warn_event[0x1];
1199 	u8 dcbx[0x1];
1200 	u8 general_notification_event[0x1];
1201 	u8 reserved_at_1d3[0x2];
1202 	u8 fpga[0x1];
1203 	u8 rol_s[0x1];
1204 	u8 rol_g[0x1];
1205 	u8 reserved_at_1d8[0x1];
1206 	u8 wol_s[0x1];
1207 	u8 wol_g[0x1];
1208 	u8 wol_a[0x1];
1209 	u8 wol_b[0x1];
1210 	u8 wol_m[0x1];
1211 	u8 wol_u[0x1];
1212 	u8 wol_p[0x1];
1213 	u8 stat_rate_support[0x10];
1214 	u8 reserved_at_1f0[0xc];
1215 	u8 cqe_version[0x4];
1216 	u8 compact_address_vector[0x1];
1217 	u8 striding_rq[0x1];
1218 	u8 reserved_at_202[0x1];
1219 	u8 ipoib_enhanced_offloads[0x1];
1220 	u8 ipoib_basic_offloads[0x1];
1221 	u8 reserved_at_205[0x1];
1222 	u8 repeated_block_disabled[0x1];
1223 	u8 umr_modify_entity_size_disabled[0x1];
1224 	u8 umr_modify_atomic_disabled[0x1];
1225 	u8 umr_indirect_mkey_disabled[0x1];
1226 	u8 umr_fence[0x2];
1227 	u8 reserved_at_20c[0x3];
1228 	u8 drain_sigerr[0x1];
1229 	u8 cmdif_checksum[0x2];
1230 	u8 sigerr_cqe[0x1];
1231 	u8 reserved_at_213[0x1];
1232 	u8 wq_signature[0x1];
1233 	u8 sctr_data_cqe[0x1];
1234 	u8 reserved_at_216[0x1];
1235 	u8 sho[0x1];
1236 	u8 tph[0x1];
1237 	u8 rf[0x1];
1238 	u8 dct[0x1];
1239 	u8 qos[0x1];
1240 	u8 eth_net_offloads[0x1];
1241 	u8 roce[0x1];
1242 	u8 atomic[0x1];
1243 	u8 reserved_at_21f[0x1];
1244 	u8 cq_oi[0x1];
1245 	u8 cq_resize[0x1];
1246 	u8 cq_moderation[0x1];
1247 	u8 reserved_at_223[0x3];
1248 	u8 cq_eq_remap[0x1];
1249 	u8 pg[0x1];
1250 	u8 block_lb_mc[0x1];
1251 	u8 reserved_at_229[0x1];
1252 	u8 scqe_break_moderation[0x1];
1253 	u8 cq_period_start_from_cqe[0x1];
1254 	u8 cd[0x1];
1255 	u8 reserved_at_22d[0x1];
1256 	u8 apm[0x1];
1257 	u8 vector_calc[0x1];
1258 	u8 umr_ptr_rlky[0x1];
1259 	u8 imaicl[0x1];
1260 	u8 reserved_at_232[0x4];
1261 	u8 qkv[0x1];
1262 	u8 pkv[0x1];
1263 	u8 set_deth_sqpn[0x1];
1264 	u8 reserved_at_239[0x3];
1265 	u8 xrc[0x1];
1266 	u8 ud[0x1];
1267 	u8 uc[0x1];
1268 	u8 rc[0x1];
1269 	u8 uar_4k[0x1];
1270 	u8 reserved_at_241[0x9];
1271 	u8 uar_sz[0x6];
1272 	u8 reserved_at_250[0x8];
1273 	u8 log_pg_sz[0x8];
1274 	u8 bf[0x1];
1275 	u8 driver_version[0x1];
1276 	u8 pad_tx_eth_packet[0x1];
1277 	u8 reserved_at_263[0x8];
1278 	u8 log_bf_reg_size[0x5];
1279 	u8 reserved_at_270[0xb];
1280 	u8 lag_master[0x1];
1281 	u8 num_lag_ports[0x4];
1282 	u8 reserved_at_280[0x10];
1283 	u8 max_wqe_sz_sq[0x10];
1284 	u8 reserved_at_2a0[0x10];
1285 	u8 max_wqe_sz_rq[0x10];
1286 	u8 max_flow_counter_31_16[0x10];
1287 	u8 max_wqe_sz_sq_dc[0x10];
1288 	u8 reserved_at_2e0[0x7];
1289 	u8 max_qp_mcg[0x19];
1290 	u8 reserved_at_300[0x10];
1291 	u8 flow_counter_bulk_alloc[0x08];
1292 	u8 log_max_mcg[0x8];
1293 	u8 reserved_at_320[0x3];
1294 	u8 log_max_transport_domain[0x5];
1295 	u8 reserved_at_328[0x3];
1296 	u8 log_max_pd[0x5];
1297 	u8 reserved_at_330[0xb];
1298 	u8 log_max_xrcd[0x5];
1299 	u8 nic_receive_steering_discard[0x1];
1300 	u8 receive_discard_vport_down[0x1];
1301 	u8 transmit_discard_vport_down[0x1];
1302 	u8 reserved_at_343[0x5];
1303 	u8 log_max_flow_counter_bulk[0x8];
1304 	u8 max_flow_counter_15_0[0x10];
1305 	u8 modify_tis[0x1];
1306 	u8 flow_counters_dump[0x1];
1307 	u8 reserved_at_360[0x1];
1308 	u8 log_max_rq[0x5];
1309 	u8 reserved_at_368[0x3];
1310 	u8 log_max_sq[0x5];
1311 	u8 reserved_at_370[0x3];
1312 	u8 log_max_tir[0x5];
1313 	u8 reserved_at_378[0x3];
1314 	u8 log_max_tis[0x5];
1315 	u8 basic_cyclic_rcv_wqe[0x1];
1316 	u8 reserved_at_381[0x2];
1317 	u8 log_max_rmp[0x5];
1318 	u8 reserved_at_388[0x3];
1319 	u8 log_max_rqt[0x5];
1320 	u8 reserved_at_390[0x3];
1321 	u8 log_max_rqt_size[0x5];
1322 	u8 reserved_at_398[0x3];
1323 	u8 log_max_tis_per_sq[0x5];
1324 	u8 ext_stride_num_range[0x1];
1325 	u8 reserved_at_3a1[0x2];
1326 	u8 log_max_stride_sz_rq[0x5];
1327 	u8 reserved_at_3a8[0x3];
1328 	u8 log_min_stride_sz_rq[0x5];
1329 	u8 reserved_at_3b0[0x3];
1330 	u8 log_max_stride_sz_sq[0x5];
1331 	u8 reserved_at_3b8[0x3];
1332 	u8 log_min_stride_sz_sq[0x5];
1333 	u8 hairpin[0x1];
1334 	u8 reserved_at_3c1[0x2];
1335 	u8 log_max_hairpin_queues[0x5];
1336 	u8 reserved_at_3c8[0x3];
1337 	u8 log_max_hairpin_wq_data_sz[0x5];
1338 	u8 reserved_at_3d0[0x3];
1339 	u8 log_max_hairpin_num_packets[0x5];
1340 	u8 reserved_at_3d8[0x3];
1341 	u8 log_max_wq_sz[0x5];
1342 	u8 nic_vport_change_event[0x1];
1343 	u8 disable_local_lb_uc[0x1];
1344 	u8 disable_local_lb_mc[0x1];
1345 	u8 log_min_hairpin_wq_data_sz[0x5];
1346 	u8 reserved_at_3e8[0x3];
1347 	u8 log_max_vlan_list[0x5];
1348 	u8 reserved_at_3f0[0x3];
1349 	u8 log_max_current_mc_list[0x5];
1350 	u8 reserved_at_3f8[0x3];
1351 	u8 log_max_current_uc_list[0x5];
1352 	u8 general_obj_types[0x40];
1353 	u8 reserved_at_440[0x20];
1354 	u8 reserved_at_460[0x10];
1355 	u8 max_num_eqs[0x10];
1356 	u8 reserved_at_480[0x3];
1357 	u8 log_max_l2_table[0x5];
1358 	u8 reserved_at_488[0x8];
1359 	u8 log_uar_page_sz[0x10];
1360 	u8 reserved_at_4a0[0x20];
1361 	u8 device_frequency_mhz[0x20];
1362 	u8 device_frequency_khz[0x20];
1363 	u8 reserved_at_500[0x20];
1364 	u8 num_of_uars_per_page[0x20];
1365 	u8 flex_parser_protocols[0x20];
1366 	u8 reserved_at_560[0x20];
1367 	u8 reserved_at_580[0x3c];
1368 	u8 mini_cqe_resp_stride_index[0x1];
1369 	u8 cqe_128_always[0x1];
1370 	u8 cqe_compression_128[0x1];
1371 	u8 cqe_compression[0x1];
1372 	u8 cqe_compression_timeout[0x10];
1373 	u8 cqe_compression_max_num[0x10];
1374 	u8 reserved_at_5e0[0x10];
1375 	u8 tag_matching[0x1];
1376 	u8 rndv_offload_rc[0x1];
1377 	u8 rndv_offload_dc[0x1];
1378 	u8 log_tag_matching_list_sz[0x5];
1379 	u8 reserved_at_5f8[0x3];
1380 	u8 log_max_xrq[0x5];
1381 	u8 affiliate_nic_vport_criteria[0x8];
1382 	u8 native_port_num[0x8];
1383 	u8 num_vhca_ports[0x8];
1384 	u8 reserved_at_618[0x6];
1385 	u8 sw_owner_id[0x1];
1386 	u8 reserved_at_61f[0x1e1];
1387 };
1388 
1389 struct mlx5_ifc_qos_cap_bits {
1390 	u8 packet_pacing[0x1];
1391 	u8 esw_scheduling[0x1];
1392 	u8 esw_bw_share[0x1];
1393 	u8 esw_rate_limit[0x1];
1394 	u8 reserved_at_4[0x1];
1395 	u8 packet_pacing_burst_bound[0x1];
1396 	u8 packet_pacing_typical_size[0x1];
1397 	u8 flow_meter_srtcm[0x1];
1398 	u8 reserved_at_8[0x8];
1399 	u8 log_max_flow_meter[0x8];
1400 	u8 flow_meter_reg_id[0x8];
1401 	u8 wqe_rate_pp[0x1];
1402 	u8 reserved_at_25[0x7];
1403 	u8 flow_meter_reg_share[0x1];
1404 	u8 reserved_at_2e[0x17];
1405 	u8 packet_pacing_max_rate[0x20];
1406 	u8 packet_pacing_min_rate[0x20];
1407 	u8 reserved_at_80[0x10];
1408 	u8 packet_pacing_rate_table_size[0x10];
1409 	u8 esw_element_type[0x10];
1410 	u8 esw_tsar_type[0x10];
1411 	u8 reserved_at_c0[0x10];
1412 	u8 max_qos_para_vport[0x10];
1413 	u8 max_tsar_bw_share[0x20];
1414 	u8 reserved_at_100[0x6e8];
1415 };
1416 
1417 struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
1418 	u8 csum_cap[0x1];
1419 	u8 vlan_cap[0x1];
1420 	u8 lro_cap[0x1];
1421 	u8 lro_psh_flag[0x1];
1422 	u8 lro_time_stamp[0x1];
1423 	u8 lro_max_msg_sz_mode[0x2];
1424 	u8 wqe_vlan_insert[0x1];
1425 	u8 self_lb_en_modifiable[0x1];
1426 	u8 self_lb_mc[0x1];
1427 	u8 self_lb_uc[0x1];
1428 	u8 max_lso_cap[0x5];
1429 	u8 multi_pkt_send_wqe[0x2];
1430 	u8 wqe_inline_mode[0x2];
1431 	u8 rss_ind_tbl_cap[0x4];
1432 	u8 reg_umr_sq[0x1];
1433 	u8 scatter_fcs[0x1];
1434 	u8 enhanced_multi_pkt_send_wqe[0x1];
1435 	u8 tunnel_lso_const_out_ip_id[0x1];
1436 	u8 tunnel_lro_gre[0x1];
1437 	u8 tunnel_lro_vxlan[0x1];
1438 	u8 tunnel_stateless_gre[0x1];
1439 	u8 tunnel_stateless_vxlan[0x1];
1440 	u8 swp[0x1];
1441 	u8 swp_csum[0x1];
1442 	u8 swp_lso[0x1];
1443 	u8 reserved_at_23[0x8];
1444 	u8 tunnel_stateless_gtp[0x1];
1445 	u8 reserved_at_25[0x4];
1446 	u8 max_vxlan_udp_ports[0x8];
1447 	u8 reserved_at_38[0x6];
1448 	u8 max_geneve_opt_len[0x1];
1449 	u8 tunnel_stateless_geneve_rx[0x1];
1450 	u8 reserved_at_40[0x10];
1451 	u8 lro_min_mss_size[0x10];
1452 	u8 reserved_at_60[0x120];
1453 	u8 lro_timer_supported_periods[4][0x20];
1454 	u8 reserved_at_200[0x600];
1455 };
1456 
1457 enum {
1458 	MLX5_VIRTQ_TYPE_SPLIT = 0,
1459 	MLX5_VIRTQ_TYPE_PACKED = 1,
1460 };
1461 
1462 enum {
1463 	MLX5_VIRTQ_EVENT_MODE_NO_MSIX = 0,
1464 	MLX5_VIRTQ_EVENT_MODE_QP = 1,
1465 	MLX5_VIRTQ_EVENT_MODE_MSIX = 2,
1466 };
1467 
1468 struct mlx5_ifc_virtio_emulation_cap_bits {
1469 	u8 desc_tunnel_offload_type[0x1];
1470 	u8 eth_frame_offload_type[0x1];
1471 	u8 virtio_version_1_0[0x1];
1472 	u8 tso_ipv4[0x1];
1473 	u8 tso_ipv6[0x1];
1474 	u8 tx_csum[0x1];
1475 	u8 rx_csum[0x1];
1476 	u8 reserved_at_7[0x1][0x9];
1477 	u8 event_mode[0x8];
1478 	u8 virtio_queue_type[0x8];
1479 	u8 reserved_at_20[0x13];
1480 	u8 log_doorbell_stride[0x5];
1481 	u8 reserved_at_3b[0x3];
1482 	u8 log_doorbell_bar_size[0x5];
1483 	u8 doorbell_bar_offset[0x40];
1484 	u8 reserved_at_80[0x8];
1485 	u8 max_num_virtio_queues[0x18];
1486 	u8 reserved_at_a0[0x60];
1487 	u8 umem_1_buffer_param_a[0x20];
1488 	u8 umem_1_buffer_param_b[0x20];
1489 	u8 umem_2_buffer_param_a[0x20];
1490 	u8 umem_2_buffer_param_b[0x20];
1491 	u8 umem_3_buffer_param_a[0x20];
1492 	u8 umem_3_buffer_param_b[0x20];
1493 	u8 reserved_at_1c0[0x620];
1494 };
1495 
1496 struct mlx5_ifc_flow_table_prop_layout_bits {
1497 	u8 ft_support[0x1];
1498 	u8 flow_tag[0x1];
1499 	u8 flow_counter[0x1];
1500 	u8 flow_modify_en[0x1];
1501 	u8 modify_root[0x1];
1502 	u8 identified_miss_table[0x1];
1503 	u8 flow_table_modify[0x1];
1504 	u8 reformat[0x1];
1505 	u8 decap[0x1];
1506 	u8 reset_root_to_default[0x1];
1507 	u8 pop_vlan[0x1];
1508 	u8 push_vlan[0x1];
1509 	u8 fpga_vendor_acceleration[0x1];
1510 	u8 pop_vlan_2[0x1];
1511 	u8 push_vlan_2[0x1];
1512 	u8 reformat_and_vlan_action[0x1];
1513 	u8 modify_and_vlan_action[0x1];
1514 	u8 sw_owner[0x1];
1515 	u8 reformat_l3_tunnel_to_l2[0x1];
1516 	u8 reformat_l2_to_l3_tunnel[0x1];
1517 	u8 reformat_and_modify_action[0x1];
1518 	u8 reserved_at_15[0x9];
1519 	u8 sw_owner_v2[0x1];
1520 	u8 reserved_at_1f[0x1];
1521 	u8 reserved_at_20[0x2];
1522 	u8 log_max_ft_size[0x6];
1523 	u8 log_max_modify_header_context[0x8];
1524 	u8 max_modify_header_actions[0x8];
1525 	u8 max_ft_level[0x8];
1526 	u8 reserved_at_40[0x8];
1527 	u8 log_max_ft_sampler_num[8];
1528 	u8 metadata_reg_b_width[0x8];
1529 	u8 metadata_reg_a_width[0x8];
1530 	u8 reserved_at_60[0x18];
1531 	u8 log_max_ft_num[0x8];
1532 	u8 reserved_at_80[0x10];
1533 	u8 log_max_flow_counter[0x8];
1534 	u8 log_max_destination[0x8];
1535 	u8 reserved_at_a0[0x18];
1536 	u8 log_max_flow[0x8];
1537 	u8 reserved_at_c0[0x140];
1538 };
1539 
1540 struct mlx5_ifc_flow_table_nic_cap_bits {
1541 	u8	   reserved_at_0[0x200];
1542 	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties;
1543 };
1544 
1545 union mlx5_ifc_hca_cap_union_bits {
1546 	struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
1547 	struct mlx5_ifc_per_protocol_networking_offload_caps_bits
1548 	       per_protocol_networking_offload_caps;
1549 	struct mlx5_ifc_qos_cap_bits qos_cap;
1550 	struct mlx5_ifc_virtio_emulation_cap_bits vdpa_caps;
1551 	struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
1552 	u8 reserved_at_0[0x8000];
1553 };
1554 
1555 struct mlx5_ifc_set_action_in_bits {
1556 	u8 action_type[0x4];
1557 	u8 field[0xc];
1558 	u8 reserved_at_10[0x3];
1559 	u8 offset[0x5];
1560 	u8 reserved_at_18[0x3];
1561 	u8 length[0x5];
1562 	u8 data[0x20];
1563 };
1564 
1565 struct mlx5_ifc_query_hca_cap_out_bits {
1566 	u8 status[0x8];
1567 	u8 reserved_at_8[0x18];
1568 	u8 syndrome[0x20];
1569 	u8 reserved_at_40[0x40];
1570 	union mlx5_ifc_hca_cap_union_bits capability;
1571 };
1572 
1573 struct mlx5_ifc_query_hca_cap_in_bits {
1574 	u8 opcode[0x10];
1575 	u8 reserved_at_10[0x10];
1576 	u8 reserved_at_20[0x10];
1577 	u8 op_mod[0x10];
1578 	u8 reserved_at_40[0x40];
1579 };
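
/*
 * A minimal sketch of a QUERY_HCA_CAP round trip using the layouts above:
 * build the request with the general-device op_mod, then (after the command
 * has been executed through DevX, which is omitted here) read a capability
 * field from the response. Names and parameters are illustrative only.
 */
static inline uint32_t
mlx5_query_hca_cap_flex_protocols(uint32_t *in, void *out)
{
	void *hcattr;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);
	/* ... DevX command execution omitted ... */
	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
	return MLX5_GET(cmd_hca_cap, hcattr, flex_parser_protocols);
}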
1580 
1581 struct mlx5_ifc_mac_address_layout_bits {
1582 	u8 reserved_at_0[0x10];
1583 	u8 mac_addr_47_32[0x10];
1584 	u8 mac_addr_31_0[0x20];
1585 };
1586 
1587 struct mlx5_ifc_nic_vport_context_bits {
1588 	u8 reserved_at_0[0x5];
1589 	u8 min_wqe_inline_mode[0x3];
1590 	u8 reserved_at_8[0x15];
1591 	u8 disable_mc_local_lb[0x1];
1592 	u8 disable_uc_local_lb[0x1];
1593 	u8 roce_en[0x1];
1594 	u8 arm_change_event[0x1];
1595 	u8 reserved_at_21[0x1a];
1596 	u8 event_on_mtu[0x1];
1597 	u8 event_on_promisc_change[0x1];
1598 	u8 event_on_vlan_change[0x1];
1599 	u8 event_on_mc_address_change[0x1];
1600 	u8 event_on_uc_address_change[0x1];
1601 	u8 reserved_at_40[0xc];
1602 	u8 affiliation_criteria[0x4];
1603 	u8 affiliated_vhca_id[0x10];
1604 	u8 reserved_at_60[0xd0];
1605 	u8 mtu[0x10];
1606 	u8 system_image_guid[0x40];
1607 	u8 port_guid[0x40];
1608 	u8 node_guid[0x40];
1609 	u8 reserved_at_200[0x140];
1610 	u8 qkey_violation_counter[0x10];
1611 	u8 reserved_at_350[0x430];
1612 	u8 promisc_uc[0x1];
1613 	u8 promisc_mc[0x1];
1614 	u8 promisc_all[0x1];
1615 	u8 reserved_at_783[0x2];
1616 	u8 allowed_list_type[0x3];
1617 	u8 reserved_at_788[0xc];
1618 	u8 allowed_list_size[0xc];
1619 	struct mlx5_ifc_mac_address_layout_bits permanent_address;
1620 	u8 reserved_at_7e0[0x20];
1621 };
1622 
1623 struct mlx5_ifc_query_nic_vport_context_out_bits {
1624 	u8 status[0x8];
1625 	u8 reserved_at_8[0x18];
1626 	u8 syndrome[0x20];
1627 	u8 reserved_at_40[0x40];
1628 	struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
1629 };
1630 
1631 struct mlx5_ifc_query_nic_vport_context_in_bits {
1632 	u8 opcode[0x10];
1633 	u8 reserved_at_10[0x10];
1634 	u8 reserved_at_20[0x10];
1635 	u8 op_mod[0x10];
1636 	u8 other_vport[0x1];
1637 	u8 reserved_at_41[0xf];
1638 	u8 vport_number[0x10];
1639 	u8 reserved_at_60[0x5];
1640 	u8 allowed_list_type[0x3];
1641 	u8 reserved_at_68[0x18];
1642 };
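
/*
 * A minimal sketch of querying the NIC vport context with the layouts above
 * to retrieve the minimal WQE inline mode. Command execution through DevX is
 * omitted; the helper name and parameters are illustrative only.
 */
static inline uint8_t
mlx5_nic_vport_min_inline_mode(uint32_t *in, void *out)
{
	void *vctx;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	/* ... DevX command execution omitted ... */
	vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
			    nic_vport_context);
	return MLX5_GET(nic_vport_context, vctx, min_wqe_inline_mode);
}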
1643 
1644 struct mlx5_ifc_tisc_bits {
1645 	u8 strict_lag_tx_port_affinity[0x1];
1646 	u8 reserved_at_1[0x3];
1647 	u8 lag_tx_port_affinity[0x04];
1648 	u8 reserved_at_8[0x4];
1649 	u8 prio[0x4];
1650 	u8 reserved_at_10[0x10];
1651 	u8 reserved_at_20[0x100];
1652 	u8 reserved_at_120[0x8];
1653 	u8 transport_domain[0x18];
1654 	u8 reserved_at_140[0x8];
1655 	u8 underlay_qpn[0x18];
1656 	u8 reserved_at_160[0x3a0];
1657 };
1658 
1659 struct mlx5_ifc_query_tis_out_bits {
1660 	u8 status[0x8];
1661 	u8 reserved_at_8[0x18];
1662 	u8 syndrome[0x20];
1663 	u8 reserved_at_40[0x40];
1664 	struct mlx5_ifc_tisc_bits tis_context;
1665 };
1666 
1667 struct mlx5_ifc_query_tis_in_bits {
1668 	u8 opcode[0x10];
1669 	u8 reserved_at_10[0x10];
1670 	u8 reserved_at_20[0x10];
1671 	u8 op_mod[0x10];
1672 	u8 reserved_at_40[0x8];
1673 	u8 tisn[0x18];
1674 	u8 reserved_at_60[0x20];
1675 };
1676 
1677 struct mlx5_ifc_alloc_transport_domain_out_bits {
1678 	u8 status[0x8];
1679 	u8 reserved_at_8[0x18];
1680 	u8 syndrome[0x20];
1681 	u8 reserved_at_40[0x8];
1682 	u8 transport_domain[0x18];
1683 	u8 reserved_at_60[0x20];
1684 };
1685 
1686 struct mlx5_ifc_alloc_transport_domain_in_bits {
1687 	u8 opcode[0x10];
1688 	u8 reserved_at_10[0x10];
1689 	u8 reserved_at_20[0x10];
1690 	u8 op_mod[0x10];
1691 	u8 reserved_at_40[0x40];
1692 };
1693 
1694 enum {
1695 	MLX5_WQ_TYPE_LINKED_LIST                = 0x0,
1696 	MLX5_WQ_TYPE_CYCLIC                     = 0x1,
1697 	MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ    = 0x2,
1698 	MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ         = 0x3,
1699 };
1700 
1701 enum {
1702 	MLX5_WQ_END_PAD_MODE_NONE  = 0x0,
1703 	MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
1704 };
1705 
1706 struct mlx5_ifc_wq_bits {
1707 	u8 wq_type[0x4];
1708 	u8 wq_signature[0x1];
1709 	u8 end_padding_mode[0x2];
1710 	u8 cd_slave[0x1];
1711 	u8 reserved_at_8[0x18];
1712 	u8 hds_skip_first_sge[0x1];
1713 	u8 log2_hds_buf_size[0x3];
1714 	u8 reserved_at_24[0x7];
1715 	u8 page_offset[0x5];
1716 	u8 lwm[0x10];
1717 	u8 reserved_at_40[0x8];
1718 	u8 pd[0x18];
1719 	u8 reserved_at_60[0x8];
1720 	u8 uar_page[0x18];
1721 	u8 dbr_addr[0x40];
1722 	u8 hw_counter[0x20];
1723 	u8 sw_counter[0x20];
1724 	u8 reserved_at_100[0xc];
1725 	u8 log_wq_stride[0x4];
1726 	u8 reserved_at_110[0x3];
1727 	u8 log_wq_pg_sz[0x5];
1728 	u8 reserved_at_118[0x3];
1729 	u8 log_wq_sz[0x5];
1730 	u8 dbr_umem_valid[0x1];
1731 	u8 wq_umem_valid[0x1];
1732 	u8 reserved_at_122[0x1];
1733 	u8 log_hairpin_num_packets[0x5];
1734 	u8 reserved_at_128[0x3];
1735 	u8 log_hairpin_data_sz[0x5];
1736 	u8 reserved_at_130[0x4];
1737 	u8 single_wqe_log_num_of_strides[0x4];
1738 	u8 two_byte_shift_en[0x1];
1739 	u8 reserved_at_139[0x4];
1740 	u8 single_stride_log_num_of_bytes[0x3];
1741 	u8 dbr_umem_id[0x20];
1742 	u8 wq_umem_id[0x20];
1743 	u8 wq_umem_offset[0x40];
1744 	u8 reserved_at_1c0[0x440];
1745 };
1746 
1747 enum {
1748 	MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE  = 0x0,
1749 	MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP     = 0x1,
1750 };
1751 
1752 enum {
1753 	MLX5_RQC_STATE_RST  = 0x0,
1754 	MLX5_RQC_STATE_RDY  = 0x1,
1755 	MLX5_RQC_STATE_ERR  = 0x3,
1756 };
1757 
1758 struct mlx5_ifc_rqc_bits {
1759 	u8 rlky[0x1];
1760 	u8 delay_drop_en[0x1];
1761 	u8 scatter_fcs[0x1];
1762 	u8 vsd[0x1];
1763 	u8 mem_rq_type[0x4];
1764 	u8 state[0x4];
1765 	u8 reserved_at_c[0x1];
1766 	u8 flush_in_error_en[0x1];
1767 	u8 hairpin[0x1];
1768 	u8 reserved_at_f[0x11];
1769 	u8 reserved_at_20[0x8];
1770 	u8 user_index[0x18];
1771 	u8 reserved_at_40[0x8];
1772 	u8 cqn[0x18];
1773 	u8 counter_set_id[0x8];
1774 	u8 reserved_at_68[0x18];
1775 	u8 reserved_at_80[0x8];
1776 	u8 rmpn[0x18];
1777 	u8 reserved_at_a0[0x8];
1778 	u8 hairpin_peer_sq[0x18];
1779 	u8 reserved_at_c0[0x10];
1780 	u8 hairpin_peer_vhca[0x10];
1781 	u8 reserved_at_e0[0xa0];
1782 	struct mlx5_ifc_wq_bits wq; /* Not used in LRO RQ. */
1783 };
1784 
1785 struct mlx5_ifc_create_rq_out_bits {
1786 	u8 status[0x8];
1787 	u8 reserved_at_8[0x18];
1788 	u8 syndrome[0x20];
1789 	u8 reserved_at_40[0x8];
1790 	u8 rqn[0x18];
1791 	u8 reserved_at_60[0x20];
1792 };
1793 
1794 struct mlx5_ifc_create_rq_in_bits {
1795 	u8 opcode[0x10];
1796 	u8 uid[0x10];
1797 	u8 reserved_at_20[0x10];
1798 	u8 op_mod[0x10];
1799 	u8 reserved_at_40[0xc0];
1800 	struct mlx5_ifc_rqc_bits ctx;
1801 };
1802 
1803 struct mlx5_ifc_modify_rq_out_bits {
1804 	u8 status[0x8];
1805 	u8 reserved_at_8[0x18];
1806 	u8 syndrome[0x20];
1807 	u8 reserved_at_40[0x40];
1808 };
1809 
1810 struct mlx5_ifc_create_tis_out_bits {
1811 	u8 status[0x8];
1812 	u8 reserved_at_8[0x18];
1813 	u8 syndrome[0x20];
1814 	u8 reserved_at_40[0x8];
1815 	u8 tisn[0x18];
1816 	u8 reserved_at_60[0x20];
1817 };
1818 
1819 struct mlx5_ifc_create_tis_in_bits {
1820 	u8 opcode[0x10];
1821 	u8 uid[0x10];
1822 	u8 reserved_at_20[0x10];
1823 	u8 op_mod[0x10];
1824 	u8 reserved_at_40[0xc0];
1825 	struct mlx5_ifc_tisc_bits ctx;
1826 };
1827 
1828 enum {
1829 	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM = 1ULL << 0,
1830 	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
1831 	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2,
1832 	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3,
1833 };
1834 
1835 struct mlx5_ifc_modify_rq_in_bits {
1836 	u8 opcode[0x10];
1837 	u8 uid[0x10];
1838 	u8 reserved_at_20[0x10];
1839 	u8 op_mod[0x10];
1840 	u8 rq_state[0x4];
1841 	u8 reserved_at_44[0x4];
1842 	u8 rqn[0x18];
1843 	u8 reserved_at_60[0x20];
1844 	u8 modify_bitmask[0x40];
1845 	u8 reserved_at_c0[0x40];
1846 	struct mlx5_ifc_rqc_bits ctx;
1847 };
1848 
1849 enum {
1850 	MLX5_L3_PROT_TYPE_IPV4 = 0,
1851 	MLX5_L3_PROT_TYPE_IPV6 = 1,
1852 };
1853 
1854 enum {
1855 	MLX5_L4_PROT_TYPE_TCP = 0,
1856 	MLX5_L4_PROT_TYPE_UDP = 1,
1857 };
1858 
1859 enum {
1860 	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP     = 0x0,
1861 	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP     = 0x1,
1862 	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT   = 0x2,
1863 	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT   = 0x3,
1864 	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI  = 0x4,
1865 };
1866 
1867 struct mlx5_ifc_rx_hash_field_select_bits {
1868 	u8 l3_prot_type[0x1];
1869 	u8 l4_prot_type[0x1];
1870 	u8 selected_fields[0x1e];
1871 };
1872 
1873 enum {
1874 	MLX5_TIRC_DISP_TYPE_DIRECT    = 0x0,
1875 	MLX5_TIRC_DISP_TYPE_INDIRECT  = 0x1,
1876 };
1877 
1878 enum {
1879 	MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO  = 0x1,
1880 	MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO  = 0x2,
1881 };
1882 
1883 enum {
1884 	MLX5_RX_HASH_FN_NONE           = 0x0,
1885 	MLX5_RX_HASH_FN_INVERTED_XOR8  = 0x1,
1886 	MLX5_RX_HASH_FN_TOEPLITZ       = 0x2,
1887 };
1888 
1889 enum {
1890 	MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST    = 0x1,
1891 	MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST  = 0x2,
1892 };
1893 
1894 enum {
1895 	MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4    = 0x0,
1896 	MLX5_LRO_MAX_MSG_SIZE_START_FROM_L2  = 0x1,
1897 };
1898 
1899 struct mlx5_ifc_tirc_bits {
1900 	u8 reserved_at_0[0x20];
1901 	u8 disp_type[0x4];
1902 	u8 reserved_at_24[0x1c];
1903 	u8 reserved_at_40[0x40];
1904 	u8 reserved_at_80[0x4];
1905 	u8 lro_timeout_period_usecs[0x10];
1906 	u8 lro_enable_mask[0x4];
1907 	u8 lro_max_msg_sz[0x8];
1908 	u8 reserved_at_a0[0x40];
1909 	u8 reserved_at_e0[0x8];
1910 	u8 inline_rqn[0x18];
1911 	u8 rx_hash_symmetric[0x1];
1912 	u8 reserved_at_101[0x1];
1913 	u8 tunneled_offload_en[0x1];
1914 	u8 reserved_at_103[0x5];
1915 	u8 indirect_table[0x18];
1916 	u8 rx_hash_fn[0x4];
1917 	u8 reserved_at_124[0x2];
1918 	u8 self_lb_block[0x2];
1919 	u8 transport_domain[0x18];
1920 	u8 rx_hash_toeplitz_key[10][0x20];
1921 	struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
1922 	struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
1923 	u8 reserved_at_2c0[0x4c0];
1924 };
1925 
1926 struct mlx5_ifc_create_tir_out_bits {
1927 	u8 status[0x8];
1928 	u8 reserved_at_8[0x18];
1929 	u8 syndrome[0x20];
1930 	u8 reserved_at_40[0x8];
1931 	u8 tirn[0x18];
1932 	u8 reserved_at_60[0x20];
1933 };
1934 
1935 struct mlx5_ifc_create_tir_in_bits {
1936 	u8 opcode[0x10];
1937 	u8 uid[0x10];
1938 	u8 reserved_at_20[0x10];
1939 	u8 op_mod[0x10];
1940 	u8 reserved_at_40[0xc0];
1941 	struct mlx5_ifc_tirc_bits ctx;
1942 };
1943 
1944 enum {
1945 	MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_LRO = 1ULL << 0,
1946 	MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE = 1ULL << 1,
1947 	MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH = 1ULL << 2,
1948 	/* bit 3 - tunneled_offload_en modify not supported. */
1949 	MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_SELF_LB_EN = 1ULL << 4,
1950 };
1951 
1952 struct mlx5_ifc_modify_tir_out_bits {
1953 	u8 status[0x8];
1954 	u8 reserved_at_8[0x18];
1955 	u8 syndrome[0x20];
1956 	u8 reserved_at_40[0x40];
1957 };
1958 
1959 struct mlx5_ifc_modify_tir_in_bits {
1960 	u8 opcode[0x10];
1961 	u8 uid[0x10];
1962 	u8 reserved_at_20[0x10];
1963 	u8 op_mod[0x10];
1964 	u8 reserved_at_40[0x8];
1965 	u8 tirn[0x18];
1966 	u8 reserved_at_60[0x20];
1967 	u8 modify_bitmask[0x40];
1968 	u8 reserved_at_c0[0x40];
1969 	struct mlx5_ifc_tirc_bits ctx;
1970 };
1971 
1972 enum {
1973 	MLX5_INLINE_Q_TYPE_RQ = 0x0,
1974 	MLX5_INLINE_Q_TYPE_VIRTQ = 0x1,
1975 };
1976 
1977 struct mlx5_ifc_rq_num_bits {
1978 	u8 reserved_at_0[0x8];
1979 	u8 rq_num[0x18];
1980 };
1981 
1982 struct mlx5_ifc_rqtc_bits {
1983 	u8 reserved_at_0[0xa5];
1984 	u8 list_q_type[0x3];
1985 	u8 reserved_at_a8[0x8];
1986 	u8 rqt_max_size[0x10];
1987 	u8 reserved_at_c0[0x10];
1988 	u8 rqt_actual_size[0x10];
1989 	u8 reserved_at_e0[0x6a0];
1990 	struct mlx5_ifc_rq_num_bits rq_num[];
1991 };
1992 
1993 struct mlx5_ifc_create_rqt_out_bits {
1994 	u8 status[0x8];
1995 	u8 reserved_at_8[0x18];
1996 	u8 syndrome[0x20];
1997 	u8 reserved_at_40[0x8];
1998 	u8 rqtn[0x18];
1999 	u8 reserved_at_60[0x20];
2000 };
2001 
2002 #ifdef PEDANTIC
2003 #pragma GCC diagnostic ignored "-Wpedantic"
2004 #endif
2005 struct mlx5_ifc_create_rqt_in_bits {
2006 	u8 opcode[0x10];
2007 	u8 uid[0x10];
2008 	u8 reserved_at_20[0x10];
2009 	u8 op_mod[0x10];
2010 	u8 reserved_at_40[0xc0];
2011 	struct mlx5_ifc_rqtc_bits rqt_context;
2012 };
2013 
2014 struct mlx5_ifc_modify_rqt_in_bits {
2015 	u8 opcode[0x10];
2016 	u8 uid[0x10];
2017 	u8 reserved_at_20[0x10];
2018 	u8 op_mod[0x10];
2019 	u8 reserved_at_40[0x8];
2020 	u8 rqtn[0x18];
2021 	u8 reserved_at_60[0x20];
2022 	u8 modify_bitmask[0x40];
2023 	u8 reserved_at_c0[0x40];
2024 	struct mlx5_ifc_rqtc_bits rqt_context;
2025 };
2026 #ifdef PEDANTIC
2027 #pragma GCC diagnostic error "-Wpedantic"
2028 #endif
2029 
2030 struct mlx5_ifc_modify_rqt_out_bits {
2031 	u8 status[0x8];
2032 	u8 reserved_at_8[0x18];
2033 	u8 syndrome[0x20];
2034 	u8 reserved_at_40[0x40];
2035 };
2036 
2037 enum {
2038 	MLX5_SQC_STATE_RST  = 0x0,
2039 	MLX5_SQC_STATE_RDY  = 0x1,
2040 	MLX5_SQC_STATE_ERR  = 0x3,
2041 };
2042 
2043 struct mlx5_ifc_sqc_bits {
2044 	u8 rlky[0x1];
2045 	u8 cd_master[0x1];
2046 	u8 fre[0x1];
2047 	u8 flush_in_error_en[0x1];
2048 	u8 allow_multi_pkt_send_wqe[0x1];
2049 	u8 min_wqe_inline_mode[0x3];
2050 	u8 state[0x4];
2051 	u8 reg_umr[0x1];
2052 	u8 allow_swp[0x1];
2053 	u8 hairpin[0x1];
2054 	u8 non_wire[0x1];
2055 	u8 static_sq_wq[0x1];
2056 	u8 reserved_at_11[0xf];
2057 	u8 reserved_at_20[0x8];
2058 	u8 user_index[0x18];
2059 	u8 reserved_at_40[0x8];
2060 	u8 cqn[0x18];
2061 	u8 reserved_at_60[0x8];
2062 	u8 hairpin_peer_rq[0x18];
2063 	u8 reserved_at_80[0x10];
2064 	u8 hairpin_peer_vhca[0x10];
2065 	u8 reserved_at_a0[0x50];
2066 	u8 packet_pacing_rate_limit_index[0x10];
2067 	u8 tis_lst_sz[0x10];
2068 	u8 reserved_at_110[0x10];
2069 	u8 reserved_at_120[0x40];
2070 	u8 reserved_at_160[0x8];
2071 	u8 tis_num_0[0x18];
2072 	struct mlx5_ifc_wq_bits wq;
2073 };
2074 
2075 struct mlx5_ifc_query_sq_in_bits {
2076 	u8 opcode[0x10];
2077 	u8 reserved_at_10[0x10];
2078 	u8 reserved_at_20[0x10];
2079 	u8 op_mod[0x10];
2080 	u8 reserved_at_40[0x8];
2081 	u8 sqn[0x18];
2082 	u8 reserved_at_60[0x20];
2083 };
2084 
2085 struct mlx5_ifc_modify_sq_out_bits {
2086 	u8 status[0x8];
2087 	u8 reserved_at_8[0x18];
2088 	u8 syndrome[0x20];
2089 	u8 reserved_at_40[0x40];
2090 };
2091 
2092 struct mlx5_ifc_modify_sq_in_bits {
2093 	u8 opcode[0x10];
2094 	u8 uid[0x10];
2095 	u8 reserved_at_20[0x10];
2096 	u8 op_mod[0x10];
2097 	u8 sq_state[0x4];
2098 	u8 reserved_at_44[0x4];
2099 	u8 sqn[0x18];
2100 	u8 reserved_at_60[0x20];
2101 	u8 modify_bitmask[0x40];
2102 	u8 reserved_at_c0[0x40];
2103 	struct mlx5_ifc_sqc_bits ctx;
2104 };
2105 
2106 struct mlx5_ifc_create_sq_out_bits {
2107 	u8 status[0x8];
2108 	u8 reserved_at_8[0x18];
2109 	u8 syndrome[0x20];
2110 	u8 reserved_at_40[0x8];
2111 	u8 sqn[0x18];
2112 	u8 reserved_at_60[0x20];
2113 };
2114 
2115 struct mlx5_ifc_create_sq_in_bits {
2116 	u8 opcode[0x10];
2117 	u8 uid[0x10];
2118 	u8 reserved_at_20[0x10];
2119 	u8 op_mod[0x10];
2120 	u8 reserved_at_40[0xc0];
2121 	struct mlx5_ifc_sqc_bits ctx;
2122 };
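
/*
 * Illustrative sketch (not part of the PRM definitions): moving an SQ from
 * RST to RDY with the modify_sq layouts above. The wrapper name is
 * hypothetical; it assumes the MLX5_SET/MLX5_ADDR_OF/MLX5_ST_SZ_DW helpers
 * and the command opcode enum defined earlier in this header, plus a DevX
 * object handle previously returned by mlx5_glue->devx_obj_create.
 */
static inline int
mlx5_example_sq_rst2rdy(struct mlx5dv_devx_obj *sq, uint32_t sqn)
{
	uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
	void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST); /* Current. */
	MLX5_SET(modify_sq_in, in, sqn, sqn);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY); /* Target. */
	return mlx5_glue->devx_obj_modify(sq, in, sizeof(in),
					  out, sizeof(out));
}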
2123 
2124 enum {
2125 	MLX5_FLOW_METER_OBJ_MODIFY_FIELD_ACTIVE = (1ULL << 0),
2126 	MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CBS = (1ULL << 1),
2127 	MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CIR = (1ULL << 2),
2128 	MLX5_FLOW_METER_OBJ_MODIFY_FIELD_EBS = (1ULL << 3),
2129 	MLX5_FLOW_METER_OBJ_MODIFY_FIELD_EIR = (1ULL << 4),
2130 };
2131 
2132 struct mlx5_ifc_flow_meter_parameters_bits {
2133 	u8         valid[0x1];			// 00h
2134 	u8         bucket_overflow[0x1];
2135 	u8         start_color[0x2];
2136 	u8         both_buckets_on_green[0x1];
2137 	u8         meter_mode[0x2];
2138 	u8         reserved_at_1[0x19];
2139 	u8         reserved_at_2[0x20]; //04h
2140 	u8         reserved_at_3[0x3];
2141 	u8         cbs_exponent[0x5];		// 08h
2142 	u8         cbs_mantissa[0x8];
2143 	u8         reserved_at_4[0x3];
2144 	u8         cir_exponent[0x5];
2145 	u8         cir_mantissa[0x8];
2146 	u8         reserved_at_5[0x20];		// 0Ch
2147 	u8         reserved_at_6[0x3];
2148 	u8         ebs_exponent[0x5];		// 10h
2149 	u8         ebs_mantissa[0x8];
2150 	u8         reserved_at_7[0x3];
2151 	u8         eir_exponent[0x5];
2152 	u8         eir_mantissa[0x8];
2153 	u8         reserved_at_8[0x60];		// 14h-1Ch
2154 };
2155 
2156 enum {
2157 	MLX5_CQE_SIZE_64B = 0x0,
2158 	MLX5_CQE_SIZE_128B = 0x1,
2159 };
2160 
2161 struct mlx5_ifc_cqc_bits {
2162 	u8 status[0x4];
2163 	u8 as_notify[0x1];
2164 	u8 initiator_src_dct[0x1];
2165 	u8 dbr_umem_valid[0x1];
2166 	u8 reserved_at_7[0x1];
2167 	u8 cqe_sz[0x3];
2168 	u8 cc[0x1];
2169 	u8 reserved_at_c[0x1];
2170 	u8 scqe_break_moderation_en[0x1];
2171 	u8 oi[0x1];
2172 	u8 cq_period_mode[0x2];
2173 	u8 cqe_comp_en[0x1];
2174 	u8 mini_cqe_res_format[0x2];
2175 	u8 st[0x4];
2176 	u8 reserved_at_18[0x1];
2177 	u8 cqe_comp_layout[0x7];
2178 	u8 dbr_umem_id[0x20];
2179 	u8 reserved_at_40[0x14];
2180 	u8 page_offset[0x6];
2181 	u8 reserved_at_5a[0x2];
2182 	u8 mini_cqe_res_format_ext[0x2];
2183 	u8 cq_timestamp_format[0x2];
2184 	u8 reserved_at_60[0x3];
2185 	u8 log_cq_size[0x5];
2186 	u8 uar_page[0x18];
2187 	u8 reserved_at_80[0x4];
2188 	u8 cq_period[0xc];
2189 	u8 cq_max_count[0x10];
2190 	u8 reserved_at_a0[0x18];
2191 	u8 c_eqn[0x8];
2192 	u8 reserved_at_c0[0x3];
2193 	u8 log_page_size[0x5];
2194 	u8 reserved_at_c8[0x18];
2195 	u8 reserved_at_e0[0x20];
2196 	u8 reserved_at_100[0x8];
2197 	u8 last_notified_index[0x18];
2198 	u8 reserved_at_120[0x8];
2199 	u8 last_solicit_index[0x18];
2200 	u8 reserved_at_140[0x8];
2201 	u8 consumer_counter[0x18];
2202 	u8 reserved_at_160[0x8];
2203 	u8 producer_counter[0x18];
2204 	u8 local_partition_id[0xc];
2205 	u8 process_id[0x14];
2206 	u8 reserved_at_1A0[0x20];
2207 	u8 dbr_addr[0x40];
2208 };
2209 
2210 struct mlx5_ifc_create_cq_out_bits {
2211 	u8 status[0x8];
2212 	u8 reserved_at_8[0x18];
2213 	u8 syndrome[0x20];
2214 	u8 reserved_at_40[0x8];
2215 	u8 cqn[0x18];
2216 	u8 reserved_at_60[0x20];
2217 };
2218 
2219 struct mlx5_ifc_create_cq_in_bits {
2220 	u8 opcode[0x10];
2221 	u8 uid[0x10];
2222 	u8 reserved_at_20[0x10];
2223 	u8 op_mod[0x10];
2224 	u8 reserved_at_40[0x40];
2225 	struct mlx5_ifc_cqc_bits cq_context;
2226 	u8 cq_umem_offset[0x40];
2227 	u8 cq_umem_id[0x20];
2228 	u8 cq_umem_valid[0x1];
2229 	u8 reserved_at_2e1[0x1f];
2230 	u8 reserved_at_300[0x580];
2231 	u8 pas[];
2232 };
2233 
2234 enum {
2235 	MLX5_GENERAL_OBJ_TYPE_VIRTQ = 0x000d,
2236 	MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
2237 	MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH = 0x0022,
2238 	MLX5_GENERAL_OBJ_TYPE_FLOW_HIT_ASO = 0x0025,
2239 };
2240 
2241 struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
2242 	u8 opcode[0x10];
2243 	u8 reserved_at_10[0x20];
2244 	u8 obj_type[0x10];
2245 	u8 obj_id[0x20];
2246 	u8 reserved_at_60[0x20];
2247 };
2248 
2249 struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
2250 	u8 status[0x8];
2251 	u8 reserved_at_8[0x18];
2252 	u8 syndrome[0x20];
2253 	u8 obj_id[0x20];
2254 	u8 reserved_at_60[0x20];
2255 };
2256 
2257 struct mlx5_ifc_virtio_q_counters_bits {
2258 	u8 modify_field_select[0x40];
2259 	u8 reserved_at_40[0x40];
2260 	u8 received_desc[0x40];
2261 	u8 completed_desc[0x40];
2262 	u8 error_cqes[0x20];
2263 	u8 bad_desc_errors[0x20];
2264 	u8 exceed_max_chain[0x20];
2265 	u8 invalid_buffer[0x20];
2266 	u8 reserved_at_180[0x50];
2267 };
2268 
2269 struct mlx5_ifc_create_virtio_q_counters_in_bits {
2270 	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
2271 	struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
2272 };
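
/*
 * Illustrative sketch (not part of the PRM definitions): creating a
 * VIRTIO_Q_COUNTERS general object with the command header and counters
 * layouts above. The wrapper name is hypothetical; it assumes the MLX5_SET
 * helpers and the general-object opcode defined earlier in this header.
 */
static inline struct mlx5dv_devx_obj *
mlx5_example_virtq_counters_create(struct ibv_context *ctx)
{
	uint32_t in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *hdr = MLX5_ADDR_OF(create_virtio_q_counters_in, in, hdr);

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS);
	return mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					  out, sizeof(out));
}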
2273 
2274 struct mlx5_ifc_query_virtio_q_counters_out_bits {
2275 	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
2276 	struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
2277 };
2278 enum {
2279 	MLX5_VIRTQ_STATE_INIT = 0,
2280 	MLX5_VIRTQ_STATE_RDY = 1,
2281 	MLX5_VIRTQ_STATE_SUSPEND = 2,
2282 	MLX5_VIRTQ_STATE_ERROR = 3,
2283 };
2284 
2285 enum {
2286 	MLX5_VIRTQ_MODIFY_TYPE_STATE = (1UL << 0),
2287 	MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS = (1UL << 3),
2288 	MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE = (1UL << 4),
2289 };
2290 
2291 struct mlx5_ifc_virtio_q_bits {
2292 	u8 virtio_q_type[0x8];
2293 	u8 reserved_at_8[0x5];
2294 	u8 event_mode[0x3];
2295 	u8 queue_index[0x10];
2296 	u8 full_emulation[0x1];
2297 	u8 virtio_version_1_0[0x1];
2298 	u8 reserved_at_22[0x2];
2299 	u8 offload_type[0x4];
2300 	u8 event_qpn_or_msix[0x18];
2301 	u8 doorbell_stride_idx[0x10];
2302 	u8 queue_size[0x10];
2303 	u8 device_emulation_id[0x20];
2304 	u8 desc_addr[0x40];
2305 	u8 used_addr[0x40];
2306 	u8 available_addr[0x40];
2307 	u8 virtio_q_mkey[0x20];
2308 	u8 reserved_at_160[0x18];
2309 	u8 error_type[0x8];
2310 	u8 umem_1_id[0x20];
2311 	u8 umem_1_size[0x20];
2312 	u8 umem_1_offset[0x40];
2313 	u8 umem_2_id[0x20];
2314 	u8 umem_2_size[0x20];
2315 	u8 umem_2_offset[0x40];
2316 	u8 umem_3_id[0x20];
2317 	u8 umem_3_size[0x20];
2318 	u8 umem_3_offset[0x40];
2319 	u8 counter_set_id[0x20];
2320 	u8 reserved_at_320[0x8];
2321 	u8 pd[0x18];
2322 	u8 reserved_at_340[0xc0];
2323 };
2324 
2325 struct mlx5_ifc_virtio_net_q_bits {
2326 	u8 modify_field_select[0x40];
2327 	u8 reserved_at_40[0x40];
2328 	u8 tso_ipv4[0x1];
2329 	u8 tso_ipv6[0x1];
2330 	u8 tx_csum[0x1];
2331 	u8 rx_csum[0x1];
2332 	u8 reserved_at_84[0x6];
2333 	u8 dirty_bitmap_dump_enable[0x1];
2334 	u8 vhost_log_page[0x5];
2335 	u8 reserved_at_90[0xc];
2336 	u8 state[0x4];
2337 	u8 reserved_at_a0[0x8];
2338 	u8 tisn_or_qpn[0x18];
2339 	u8 dirty_bitmap_mkey[0x20];
2340 	u8 dirty_bitmap_size[0x20];
2341 	u8 dirty_bitmap_addr[0x40];
2342 	u8 hw_available_index[0x10];
2343 	u8 hw_used_index[0x10];
2344 	u8 reserved_at_160[0xa0];
2345 	struct mlx5_ifc_virtio_q_bits virtio_q_context;
2346 };
2347 
2348 struct mlx5_ifc_create_virtq_in_bits {
2349 	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
2350 	struct mlx5_ifc_virtio_net_q_bits virtq;
2351 };
2352 
2353 struct mlx5_ifc_query_virtq_out_bits {
2354 	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
2355 	struct mlx5_ifc_virtio_net_q_bits virtq;
2356 };
2357 
2358 struct mlx5_ifc_flow_hit_aso_bits {
2359 	u8 modify_field_select[0x40];
2360 	u8 reserved_at_40[0x48];
2361 	u8 access_pd[0x18];
2362 	u8 reserved_at_a0[0x160];
2363 	u8 flag[0x200];
2364 };
2365 
2366 struct mlx5_ifc_create_flow_hit_aso_in_bits {
2367 	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
2368 	struct mlx5_ifc_flow_hit_aso_bits flow_hit_aso;
2369 };
2370 
2371 enum mlx5_access_aso_opc_mod {
2372 	ASO_OPC_MOD_IPSEC = 0x0,
2373 	ASO_OPC_MOD_CONNECTION_TRACKING = 0x1,
2374 	ASO_OPC_MOD_POLICER = 0x2,
2375 	ASO_OPC_MOD_RACE_AVOIDANCE = 0x3,
2376 	ASO_OPC_MOD_FLOW_HIT = 0x4,
2377 };
2378 
2379 #define ASO_CSEG_DATA_MASK_MODE_OFFSET	30
2380 
2381 enum mlx5_aso_data_mask_mode {
2382 	BITWISE_64BIT = 0x0,
2383 	BYTEWISE_64BYTE = 0x1,
2384 	CALCULATED_64BYTE = 0x2,
2385 };
2386 
2387 #define ASO_CSEG_COND_0_OPER_OFFSET	20
2388 #define ASO_CSEG_COND_1_OPER_OFFSET	16
2389 
2390 enum mlx5_aso_pre_cond_op {
2391 	ASO_OP_ALWAYS_FALSE = 0x0,
2392 	ASO_OP_ALWAYS_TRUE = 0x1,
2393 	ASO_OP_EQUAL = 0x2,
2394 	ASO_OP_NOT_EQUAL = 0x3,
2395 	ASO_OP_GREATER_OR_EQUAL = 0x4,
2396 	ASO_OP_LESSER_OR_EQUAL = 0x5,
2397 	ASO_OP_LESSER = 0x6,
2398 	ASO_OP_GREATER = 0x7,
2399 	ASO_OP_CYCLIC_GREATER = 0x8,
2400 	ASO_OP_CYCLIC_LESSER = 0x9,
2401 };
2402 
2403 #define ASO_CSEG_COND_OPER_OFFSET	6
2404 
2405 enum mlx5_aso_op {
2406 	ASO_OPER_LOGICAL_AND = 0x0,
2407 	ASO_OPER_LOGICAL_OR = 0x1,
2408 };
2409 
2410 /* ASO WQE CTRL segment. */
2411 struct mlx5_aso_cseg {
2412 	uint32_t va_h;
2413 	uint32_t va_l_r;
2414 	uint32_t lkey;
2415 	uint32_t operand_masks;
2416 	uint32_t condition_0_data;
2417 	uint32_t condition_0_mask;
2418 	uint32_t condition_1_data;
2419 	uint32_t condition_1_mask;
2420 	uint64_t bitwise_data;
2421 	uint64_t data_mask;
2422 } __rte_packed;
2423 
2424 #define MLX5_ASO_WQE_DSEG_SIZE	0x40
2425 
2426 /* ASO WQE Data segment. */
2427 struct mlx5_aso_dseg {
2428 	uint8_t data[MLX5_ASO_WQE_DSEG_SIZE];
2429 } __rte_packed;
2430 
2431 /* ASO WQE. */
2432 struct mlx5_aso_wqe {
2433 	struct mlx5_wqe_cseg general_cseg;
2434 	struct mlx5_aso_cseg aso_cseg;
2435 	struct mlx5_aso_dseg aso_dseg;
2436 } __rte_packed;
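
/*
 * Illustrative sketch (an assumption, not part of the PRM layout): the
 * operand_masks word of the ASO control segment appears to be composed from
 * the enums above using the ASO_CSEG_*_OFFSET shifts. For example, a
 * bytewise 64-byte data mask with both pre-conditions "always true",
 * combined with logical OR:
 */
static inline void
mlx5_example_aso_cseg_operands(struct mlx5_aso_cseg *cseg)
{
	cseg->operand_masks = rte_cpu_to_be_32
		((ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
		 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
		 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
		 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
}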
2437 
2438 enum {
2439 	MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,
2440 };
2441 
2442 enum {
2443 	MLX5_QP_ST_RC = 0x0,
2444 };
2445 
2446 enum {
2447 	MLX5_QP_PM_MIGRATED = 0x3,
2448 };
2449 
2450 enum {
2451 	MLX5_NON_ZERO_RQ = 0x0,
2452 	MLX5_SRQ_RQ = 0x1,
2453 	MLX5_CRQ_RQ = 0x2,
2454 	MLX5_ZERO_LEN_RQ = 0x3,
2455 };
2456 
2457 struct mlx5_ifc_ads_bits {
2458 	u8 fl[0x1];
2459 	u8 free_ar[0x1];
2460 	u8 reserved_at_2[0xe];
2461 	u8 pkey_index[0x10];
2462 	u8 reserved_at_20[0x8];
2463 	u8 grh[0x1];
2464 	u8 mlid[0x7];
2465 	u8 rlid[0x10];
2466 	u8 ack_timeout[0x5];
2467 	u8 reserved_at_45[0x3];
2468 	u8 src_addr_index[0x8];
2469 	u8 reserved_at_50[0x4];
2470 	u8 stat_rate[0x4];
2471 	u8 hop_limit[0x8];
2472 	u8 reserved_at_60[0x4];
2473 	u8 tclass[0x8];
2474 	u8 flow_label[0x14];
2475 	u8 rgid_rip[16][0x8];
2476 	u8 reserved_at_100[0x4];
2477 	u8 f_dscp[0x1];
2478 	u8 f_ecn[0x1];
2479 	u8 reserved_at_106[0x1];
2480 	u8 f_eth_prio[0x1];
2481 	u8 ecn[0x2];
2482 	u8 dscp[0x6];
2483 	u8 udp_sport[0x10];
2484 	u8 dei_cfi[0x1];
2485 	u8 eth_prio[0x3];
2486 	u8 sl[0x4];
2487 	u8 vhca_port_num[0x8];
2488 	u8 rmac_47_32[0x10];
2489 	u8 rmac_31_0[0x20];
2490 };
2491 
2492 struct mlx5_ifc_qpc_bits {
2493 	u8 state[0x4];
2494 	u8 lag_tx_port_affinity[0x4];
2495 	u8 st[0x8];
2496 	u8 reserved_at_10[0x3];
2497 	u8 pm_state[0x2];
2498 	u8 reserved_at_15[0x1];
2499 	u8 req_e2e_credit_mode[0x2];
2500 	u8 offload_type[0x4];
2501 	u8 end_padding_mode[0x2];
2502 	u8 reserved_at_1e[0x2];
2503 	u8 wq_signature[0x1];
2504 	u8 block_lb_mc[0x1];
2505 	u8 atomic_like_write_en[0x1];
2506 	u8 latency_sensitive[0x1];
2507 	u8 reserved_at_24[0x1];
2508 	u8 drain_sigerr[0x1];
2509 	u8 reserved_at_26[0x2];
2510 	u8 pd[0x18];
2511 	u8 mtu[0x3];
2512 	u8 log_msg_max[0x5];
2513 	u8 reserved_at_48[0x1];
2514 	u8 log_rq_size[0x4];
2515 	u8 log_rq_stride[0x3];
2516 	u8 no_sq[0x1];
2517 	u8 log_sq_size[0x4];
2518 	u8 reserved_at_55[0x6];
2519 	u8 rlky[0x1];
2520 	u8 ulp_stateless_offload_mode[0x4];
2521 	u8 counter_set_id[0x8];
2522 	u8 uar_page[0x18];
2523 	u8 reserved_at_80[0x8];
2524 	u8 user_index[0x18];
2525 	u8 reserved_at_a0[0x3];
2526 	u8 log_page_size[0x5];
2527 	u8 remote_qpn[0x18];
2528 	struct mlx5_ifc_ads_bits primary_address_path;
2529 	struct mlx5_ifc_ads_bits secondary_address_path;
2530 	u8 log_ack_req_freq[0x4];
2531 	u8 reserved_at_384[0x4];
2532 	u8 log_sra_max[0x3];
2533 	u8 reserved_at_38b[0x2];
2534 	u8 retry_count[0x3];
2535 	u8 rnr_retry[0x3];
2536 	u8 reserved_at_393[0x1];
2537 	u8 fre[0x1];
2538 	u8 cur_rnr_retry[0x3];
2539 	u8 cur_retry_count[0x3];
2540 	u8 reserved_at_39b[0x5];
2541 	u8 reserved_at_3a0[0x20];
2542 	u8 reserved_at_3c0[0x8];
2543 	u8 next_send_psn[0x18];
2544 	u8 reserved_at_3e0[0x8];
2545 	u8 cqn_snd[0x18];
2546 	u8 reserved_at_400[0x8];
2547 	u8 deth_sqpn[0x18];
2548 	u8 reserved_at_420[0x20];
2549 	u8 reserved_at_440[0x8];
2550 	u8 last_acked_psn[0x18];
2551 	u8 reserved_at_460[0x8];
2552 	u8 ssn[0x18];
2553 	u8 reserved_at_480[0x8];
2554 	u8 log_rra_max[0x3];
2555 	u8 reserved_at_48b[0x1];
2556 	u8 atomic_mode[0x4];
2557 	u8 rre[0x1];
2558 	u8 rwe[0x1];
2559 	u8 rae[0x1];
2560 	u8 reserved_at_493[0x1];
2561 	u8 page_offset[0x6];
2562 	u8 reserved_at_49a[0x3];
2563 	u8 cd_slave_receive[0x1];
2564 	u8 cd_slave_send[0x1];
2565 	u8 cd_master[0x1];
2566 	u8 reserved_at_4a0[0x3];
2567 	u8 min_rnr_nak[0x5];
2568 	u8 next_rcv_psn[0x18];
2569 	u8 reserved_at_4c0[0x8];
2570 	u8 xrcd[0x18];
2571 	u8 reserved_at_4e0[0x8];
2572 	u8 cqn_rcv[0x18];
2573 	u8 dbr_addr[0x40];
2574 	u8 q_key[0x20];
2575 	u8 reserved_at_560[0x5];
2576 	u8 rq_type[0x3];
2577 	u8 srqn_rmpn_xrqn[0x18];
2578 	u8 reserved_at_580[0x8];
2579 	u8 rmsn[0x18];
2580 	u8 hw_sq_wqebb_counter[0x10];
2581 	u8 sw_sq_wqebb_counter[0x10];
2582 	u8 hw_rq_counter[0x20];
2583 	u8 sw_rq_counter[0x20];
2584 	u8 reserved_at_600[0x20];
2585 	u8 reserved_at_620[0xf];
2586 	u8 cgs[0x1];
2587 	u8 cs_req[0x8];
2588 	u8 cs_res[0x8];
2589 	u8 dc_access_key[0x40];
2590 	u8 reserved_at_680[0x3];
2591 	u8 dbr_umem_valid[0x1];
2592 	u8 reserved_at_684[0x9c];
2593 	u8 dbr_umem_id[0x20];
2594 };
2595 
2596 struct mlx5_ifc_create_qp_out_bits {
2597 	u8 status[0x8];
2598 	u8 reserved_at_8[0x18];
2599 	u8 syndrome[0x20];
2600 	u8 reserved_at_40[0x8];
2601 	u8 qpn[0x18];
2602 	u8 reserved_at_60[0x20];
2603 };
2604 
2605 #ifdef PEDANTIC
2606 #pragma GCC diagnostic ignored "-Wpedantic"
2607 #endif
2608 struct mlx5_ifc_create_qp_in_bits {
2609 	u8 opcode[0x10];
2610 	u8 uid[0x10];
2611 	u8 reserved_at_20[0x10];
2612 	u8 op_mod[0x10];
2613 	u8 reserved_at_40[0x40];
2614 	u8 opt_param_mask[0x20];
2615 	u8 reserved_at_a0[0x20];
2616 	struct mlx5_ifc_qpc_bits qpc;
2617 	u8 wq_umem_offset[0x40];
2618 	u8 wq_umem_id[0x20];
2619 	u8 wq_umem_valid[0x1];
2620 	u8 reserved_at_861[0x1f];
2621 	u8 pas[0][0x40];
2622 };
2623 #ifdef PEDANTIC
2624 #pragma GCC diagnostic error "-Wpedantic"
2625 #endif
2626 
2627 struct mlx5_ifc_sqerr2rts_qp_out_bits {
2628 	u8 status[0x8];
2629 	u8 reserved_at_8[0x18];
2630 	u8 syndrome[0x20];
2631 	u8 reserved_at_40[0x40];
2632 };
2633 
2634 struct mlx5_ifc_sqerr2rts_qp_in_bits {
2635 	u8 opcode[0x10];
2636 	u8 uid[0x10];
2637 	u8 reserved_at_20[0x10];
2638 	u8 op_mod[0x10];
2639 	u8 reserved_at_40[0x8];
2640 	u8 qpn[0x18];
2641 	u8 reserved_at_60[0x20];
2642 	u8 opt_param_mask[0x20];
2643 	u8 reserved_at_a0[0x20];
2644 	struct mlx5_ifc_qpc_bits qpc;
2645 	u8 reserved_at_800[0x80];
2646 };
2647 
2648 struct mlx5_ifc_sqd2rts_qp_out_bits {
2649 	u8 status[0x8];
2650 	u8 reserved_at_8[0x18];
2651 	u8 syndrome[0x20];
2652 	u8 reserved_at_40[0x40];
2653 };
2654 
2655 struct mlx5_ifc_sqd2rts_qp_in_bits {
2656 	u8 opcode[0x10];
2657 	u8 uid[0x10];
2658 	u8 reserved_at_20[0x10];
2659 	u8 op_mod[0x10];
2660 	u8 reserved_at_40[0x8];
2661 	u8 qpn[0x18];
2662 	u8 reserved_at_60[0x20];
2663 	u8 opt_param_mask[0x20];
2664 	u8 reserved_at_a0[0x20];
2665 	struct mlx5_ifc_qpc_bits qpc;
2666 	u8 reserved_at_800[0x80];
2667 };
2668 
2669 struct mlx5_ifc_rts2rts_qp_out_bits {
2670 	u8 status[0x8];
2671 	u8 reserved_at_8[0x18];
2672 	u8 syndrome[0x20];
2673 	u8 reserved_at_40[0x40];
2674 };
2675 
2676 struct mlx5_ifc_rts2rts_qp_in_bits {
2677 	u8 opcode[0x10];
2678 	u8 uid[0x10];
2679 	u8 reserved_at_20[0x10];
2680 	u8 op_mod[0x10];
2681 	u8 reserved_at_40[0x8];
2682 	u8 qpn[0x18];
2683 	u8 reserved_at_60[0x20];
2684 	u8 opt_param_mask[0x20];
2685 	u8 reserved_at_a0[0x20];
2686 	struct mlx5_ifc_qpc_bits qpc;
2687 	u8 reserved_at_800[0x80];
2688 };
2689 
2690 struct mlx5_ifc_rtr2rts_qp_out_bits {
2691 	u8 status[0x8];
2692 	u8 reserved_at_8[0x18];
2693 	u8 syndrome[0x20];
2694 	u8 reserved_at_40[0x40];
2695 };
2696 
2697 struct mlx5_ifc_rtr2rts_qp_in_bits {
2698 	u8 opcode[0x10];
2699 	u8 uid[0x10];
2700 	u8 reserved_at_20[0x10];
2701 	u8 op_mod[0x10];
2702 	u8 reserved_at_40[0x8];
2703 	u8 qpn[0x18];
2704 	u8 reserved_at_60[0x20];
2705 	u8 opt_param_mask[0x20];
2706 	u8 reserved_at_a0[0x20];
2707 	struct mlx5_ifc_qpc_bits qpc;
2708 	u8 reserved_at_800[0x80];
2709 };
2710 
2711 struct mlx5_ifc_rst2init_qp_out_bits {
2712 	u8 status[0x8];
2713 	u8 reserved_at_8[0x18];
2714 	u8 syndrome[0x20];
2715 	u8 reserved_at_40[0x40];
2716 };
2717 
2718 struct mlx5_ifc_rst2init_qp_in_bits {
2719 	u8 opcode[0x10];
2720 	u8 uid[0x10];
2721 	u8 reserved_at_20[0x10];
2722 	u8 op_mod[0x10];
2723 	u8 reserved_at_40[0x8];
2724 	u8 qpn[0x18];
2725 	u8 reserved_at_60[0x20];
2726 	u8 opt_param_mask[0x20];
2727 	u8 reserved_at_a0[0x20];
2728 	struct mlx5_ifc_qpc_bits qpc;
2729 	u8 reserved_at_800[0x80];
2730 };
2731 
2732 struct mlx5_ifc_init2rtr_qp_out_bits {
2733 	u8 status[0x8];
2734 	u8 reserved_at_8[0x18];
2735 	u8 syndrome[0x20];
2736 	u8 reserved_at_40[0x40];
2737 };
2738 
2739 struct mlx5_ifc_init2rtr_qp_in_bits {
2740 	u8 opcode[0x10];
2741 	u8 uid[0x10];
2742 	u8 reserved_at_20[0x10];
2743 	u8 op_mod[0x10];
2744 	u8 reserved_at_40[0x8];
2745 	u8 qpn[0x18];
2746 	u8 reserved_at_60[0x20];
2747 	u8 opt_param_mask[0x20];
2748 	u8 reserved_at_a0[0x20];
2749 	struct mlx5_ifc_qpc_bits qpc;
2750 	u8 reserved_at_800[0x80];
2751 };
2752 
2753 struct mlx5_ifc_init2init_qp_out_bits {
2754 	u8 status[0x8];
2755 	u8 reserved_at_8[0x18];
2756 	u8 syndrome[0x20];
2757 	u8 reserved_at_40[0x40];
2758 };
2759 
2760 struct mlx5_ifc_init2init_qp_in_bits {
2761 	u8 opcode[0x10];
2762 	u8 uid[0x10];
2763 	u8 reserved_at_20[0x10];
2764 	u8 op_mod[0x10];
2765 	u8 reserved_at_40[0x8];
2766 	u8 qpn[0x18];
2767 	u8 reserved_at_60[0x20];
2768 	u8 opt_param_mask[0x20];
2769 	u8 reserved_at_a0[0x20];
2770 	struct mlx5_ifc_qpc_bits qpc;
2771 	u8 reserved_at_800[0x80];
2772 };
2773 
2774 #ifdef PEDANTIC
2775 #pragma GCC diagnostic ignored "-Wpedantic"
2776 #endif
2777 struct mlx5_ifc_query_qp_out_bits {
2778 	u8 status[0x8];
2779 	u8 reserved_at_8[0x18];
2780 	u8 syndrome[0x20];
2781 	u8 reserved_at_40[0x40];
2782 	u8 opt_param_mask[0x20];
2783 	u8 reserved_at_a0[0x20];
2784 	struct mlx5_ifc_qpc_bits qpc;
2785 	u8 reserved_at_800[0x80];
2786 	u8 pas[0][0x40];
2787 };
2788 #ifdef PEDANTIC
2789 #pragma GCC diagnostic error "-Wpedantic"
2790 #endif
2791 
2792 struct mlx5_ifc_query_qp_in_bits {
2793 	u8 opcode[0x10];
2794 	u8 reserved_at_10[0x10];
2795 	u8 reserved_at_20[0x10];
2796 	u8 op_mod[0x10];
2797 	u8 reserved_at_40[0x8];
2798 	u8 qpn[0x18];
2799 	u8 reserved_at_60[0x20];
2800 };
2801 
2802 enum {
2803 	MLX5_DATA_RATE = 0x0,
2804 	MLX5_WQE_RATE = 0x1,
2805 };
2806 
2807 struct mlx5_ifc_set_pp_rate_limit_context_bits {
2808 	u8 rate_limit[0x20];
2809 	u8 burst_upper_bound[0x20];
2810 	u8 reserved_at_40[0xC];
2811 	u8 rate_mode[0x4];
2812 	u8 typical_packet_size[0x10];
2813 	u8 reserved_at_60[0x120];
2814 };
2815 
2816 #define MLX5_ACCESS_REGISTER_DATA_DWORD_MAX 8u
2817 
2818 #ifdef PEDANTIC
2819 #pragma GCC diagnostic ignored "-Wpedantic"
2820 #endif
2821 struct mlx5_ifc_access_register_out_bits {
2822 	u8 status[0x8];
2823 	u8 reserved_at_8[0x18];
2824 	u8 syndrome[0x20];
2825 	u8 reserved_at_40[0x40];
2826 	u8 register_data[0][0x20];
2827 };
2828 
2829 struct mlx5_ifc_access_register_in_bits {
2830 	u8 opcode[0x10];
2831 	u8 reserved_at_10[0x10];
2832 	u8 reserved_at_20[0x10];
2833 	u8 op_mod[0x10];
2834 	u8 reserved_at_40[0x10];
2835 	u8 register_id[0x10];
2836 	u8 argument[0x20];
2837 	u8 register_data[0][0x20];
2838 };
2839 #ifdef PEDANTIC
2840 #pragma GCC diagnostic error "-Wpedantic"
2841 #endif
2842 
2843 enum {
2844 	MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE  = 0x0,
2845 	MLX5_ACCESS_REGISTER_IN_OP_MOD_READ   = 0x1,
2846 };
2847 
2848 enum {
2849 	MLX5_REGISTER_ID_MTUTC  = 0x9055,
2850 };
2851 
2852 struct mlx5_ifc_register_mtutc_bits {
2853 	u8 time_stamp_mode[0x2];
2854 	u8 time_stamp_state[0x2];
2855 	u8 reserved_at_4[0x18];
2856 	u8 operation[0x4];
2857 	u8 freq_adjustment[0x20];
2858 	u8 reserved_at_40[0x40];
2859 	u8 utc_sec[0x20];
2860 	u8 utc_nsec[0x20];
2861 	u8 time_adjustment[0x20];
2862 };
2863 
2864 #define MLX5_MTUTC_TIMESTAMP_MODE_INTERNAL_TIMER 0
2865 #define MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME 1
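
/*
 * Illustrative sketch (not part of the PRM definitions): reading the MTUTC
 * register through the ACCESS_REGISTER layouts above. The wrapper name is
 * hypothetical; it assumes the MLX5_SET/MLX5_GET/MLX5_ADDR_OF/MLX5_ST_SZ_DW
 * helpers and the MLX5_CMD_OP_ACCESS_REGISTER opcode defined earlier in
 * this header.
 */
static inline int
mlx5_example_read_mtutc_sec(struct ibv_context *ctx, uint32_t *utc_sec)
{
	uint32_t in[MLX5_ST_SZ_DW(access_register_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(access_register_out) +
		     MLX5_ACCESS_REGISTER_DATA_DWORD_MAX] = {0};
	int rc;

	MLX5_SET(access_register_in, in, opcode, MLX5_CMD_OP_ACCESS_REGISTER);
	MLX5_SET(access_register_in, in, op_mod,
		 MLX5_ACCESS_REGISTER_IN_OP_MOD_READ);
	MLX5_SET(access_register_in, in, register_id, MLX5_REGISTER_ID_MTUTC);
	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
					 out, sizeof(out));
	if (rc)
		return rc;
	/* The register payload uses the register_mtutc layout above. */
	*utc_sec = MLX5_GET(register_mtutc,
			    MLX5_ADDR_OF(access_register_out, out,
					 register_data),
			    utc_sec);
	return 0;
}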
2866 
2867 struct mlx5_ifc_parse_graph_arc_bits {
2868 	u8 start_inner_tunnel[0x1];
2869 	u8 reserved_at_1[0x7];
2870 	u8 arc_parse_graph_node[0x8];
2871 	u8 compare_condition_value[0x10];
2872 	u8 parse_graph_node_handle[0x20];
2873 	u8 reserved_at_40[0x40];
2874 };
2875 
2876 struct mlx5_ifc_parse_graph_flow_match_sample_bits {
2877 	u8 flow_match_sample_en[0x1];
2878 	u8 reserved_at_1[0x3];
2879 	u8 flow_match_sample_offset_mode[0x4];
2880 	u8 reserved_at_5[0x8];
2881 	u8 flow_match_sample_field_offset[0x10];
2882 	u8 reserved_at_32[0x4];
2883 	u8 flow_match_sample_field_offset_shift[0x4];
2884 	u8 flow_match_sample_field_base_offset[0x8];
2885 	u8 reserved_at_48[0xd];
2886 	u8 flow_match_sample_tunnel_mode[0x3];
2887 	u8 flow_match_sample_field_offset_mask[0x20];
2888 	u8 flow_match_sample_field_id[0x20];
2889 };
2890 
2891 struct mlx5_ifc_parse_graph_flex_bits {
2892 	u8 modify_field_select[0x40];
2893 	u8 reserved_at_64[0x20];
2894 	u8 header_length_base_value[0x10];
2895 	u8 reserved_at_112[0x4];
2896 	u8 header_length_field_shift[0x4];
2897 	u8 reserved_at_120[0x4];
2898 	u8 header_length_mode[0x4];
2899 	u8 header_length_field_offset[0x10];
2900 	u8 next_header_field_offset[0x10];
2901 	u8 reserved_at_160[0x1b];
2902 	u8 next_header_field_size[0x5];
2903 	u8 header_length_field_mask[0x20];
2904 	u8 reserved_at_224[0x20];
2905 	struct mlx5_ifc_parse_graph_flow_match_sample_bits sample_table[0x8];
2906 	struct mlx5_ifc_parse_graph_arc_bits input_arc[0x8];
2907 	struct mlx5_ifc_parse_graph_arc_bits output_arc[0x8];
2908 };
2909 
2910 struct mlx5_ifc_create_flex_parser_in_bits {
2911 	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
2912 	struct mlx5_ifc_parse_graph_flex_bits flex;
2913 };
2914 
2915 struct mlx5_ifc_create_flex_parser_out_bits {
2916 	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
2917 	struct mlx5_ifc_parse_graph_flex_bits flex;
2918 };
2919 
2920 struct mlx5_ifc_parse_graph_flex_out_bits {
2921 	u8 status[0x8];
2922 	u8 reserved_at_8[0x18];
2923 	u8 syndrome[0x20];
2924 	u8 reserved_at_40[0x40];
2925 	struct mlx5_ifc_parse_graph_flex_bits capability;
2926 };
2927 
2928 struct regexp_params_field_select_bits {
2929 	u8 reserved_at_0[0x1e];
2930 	u8 stop_engine[0x1];
2931 	u8 db_umem_id[0x1];
2932 };
2933 
2934 struct mlx5_ifc_regexp_params_bits {
2935 	u8 reserved_at_0[0x1f];
2936 	u8 stop_engine[0x1];
2937 	u8 db_umem_id[0x20];
2938 	u8 db_umem_offset[0x40];
2939 	u8 reserved_at_80[0x100];
2940 };
2941 
2942 struct mlx5_ifc_set_regexp_params_in_bits {
2943 	u8 opcode[0x10];
2944 	u8 uid[0x10];
2945 	u8 reserved_at_20[0x10];
2946 	u8 op_mod[0x10];
2947 	u8 reserved_at_40[0x18];
2948 	u8 engine_id[0x8];
2949 	struct regexp_params_field_select_bits field_select;
2950 	struct mlx5_ifc_regexp_params_bits regexp_params;
2951 };
2952 
2953 struct mlx5_ifc_set_regexp_params_out_bits {
2954 	u8 status[0x8];
2955 	u8 reserved_at_8[0x18];
2956 	u8 syndrome[0x20];
2957 	u8 reserved_at_18[0x40];
2958 };
2959 
2960 struct mlx5_ifc_query_regexp_params_in_bits {
2961 	u8 opcode[0x10];
2962 	u8 uid[0x10];
2963 	u8 reserved_at_20[0x10];
2964 	u8 op_mod[0x10];
2965 	u8 reserved_at_40[0x18];
2966 	u8 engine_id[0x8];
2967 	u8 reserved[0x20];
2968 };
2969 
2970 struct mlx5_ifc_query_regexp_params_out_bits {
2971 	u8 status[0x8];
2972 	u8 reserved_at_8[0x18];
2973 	u8 syndrome[0x20];
2974 	u8 reserved[0x40];
2975 	struct mlx5_ifc_regexp_params_bits regexp_params;
2976 };
2977 
2978 struct mlx5_ifc_set_regexp_register_in_bits {
2979 	u8 opcode[0x10];
2980 	u8 uid[0x10];
2981 	u8 reserved_at_20[0x10];
2982 	u8 op_mod[0x10];
2983 	u8 reserved_at_40[0x18];
2984 	u8 engine_id[0x8];
2985 	u8 register_address[0x20];
2986 	u8 register_data[0x20];
2987 	u8 reserved[0x60];
2988 };
2989 
2990 struct mlx5_ifc_set_regexp_register_out_bits {
2991 	u8 status[0x8];
2992 	u8 reserved_at_8[0x18];
2993 	u8 syndrome[0x20];
2994 	u8 reserved[0x40];
2995 };
2996 
2997 struct mlx5_ifc_query_regexp_register_in_bits {
2998 	u8 opcode[0x10];
2999 	u8 uid[0x10];
3000 	u8 reserved_at_20[0x10];
3001 	u8 op_mod[0x10];
3002 	u8 reserved_at_40[0x18];
3003 	u8 engine_id[0x8];
3004 	u8 register_address[0x20];
3005 };
3006 
3007 struct mlx5_ifc_query_regexp_register_out_bits {
3008 	u8 status[0x8];
3009 	u8 reserved_at_8[0x18];
3010 	u8 syndrome[0x20];
3011 	u8 reserved[0x20];
3012 	u8 register_data[0x20];
3013 };
3014 
3015 /* CQE format mask. */
3016 #define MLX5E_CQE_FORMAT_MASK 0xc
3017 
3018 /* MPW opcode. */
3019 #define MLX5_OPC_MOD_MPW 0x01
3020 
3021 /* Compressed Rx CQE structure. */
3022 struct mlx5_mini_cqe8 {
3023 	union {
3024 		uint32_t rx_hash_result;
3025 		struct {
3026 			union {
3027 				uint16_t checksum;
3028 				uint16_t flow_tag_high;
3029 				struct {
3030 					uint8_t reserved;
3031 					uint8_t hdr_type;
3032 				};
3033 			};
3034 			uint16_t stride_idx;
3035 		};
3036 		struct {
3037 			uint16_t wqe_counter;
3038 			uint8_t  s_wqe_opcode;
3039 			uint8_t  reserved;
3040 		} s_wqe_info;
3041 	};
3042 	union {
3043 		uint32_t byte_cnt_flow;
3044 		uint32_t byte_cnt;
3045 	};
3046 };
3047 
3048 /* Mini CQE responder format. */
3049 enum {
3050 	MLX5_CQE_RESP_FORMAT_HASH = 0x0,
3051 	MLX5_CQE_RESP_FORMAT_CSUM = 0x1,
3052 	MLX5_CQE_RESP_FORMAT_FTAG_STRIDX = 0x2,
3053 	MLX5_CQE_RESP_FORMAT_CSUM_STRIDX = 0x3,
3054 	MLX5_CQE_RESP_FORMAT_L34H_STRIDX = 0x4,
3055 };
3056 
3057 /* srTCM PRM flow meter parameters. */
3058 enum {
3059 	MLX5_FLOW_COLOR_RED = 0,
3060 	MLX5_FLOW_COLOR_YELLOW,
3061 	MLX5_FLOW_COLOR_GREEN,
3062 	MLX5_FLOW_COLOR_UNDEFINED,
3063 };
3064 
3065 /* Maximum value of srTCM metering parameters. */
3066 #define MLX5_SRTCM_CBS_MAX (0xFF * (1ULL << 0x1F))
3067 #define MLX5_SRTCM_CIR_MAX (8 * (1ULL << 30) * 0xFF)
3068 #define MLX5_SRTCM_EBS_MAX 0
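
/*
 * Illustrative helper (an assumption based on the cbs/ebs exponent and
 * mantissa fields of mlx5_ifc_flow_meter_parameters_bits and the limits
 * above): burst sizes appear to be encoded as mantissa * 2^exponent bytes,
 * so the maximum CBS equals 0xFF << 0x1F, i.e. MLX5_SRTCM_CBS_MAX.
 */
static inline uint64_t
mlx5_example_srtcm_xbs_bytes(uint8_t mantissa, uint8_t exponent)
{
	return (uint64_t)mantissa << exponent;
}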
3069 
3070 /* The bits meter color use. */
3071 #define MLX5_MTR_COLOR_BITS 8
3072 
3073 /* Length mode of dynamic flex parser graph node. */
3074 enum mlx5_parse_graph_node_len_mode {
3075 	MLX5_GRAPH_NODE_LEN_FIXED = 0x0,
3076 	MLX5_GRAPH_NODE_LEN_FIELD = 0x1,
3077 	MLX5_GRAPH_NODE_LEN_BITMASK = 0x2,
3078 };
3079 
3080 /* Offset mode of the samples of flex parser. */
3081 enum mlx5_parse_graph_flow_match_sample_offset_mode {
3082 	MLX5_GRAPH_SAMPLE_OFFSET_FIXED = 0x0,
3083 	MLX5_GRAPH_SAMPLE_OFFSET_FIELD = 0x1,
3084 	MLX5_GRAPH_SAMPLE_OFFSET_BITMASK = 0x2,
3085 };
3086 
3087 /* Node index for an input / output arc of the flex parser graph. */
3088 enum mlx5_parse_graph_arc_node_index {
3089 	MLX5_GRAPH_ARC_NODE_NULL = 0x0,
3090 	MLX5_GRAPH_ARC_NODE_HEAD = 0x1,
3091 	MLX5_GRAPH_ARC_NODE_MAC = 0x2,
3092 	MLX5_GRAPH_ARC_NODE_IP = 0x3,
3093 	MLX5_GRAPH_ARC_NODE_GRE = 0x4,
3094 	MLX5_GRAPH_ARC_NODE_UDP = 0x5,
3095 	MLX5_GRAPH_ARC_NODE_MPLS = 0x6,
3096 	MLX5_GRAPH_ARC_NODE_TCP = 0x7,
3097 	MLX5_GRAPH_ARC_NODE_VXLAN_GPE = 0x8,
3098 	MLX5_GRAPH_ARC_NODE_GENEVE = 0x9,
3099 	MLX5_GRAPH_ARC_NODE_IPSEC_ESP = 0xa,
3100 	MLX5_GRAPH_ARC_NODE_PROGRAMMABLE = 0x1f,
3101 };
3102 
3103 /**
3104  * Convert a user mark to flow mark.
3105  *
3106  * @param val
3107  *   Mark value to convert.
3108  *
3109  * @return
3110  *   Converted mark value.
3111  */
3112 static inline uint32_t
3113 mlx5_flow_mark_set(uint32_t val)
3114 {
3115 	uint32_t ret;
3116 
3117 	/*
3118 	 * Add one to the user value to differentiate un-marked flows from
3119 	 * marked flows, if the ID is equal to MLX5_FLOW_MARK_DEFAULT it
3120 	 * remains untouched.
3121 	 */
3122 	if (val != MLX5_FLOW_MARK_DEFAULT)
3123 		++val;
3124 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3125 	/*
3126 	 * Mark is 24 bits (minus reserved values) but is stored on a 32 bit
3127 	 * word, byte-swapped here on little-endian hosts. Right-shifting the
3128 	 * byte-swapped value by 8 ensures the 24 significant bits are
3129 	 * retained when converting it back.
3130 	 */
3131 	ret = rte_cpu_to_be_32(val) >> 8;
3132 #else
3133 	ret = val;
3134 #endif
3135 	return ret;
3136 }
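
/*
 * Worked example of the conversion above (illustrative only, assuming 2 is
 * not MLX5_FLOW_MARK_DEFAULT): on a little-endian host,
 * mlx5_flow_mark_set(2) increments the value to 3, byte-swaps it to
 * 0x03000000 and shifts right by 8, returning 0x00030000; stored in memory
 * the first three bytes are 00 00 03, i.e. the mark in 24-bit big-endian
 * order.
 */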
3137 
3138 /**
3139  * Convert a mark to user mark.
3140  *
3141  * @param val
3142  *   Mark value to convert.
3143  *
3144  * @return
3145  *   Converted mark value.
3146  */
3147 static inline uint32_t
3148 mlx5_flow_mark_get(uint32_t val)
3149 {
3150 	/*
3151 	 * Subtract one from the retrieved value. It was added by
3152 	 * mlx5_flow_mark_set() to distinguish unmarked flows.
3153 	 */
3154 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3155 	return (val >> 8) - 1;
3156 #else
3157 	return val - 1;
3158 #endif
3159 }
3160 
3161 #endif /* RTE_PMD_MLX5_PRM_H_ */
3162