/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DEVICE_H
#define MLX5_DEVICE_H

#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/mlx5_ifc.h>

#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0x80
#else
#error Host endianness not defined
#endif

/* helper macros */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)

#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))

/* insert a value to a struct; a usage sketch follows the getters below */
#define MLX5_SET(typ, p, fld, v) do { \
	u32 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
	MLX5_SET(typ, p, fld[idx], v); \
} while (0)

#define MLX5_SET_TO_ONES(typ, p, fld) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))

#define MLX5_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})
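
/*
 * Usage sketch (illustrative, not part of this header): create_cq_in,
 * cq_context and log_cq_size are real mlx5_ifc names, but the code below
 * is only an example of how the accessors compose:
 *
 *	u32 in[MLX5_ST_SZ_DW(create_cq_in)] = {};
 *	void *cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 *
 *	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
 *	MLX5_SET(cqc, cqc, log_cq_size, 6);
 *	... execute the command, then read fields back with MLX5_GET() ...
 */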

#define __MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)

#define MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld, v); \
} while (0)

#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld[idx], v); \
} while (0)

#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))

#define MLX5_GET64_PR(typ, p, fld) ({ \
	u64 ___t = MLX5_GET64(typ, p, fld); \
	pr_debug(#fld " = 0x%llx\n", ___t); \
	___t; \
})

#define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\
__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
__mlx5_mask16(typ, fld))

#define MLX5_SET16(typ, p, fld, v) do { \
	u16 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16);             \
	*((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
	cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
		     (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
		     << __mlx5_16_bit_off(typ, fld))); \
} while (0)

/* Big endian getters */
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
	__mlx5_64_off(typ, fld)))

#define MLX5_GET_BE(type_t, typ, p, fld) ({				  \
		type_t tmp;						  \
		switch (sizeof(tmp)) {					  \
		case sizeof(u8):					  \
			tmp = (__force type_t)MLX5_GET(typ, p, fld);	  \
			break;						  \
		case sizeof(u16):					  \
			tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
			break;						  \
		case sizeof(u32):					  \
			tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
			break;						  \
		case sizeof(u64):					  \
			tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
			break;						  \
		}							  \
		tmp;							  \
		})

enum mlx5_inline_modes {
	MLX5_INLINE_MODE_NONE,
	MLX5_INLINE_MODE_L2,
	MLX5_INLINE_MODE_IP,
	MLX5_INLINE_MODE_TCP_UDP,
};

enum {
	MLX5_MAX_COMMANDS		= 32,
	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
	MLX5_PCI_CMD_XPORT		= 7,
	MLX5_MKEY_BSF_OCTO_SIZE		= 4,
	MLX5_MAX_PSVS			= 4,
};

enum {
	MLX5_EXTENDED_UD_AV		= 0x80000000,
};

enum {
	MLX5_CQ_STATE_ARMED		= 9,
	MLX5_CQ_STATE_ALWAYS_ARMED	= 0xb,
	MLX5_CQ_STATE_FIRED		= 0xa,
};

enum {
	MLX5_STAT_RATE_OFFSET	= 5,
};

enum {
	MLX5_INLINE_SEG = 0x80000000,
};

enum {
	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
};

enum {
	MLX5_MIN_PKEY_TABLE_SIZE = 128,
	MLX5_MAX_LOG_PKEY_TABLE  = 5,
};

enum {
	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
};

enum {
	MLX5_PFAULT_SUBTYPE_WQE = 0,
	MLX5_PFAULT_SUBTYPE_RDMA = 1,
};

enum wqe_page_fault_type {
	MLX5_WQE_PF_TYPE_RMP = 0,
	MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
	MLX5_WQE_PF_TYPE_RESP = 2,
	MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
};

enum {
	MLX5_PERM_LOCAL_READ	= 1 << 2,
	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
	MLX5_PERM_REMOTE_READ	= 1 << 4,
	MLX5_PERM_REMOTE_WRITE	= 1 << 5,
	MLX5_PERM_ATOMIC	= 1 << 6,
	MLX5_PERM_UMR_EN	= 1 << 7,
};

enum {
	MLX5_PCIE_CTRL_SMALL_FENCE	= 1 << 0,
	MLX5_PCIE_CTRL_RELAXED_ORDERING	= 1 << 2,
	MLX5_PCIE_CTRL_NO_SNOOP		= 1 << 3,
	MLX5_PCIE_CTRL_TLP_PROCE_EN	= 1 << 6,
	MLX5_PCIE_CTRL_TPH_MASK		= 3 << 4,
};

enum {
	MLX5_EN_RD	= (u64)1,
	MLX5_EN_WR	= (u64)2
};

enum {
	MLX5_ADAPTER_PAGE_SHIFT		= 12,
	MLX5_ADAPTER_PAGE_SIZE		= 1 << MLX5_ADAPTER_PAGE_SHIFT,
};

enum {
	MLX5_BFREGS_PER_UAR		= 4,
	MLX5_MAX_UARS			= 1 << 8,
	MLX5_NON_FP_BFREGS_PER_UAR	= 2,
	MLX5_FP_BFREGS_PER_UAR		= MLX5_BFREGS_PER_UAR -
					  MLX5_NON_FP_BFREGS_PER_UAR,
	MLX5_MAX_BFREGS			= MLX5_MAX_UARS *
					  MLX5_NON_FP_BFREGS_PER_UAR,
	MLX5_UARS_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
	MLX5_NON_FP_BFREGS_IN_PAGE	= MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
	MLX5_MIN_DYN_BFREGS		= 512,
	MLX5_MAX_DYN_BFREGS		= 1024,
};

enum {
	MLX5_MKEY_MASK_LEN		= 1ull << 0,
	MLX5_MKEY_MASK_PAGE_SIZE	= 1ull << 1,
	MLX5_MKEY_MASK_START_ADDR	= 1ull << 6,
	MLX5_MKEY_MASK_PD		= 1ull << 7,
	MLX5_MKEY_MASK_EN_RINVAL	= 1ull << 8,
	MLX5_MKEY_MASK_EN_SIGERR	= 1ull << 9,
	MLX5_MKEY_MASK_BSF_EN		= 1ull << 12,
	MLX5_MKEY_MASK_KEY		= 1ull << 13,
	MLX5_MKEY_MASK_QPN		= 1ull << 14,
	MLX5_MKEY_MASK_LR		= 1ull << 17,
	MLX5_MKEY_MASK_LW		= 1ull << 18,
	MLX5_MKEY_MASK_RR		= 1ull << 19,
	MLX5_MKEY_MASK_RW		= 1ull << 20,
	MLX5_MKEY_MASK_A		= 1ull << 21,
	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
	MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE	= 1ull << 25,
	MLX5_MKEY_MASK_FREE			= 1ull << 29,
	MLX5_MKEY_MASK_RELAXED_ORDERING_READ	= 1ull << 47,
};

enum {
	MLX5_UMR_TRANSLATION_OFFSET_EN	= (1 << 4),

	MLX5_UMR_CHECK_NOT_FREE		= (1 << 5),
	MLX5_UMR_CHECK_FREE		= (2 << 5),

	MLX5_UMR_INLINE			= (1 << 7),
};

#define MLX5_UMR_MTT_ALIGNMENT 0x40
#define MLX5_UMR_MTT_MASK      (MLX5_UMR_MTT_ALIGNMENT - 1)
#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT

#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)

enum {
	MLX5_EVENT_QUEUE_TYPE_QP = 0,
	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
	MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};

/* mlx5 components can subscribe to any one of these events via the
 * mlx5_eq_notifier_register API; a registration sketch follows this enum.
 */
enum mlx5_event {
	/* Special value to subscribe to any event */
	MLX5_EVENT_TYPE_NOTIFY_ANY	   = 0x0,
	/* HW events enum start: comp events are not subscribable */
	MLX5_EVENT_TYPE_COMP		   = 0x0,
	/* HW Async events enum start: subscribable events */
	MLX5_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX5_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX5_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX5_EVENT_TYPE_SRQ_LAST_WQE	   = 0x13,
	MLX5_EVENT_TYPE_SRQ_RQ_LIMIT	   = 0x14,

	MLX5_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX5_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX5_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX5_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX5_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,

	MLX5_EVENT_TYPE_INTERNAL_ERROR	   = 0x08,
	MLX5_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX5_EVENT_TYPE_GPIO_EVENT	   = 0x15,
	MLX5_EVENT_TYPE_PORT_MODULE_EVENT  = 0x16,
	MLX5_EVENT_TYPE_TEMP_WARN_EVENT    = 0x17,
	MLX5_EVENT_TYPE_XRQ_ERROR	   = 0x18,
	MLX5_EVENT_TYPE_REMOTE_CONFIG	   = 0x19,
	MLX5_EVENT_TYPE_GENERAL_EVENT	   = 0x22,
	MLX5_EVENT_TYPE_MONITOR_COUNTER    = 0x24,
	MLX5_EVENT_TYPE_PPS_EVENT          = 0x25,

	MLX5_EVENT_TYPE_DB_BF_CONGESTION   = 0x1a,
	MLX5_EVENT_TYPE_STALL_EVENT	   = 0x1b,

	MLX5_EVENT_TYPE_CMD		   = 0x0a,
	MLX5_EVENT_TYPE_PAGE_REQUEST	   = 0xb,

	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE   = 0xd,

	MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
	MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf,

	MLX5_EVENT_TYPE_DCT_DRAINED        = 0x1c,
	MLX5_EVENT_TYPE_DCT_KEY_VIOLATION  = 0x1d,

	MLX5_EVENT_TYPE_FPGA_ERROR         = 0x20,
	MLX5_EVENT_TYPE_FPGA_QP_ERROR      = 0x21,

	MLX5_EVENT_TYPE_DEVICE_TRACER      = 0x26,

	MLX5_EVENT_TYPE_MAX                = 0x100,
};
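
/*
 * Registration sketch (illustrative, not part of this header): the
 * mlx5_nb/MLX5_NB_INIT helpers and mlx5_eq_notifier_register() come from
 * the mlx5 driver headers; "my_nb" and the handler are hypothetical.
 *
 *	static int port_change_handler(struct notifier_block *nb,
 *				       unsigned long type, void *data)
 *	{
 *		struct mlx5_eqe *eqe = data;
 *
 *		... inspect eqe->sub_type, e.g. MLX5_PORT_CHANGE_SUBTYPE_DOWN ...
 *		return NOTIFY_OK;
 *	}
 *
 *	MLX5_NB_INIT(&my_nb, port_change_handler, PORT_CHANGE);
 *	mlx5_eq_notifier_register(mdev, &my_nb);
 */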

enum {
	MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
	MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
};

enum {
	MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
	MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
	MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT = 0x7,
	MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8,
};

enum {
	MLX5_PORT_CHANGE_SUBTYPE_DOWN		= 1,
	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE		= 4,
	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED	= 5,
	MLX5_PORT_CHANGE_SUBTYPE_LID		= 6,
	MLX5_PORT_CHANGE_SUBTYPE_PKEY		= 7,
	MLX5_PORT_CHANGE_SUBTYPE_GUID		= 8,
	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG	= 9,
};

enum {
	MLX5_DEV_CAP_FLAG_XRC		= 1LL <<  3,
	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
	MLX5_DEV_CAP_FLAG_APM		= 1LL << 17,
	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
	MLX5_DEV_CAP_FLAG_BLOCK_MCAST	= 1LL << 23,
	MLX5_DEV_CAP_FLAG_ON_DMND_PG	= 1LL << 24,
	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 37,
	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
};

enum {
	MLX5_ROCE_VERSION_1		= 0,
	MLX5_ROCE_VERSION_2		= 2,
};

enum {
	MLX5_ROCE_VERSION_1_CAP		= 1 << MLX5_ROCE_VERSION_1,
	MLX5_ROCE_VERSION_2_CAP		= 1 << MLX5_ROCE_VERSION_2,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4		= 0,
	MLX5_ROCE_L3_TYPE_IPV6		= 1,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4_CAP	= 1 << 1,
	MLX5_ROCE_L3_TYPE_IPV6_CAP	= 1 << 2,
};

enum {
	MLX5_OPCODE_NOP			= 0x00,
	MLX5_OPCODE_SEND_INVAL		= 0x01,
	MLX5_OPCODE_RDMA_WRITE		= 0x08,
	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX5_OPCODE_SEND		= 0x0a,
	MLX5_OPCODE_SEND_IMM		= 0x0b,
	MLX5_OPCODE_LSO			= 0x0e,
	MLX5_OPCODE_RDMA_READ		= 0x10,
	MLX5_OPCODE_ATOMIC_CS		= 0x11,
	MLX5_OPCODE_ATOMIC_FA		= 0x12,
	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
	MLX5_OPCODE_BIND_MW		= 0x18,
	MLX5_OPCODE_CONFIG_CMD		= 0x1f,
	MLX5_OPCODE_ENHANCED_MPSW	= 0x29,

	MLX5_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX5_RECV_OPCODE_SEND		= 0x01,
	MLX5_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX5_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX5_CQE_OPCODE_ERROR		= 0x1e,
	MLX5_CQE_OPCODE_RESIZE		= 0x16,

	MLX5_OPCODE_SET_PSV		= 0x20,
	MLX5_OPCODE_GET_PSV		= 0x21,
	MLX5_OPCODE_CHECK_PSV		= 0x22,
	MLX5_OPCODE_DUMP		= 0x23,
	MLX5_OPCODE_RGET_PSV		= 0x26,
	MLX5_OPCODE_RCHECK_PSV		= 0x27,

	MLX5_OPCODE_UMR			= 0x25,

};

enum {
	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
	MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2,
};

enum {
	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
	MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
};

struct mlx5_wqe_tls_static_params_seg {
	u8     ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
};

struct mlx5_wqe_tls_progress_params_seg {
	__be32 tis_tir_num;
	u8     ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
};

enum {
	MLX5_SET_PORT_RESET_QKEY	= 0,
	MLX5_SET_PORT_GUID0		= 16,
	MLX5_SET_PORT_NODE_GUID		= 17,
	MLX5_SET_PORT_SYS_GUID		= 18,
	MLX5_SET_PORT_GID_TABLE		= 19,
	MLX5_SET_PORT_PKEY_TABLE	= 20,
};

enum {
	MLX5_BW_NO_LIMIT   = 0,
	MLX5_100_MBPS_UNIT = 3,
	MLX5_GBPS_UNIT	   = 4,
};

enum {
	MLX5_MAX_PAGE_SHIFT		= 31
};

enum {
	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
};

enum {
	/*
	 * Max wqe size for rdma read is 512 bytes, so this
	 * limits our max_sge_rd as the wqe needs to fit:
	 * - ctrl segment (16 bytes)
	 * - rdma segment (16 bytes)
	 * - scatter elements (16 bytes each)
	 */
	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
};

enum mlx5_odp_transport_cap_bits {
	MLX5_ODP_SUPPORT_SEND	 = 1 << 31,
	MLX5_ODP_SUPPORT_RECV	 = 1 << 30,
	MLX5_ODP_SUPPORT_WRITE	 = 1 << 29,
	MLX5_ODP_SUPPORT_READ	 = 1 << 28,
};

struct mlx5_odp_caps {
	char reserved[0x10];
	struct {
		__be32			rc_odp_caps;
		__be32			uc_odp_caps;
		__be32			ud_odp_caps;
	} per_transport_caps;
	char reserved2[0xe4];
};

struct mlx5_cmd_layout {
	u8		type;
	u8		rsvd0[3];
	__be32		inlen;
	__be64		in_ptr;
	__be32		in[4];
	__be32		out[4];
	__be64		out_ptr;
	__be32		outlen;
	u8		token;
	u8		sig;
	u8		rsvd1;
	u8		status_own;
};

enum mlx5_fatal_assert_bit_offsets {
	MLX5_RFR_OFFSET = 31,
};

struct health_buffer {
	__be32		assert_var[5];
	__be32		rsvd0[3];
	__be32		assert_exit_ptr;
	__be32		assert_callra;
	__be32		rsvd1[2];
	__be32		fw_ver;
	__be32		hw_id;
	__be32		rfr;
	u8		irisc_index;
	u8		synd;
	__be16		ext_synd;
};

enum mlx5_initializing_bit_offsets {
	MLX5_FW_RESET_SUPPORTED_OFFSET = 30,
};

enum mlx5_cmd_addr_l_sz_offset {
	MLX5_NIC_IFC_OFFSET = 8,
};

struct mlx5_init_seg {
	__be32			fw_rev;
	__be32			cmdif_rev_fw_sub;
	__be32			rsvd0[2];
	__be32			cmdq_addr_h;
	__be32			cmdq_addr_l_sz;
	__be32			cmd_dbell;
	__be32			rsvd1[120];
	__be32			initializing;
	struct health_buffer	health;
	__be32			rsvd2[880];
	__be32			internal_timer_h;
	__be32			internal_timer_l;
	__be32			rsvd3[2];
	__be32			health_counter;
	__be32			rsvd4[1019];
	__be64			ieee1588_clk;
	__be32			ieee1588_clk_type;
	__be32			clr_intx;
};

struct mlx5_eqe_comp {
	__be32	reserved[6];
	__be32	cqn;
};

struct mlx5_eqe_qp_srq {
	__be32	reserved1[5];
	u8	type;
	u8	reserved2[3];
	__be32	qp_srq_n;
};

struct mlx5_eqe_cq_err {
	__be32	cqn;
	u8	reserved1[7];
	u8	syndrome;
};

struct mlx5_eqe_xrq_err {
	__be32	reserved1[5];
	__be32	type_xrqn;
	__be32	reserved2;
};

struct mlx5_eqe_port_state {
	u8	reserved0[8];
	u8	port;
};

struct mlx5_eqe_gpio {
	__be32	reserved0[2];
	__be64	gpio_event;
};

struct mlx5_eqe_congestion {
	u8	type;
	u8	rsvd0;
	u8	congestion_level;
};

struct mlx5_eqe_stall_vl {
	u8	rsvd0[3];
	u8	port_vl;
};

struct mlx5_eqe_cmd {
	__be32	vector;
	__be32	rsvd[6];
};

struct mlx5_eqe_page_req {
	__be16		ec_function;
	__be16		func_id;
	__be32		num_pages;
	__be32		rsvd1[5];
};

struct mlx5_eqe_page_fault {
	__be32 bytes_committed;
	union {
		struct {
			u16     reserved1;
			__be16  wqe_index;
			u16	reserved2;
			__be16  packet_length;
			__be32  token;
			u8	reserved4[8];
			__be32  pftype_wq;
		} __packed wqe;
		struct {
			__be32  r_key;
			u16	reserved1;
			__be16  packet_length;
			__be32  rdma_op_len;
			__be64  rdma_va;
			__be32  pftype_token;
		} __packed rdma;
	} __packed;
} __packed;

struct mlx5_eqe_vport_change {
	u8		rsvd0[2];
	__be16		vport_num;
	__be32		rsvd1[6];
} __packed;

struct mlx5_eqe_port_module {
	u8        reserved_at_0[1];
	u8        module;
	u8        reserved_at_2[1];
	u8        module_status;
	u8        reserved_at_4[2];
	u8        error_type;
} __packed;

struct mlx5_eqe_pps {
	u8		rsvd0[3];
	u8		pin;
	u8		rsvd1[4];
	union {
		struct {
			__be32		time_sec;
			__be32		time_nsec;
		};
		struct {
			__be64		time_stamp;
		};
	};
	u8		rsvd2[12];
} __packed;

struct mlx5_eqe_dct {
	__be32  reserved[6];
	__be32  dctn;
};

struct mlx5_eqe_temp_warning {
	__be64 sensor_warning_msb;
	__be64 sensor_warning_lsb;
} __packed;

#define SYNC_RST_STATE_MASK    0xf

enum sync_rst_state_type {
	MLX5_SYNC_RST_STATE_RESET_REQUEST	= 0x0,
	MLX5_SYNC_RST_STATE_RESET_NOW		= 0x1,
	MLX5_SYNC_RST_STATE_RESET_ABORT		= 0x2,
};

struct mlx5_eqe_sync_fw_update {
	u8 reserved_at_0[3];
	u8 sync_rst_state;
};

struct mlx5_eqe_vhca_state {
	__be16 ec_function;
	__be16 function_id;
} __packed;

union ev_data {
	__be32				raw[7];
	struct mlx5_eqe_cmd		cmd;
	struct mlx5_eqe_comp		comp;
	struct mlx5_eqe_qp_srq		qp_srq;
	struct mlx5_eqe_cq_err		cq_err;
	struct mlx5_eqe_port_state	port;
	struct mlx5_eqe_gpio		gpio;
	struct mlx5_eqe_congestion	cong;
	struct mlx5_eqe_stall_vl	stall_vl;
	struct mlx5_eqe_page_req	req_pages;
	struct mlx5_eqe_page_fault	page_fault;
	struct mlx5_eqe_vport_change	vport_change;
	struct mlx5_eqe_port_module	port_module;
	struct mlx5_eqe_pps		pps;
	struct mlx5_eqe_dct		dct;
	struct mlx5_eqe_temp_warning	temp_warning;
	struct mlx5_eqe_xrq_err		xrq_err;
	struct mlx5_eqe_sync_fw_update	sync_fw_update;
	struct mlx5_eqe_vhca_state	vhca_state;
} __packed;

struct mlx5_eqe {
	u8		rsvd0;
	u8		type;
	u8		rsvd1;
	u8		sub_type;
	__be32		rsvd2[7];
	union ev_data	data;
	__be16		rsvd3;
	u8		signature;
	u8		owner;
} __packed;

struct mlx5_cmd_prot_block {
	u8		data[MLX5_CMD_DATA_BLOCK_SIZE];
	u8		rsvd0[48];
	__be64		next;
	__be32		block_num;
	u8		rsvd1;
	u8		token;
	u8		ctrl_sig;
	u8		sig;
};

enum {
	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
};

struct mlx5_err_cqe {
	u8	rsvd0[32];
	__be32	srqn;
	u8	rsvd1[18];
	u8	vendor_err_synd;
	u8	syndrome;
	__be32	s_wqe_opcode_qpn;
	__be16	wqe_counter;
	u8	signature;
	u8	op_own;
};

struct mlx5_cqe64 {
	u8		tls_outer_l3_tunneled;
	u8		rsvd0;
	__be16		wqe_id;
	u8		lro_tcppsh_abort_dupack;
	u8		lro_min_ttl;
	__be16		lro_tcp_win;
	__be32		lro_ack_seq_num;
	__be32		rss_hash_result;
	u8		rss_hash_type;
	u8		ml_path;
	u8		rsvd20[2];
	__be16		check_sum;
	__be16		slid;
	__be32		flags_rqpn;
	u8		hds_ip_ext;
	u8		l4_l3_hdr_type;
	__be16		vlan_info;
	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
	union {
		__be32 immediate;
		__be32 inval_rkey;
		__be32 pkey;
		__be32 ft_metadata;
	};
	u8		rsvd40[4];
	__be32		byte_cnt;
	__be32		timestamp_h;
	__be32		timestamp_l;
	__be32		sop_drop_qpn;
	__be16		wqe_counter;
	u8		signature;
	u8		op_own;
};

struct mlx5_mini_cqe8 {
	union {
		__be32 rx_hash_result;
		struct {
			__be16 checksum;
			__be16 stridx;
		};
		struct {
			__be16 wqe_counter;
			u8  s_wqe_opcode;
			u8  reserved;
		} s_wqe_info;
	};
	__be32 byte_cnt;
};

enum {
	MLX5_NO_INLINE_DATA,
	MLX5_INLINE_DATA32_SEG,
	MLX5_INLINE_DATA64_SEG,
	MLX5_COMPRESSED,
};

enum {
	MLX5_CQE_FORMAT_CSUM = 0x1,
	MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
};

#define MLX5_MINI_CQE_ARRAY_SIZE 8

static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
{
	return (cqe->op_own >> 2) & 0x3;
}

static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
{
	return cqe->op_own >> 4;
}

static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
}

static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
}

static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_l3_hdr_type >> 2) & 0x3;
}

static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
	return cqe->tls_outer_l3_tunneled & 0x1;
}

static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
{
	return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
}

static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
	return cqe->l4_l3_hdr_type & 0x1;
}

static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
{
	u32 hi, lo;

	hi = be32_to_cpu(cqe->timestamp_h);
	lo = be32_to_cpu(cqe->timestamp_l);

	return (u64)lo | ((u64)hi << 32);
}

#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE	(9)
#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE	(6)

struct mpwrq_cqe_bc {
	__be16	filler_consumed_strides;
	__be16	byte_cnt;
};

static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return be16_to_cpu(bc->byte_cnt);
}

static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
{
	return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return mpwrq_get_cqe_bc_consumed_strides(bc);
}

static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->wqe_counter);
}

enum {
	CQE_L4_HDR_TYPE_NONE			= 0x0,
	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
	CQE_L4_HDR_TYPE_UDP			= 0x2,
	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
};

enum {
	CQE_RSS_HTYPE_IP	= 0x3 << 2,
	/* cqe->rss_hash_type[3:2] - IP destination selected for hash
	 * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
	 */
	CQE_RSS_HTYPE_L4	= 0x3 << 6,
	/* cqe->rss_hash_type[7:6] - L4 destination selected for hash
	 * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI);
	 * see the decoding sketch below
	 */
};
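
/*
 * Decoding sketch (illustrative, not part of this header): the masks above
 * pick the IP and L4 hash-type bits out of cqe->rss_hash_type, e.g.:
 *
 *	bool l4_hashed = cqe->rss_hash_type & CQE_RSS_HTYPE_L4;
 *	bool ip_hashed = cqe->rss_hash_type & CQE_RSS_HTYPE_IP;
 */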

enum {
	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH	= 0x0,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6	= 0x1,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4	= 0x2,
};

enum {
	CQE_L2_OK	= 1 << 0,
	CQE_L3_OK	= 1 << 1,
	CQE_L4_OK	= 1 << 2,
};

enum {
	CQE_TLS_OFFLOAD_NOT_DECRYPTED		= 0x0,
	CQE_TLS_OFFLOAD_DECRYPTED		= 0x1,
	CQE_TLS_OFFLOAD_RESYNC			= 0x2,
	CQE_TLS_OFFLOAD_ERROR			= 0x3,
};

struct mlx5_sig_err_cqe {
	u8		rsvd0[16];
	__be32		expected_trans_sig;
	__be32		actual_trans_sig;
	__be32		expected_reftag;
	__be32		actual_reftag;
	__be16		syndrome;
	u8		rsvd22[2];
	__be32		mkey;
	__be64		err_offset;
	u8		rsvd30[8];
	__be32		qpn;
	u8		rsvd38[2];
	u8		signature;
	u8		op_own;
};

struct mlx5_wqe_srq_next_seg {
	u8			rsvd0[2];
	__be16			next_wqe_index;
	u8			signature;
	u8			rsvd1[11];
};

union mlx5_ext_cqe {
	struct ib_grh	grh;
	u8		inl[64];
};

struct mlx5_cqe128 {
	union mlx5_ext_cqe	inl_grh;
	struct mlx5_cqe64	cqe64;
};

enum {
	MLX5_MKEY_STATUS_FREE = 1 << 6,
};

enum {
	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
	MLX5_MKEY_BSF_EN	= 1 << 30,
};

struct mlx5_mkey_seg {
	/* This is a two-bit field occupying bits 31-30.
	 * Bit 31 is always 0; bit 30 is zero for regular MRs and 1 (i.e.
	 * free) for UMRs that do not have translation. See the usage
	 * sketch after this struct.
	 */
	u8		status;
	u8		pcie_control;
	u8		flags;
	u8		version;
	__be32		qpn_mkey7_0;
	u8		rsvd1[4];
	__be32		flags_pd;
	__be64		start_addr;
	__be64		len;
	__be32		bsfs_octo_size;
	u8		rsvd2[16];
	__be32		xlt_oct_size;
	u8		rsvd3[3];
	u8		log2_page_size;
	u8		rsvd4[4];
};
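
/*
 * Usage sketch (illustrative): UMR code in the mlx5 drivers marks an mkey
 * free or in use through the status byte; "seg" below is a hypothetical
 * pointer to a struct mlx5_mkey_seg:
 *
 *	seg->status = MLX5_MKEY_STATUS_FREE;		mark the mkey free
 *	busy = !(seg->status & MLX5_MKEY_STATUS_FREE);	test it
 */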

#define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)

enum {
	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 <<  0
};

enum {
	VPORT_STATE_DOWN		= 0x0,
	VPORT_STATE_UP			= 0x1,
};

enum {
	MLX5_VPORT_ADMIN_STATE_DOWN  = 0x0,
	MLX5_VPORT_ADMIN_STATE_UP    = 0x1,
	MLX5_VPORT_ADMIN_STATE_AUTO  = 0x2,
};

enum {
	MLX5_L3_PROT_TYPE_IPV4		= 0,
	MLX5_L3_PROT_TYPE_IPV6		= 1,
};

enum {
	MLX5_L4_PROT_TYPE_TCP		= 0,
	MLX5_L4_PROT_TYPE_UDP		= 1,
};

enum {
	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
};

enum {
	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
	MLX5_MATCH_MISC_PARAMETERS_2	= 1 << 3,
	MLX5_MATCH_MISC_PARAMETERS_3	= 1 << 4,
	MLX5_MATCH_MISC_PARAMETERS_4	= 1 << 5,
};

enum {
	MLX5_FLOW_TABLE_TYPE_NIC_RCV	= 0,
	MLX5_FLOW_TABLE_TYPE_ESWITCH	= 4,
};

enum {
	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT	= 0,
	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE	= 1,
	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR		= 2,
};

enum mlx5_list_type {
	MLX5_NVPRT_LIST_TYPE_UC   = 0x0,
	MLX5_NVPRT_LIST_TYPE_MC   = 0x1,
	MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
};

enum {
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM    = 0x1,
};

enum mlx5_wol_mode {
	MLX5_WOL_DISABLE        = 0,
	MLX5_WOL_SECURED_MAGIC  = 1 << 1,
	MLX5_WOL_MAGIC          = 1 << 2,
	MLX5_WOL_ARP            = 1 << 3,
	MLX5_WOL_BROADCAST      = 1 << 4,
	MLX5_WOL_MULTICAST      = 1 << 5,
	MLX5_WOL_UNICAST        = 1 << 6,
	MLX5_WOL_PHY_ACTIVITY   = 1 << 7,
};

enum mlx5_mpls_supported_fields {
	MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0,
	MLX5_FIELD_SUPPORT_MPLS_EXP   = 1 << 1,
	MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2,
	MLX5_FIELD_SUPPORT_MPLS_TTL   = 1 << 3
};

enum mlx5_flex_parser_protos {
	MLX5_FLEX_PROTO_GENEVE	      = 1 << 3,
	MLX5_FLEX_PROTO_CW_MPLS_GRE   = 1 << 4,
	MLX5_FLEX_PROTO_CW_MPLS_UDP   = 1 << 5,
};

/* MLX5 DEV CAPs */

/* TODO: EAT.ME */
enum mlx5_cap_mode {
	HCA_CAP_OPMOD_GET_MAX	= 0,
	HCA_CAP_OPMOD_GET_CUR	= 1,
};

enum mlx5_cap_type {
	MLX5_CAP_GENERAL = 0,
	MLX5_CAP_ETHERNET_OFFLOADS,
	MLX5_CAP_ODP,
	MLX5_CAP_ATOMIC,
	MLX5_CAP_ROCE,
	MLX5_CAP_IPOIB_OFFLOADS,
	MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
	MLX5_CAP_FLOW_TABLE,
	MLX5_CAP_ESWITCH_FLOW_TABLE,
	MLX5_CAP_ESWITCH,
	MLX5_CAP_RESERVED,
	MLX5_CAP_VECTOR_CALC,
	MLX5_CAP_QOS,
	MLX5_CAP_DEBUG,
	MLX5_CAP_RESERVED_14,
	MLX5_CAP_DEV_MEM,
	MLX5_CAP_RESERVED_16,
	MLX5_CAP_TLS,
	MLX5_CAP_VDPA_EMULATION = 0x13,
	MLX5_CAP_DEV_EVENT = 0x14,
	MLX5_CAP_IPSEC,
	/* NUM OF CAP Types */
	MLX5_CAP_NUM
};

enum mlx5_pcam_reg_groups {
	MLX5_PCAM_REGS_5000_TO_507F                 = 0x0,
};

enum mlx5_pcam_feature_groups {
	MLX5_PCAM_FEATURE_ENHANCED_FEATURES         = 0x0,
};

enum mlx5_mcam_reg_groups {
	MLX5_MCAM_REGS_FIRST_128                    = 0x0,
	MLX5_MCAM_REGS_0x9080_0x90FF                = 0x1,
	MLX5_MCAM_REGS_0x9100_0x917F                = 0x2,
	MLX5_MCAM_REGS_NUM                          = 0x3,
};

enum mlx5_mcam_feature_groups {
	MLX5_MCAM_FEATURE_ENHANCED_FEATURES         = 0x0,
};

enum mlx5_qcam_reg_groups {
	MLX5_QCAM_REGS_FIRST_128                    = 0x0,
};

enum mlx5_qcam_feature_groups {
	MLX5_QCAM_FEATURE_ENHANCED_FEATURES         = 0x0,
};

/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_GEN_64(mdev, cap) \
	MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_GEN_MAX(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
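
/*
 * Usage sketch (illustrative; log_max_qp and cq_moderation are fields of
 * the mlx5_ifc cmd_hca_cap struct, the surrounding code is not part of
 * this header):
 *
 *	u8 log_max_qp = MLX5_CAP_GEN(mdev, log_max_qp);
 *
 *	if (MLX5_CAP_GEN(mdev, cq_moderation))
 *		... the device supports CQ moderation ...
 */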

#define MLX5_CAP_ETH(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_ETH_MAX(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)

#define MLX5_CAP_ROCE(mdev, cap) \
	MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ROCE_MAX(mdev, cap) \
	MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ATOMIC(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
	MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)

#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)

#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET64(flow_table_eswitch_cap, \
		(mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_MAX(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP_ODP(mdev, cap)\
	MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)

#define MLX5_CAP_ODP_MAX(mdev, cap)\
	MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap)

#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
	MLX5_GET(vector_calc_cap, \
		 mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)

#define MLX5_CAP_QOS(mdev, cap)\
	MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)

#define MLX5_CAP_DEBUG(mdev, cap)\
	MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)

#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
	MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)

#define MLX5_CAP_PCAM_REG(mdev, reg) \
	MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)

#define MLX5_CAP_MCAM_REG(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
		 mng_access_reg_cap_mask.access_regs.reg)

#define MLX5_CAP_MCAM_REG1(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
		 mng_access_reg_cap_mask.access_regs1.reg)

#define MLX5_CAP_MCAM_REG2(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
		 mng_access_reg_cap_mask.access_regs2.reg)

#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)

#define MLX5_CAP_QCAM_REG(mdev, fld) \
	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)

#define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)

#define MLX5_CAP_FPGA(mdev, cap) \
	MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)

#define MLX5_CAP64_FPGA(mdev, cap) \
	MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)

#define MLX5_CAP_DEV_MEM(mdev, cap)\
	MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)

#define MLX5_CAP64_DEV_MEM(mdev, cap)\
	MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)

#define MLX5_CAP_TLS(mdev, cap) \
	MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap)

#define MLX5_CAP_DEV_EVENT(mdev, cap)\
	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)

#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
	MLX5_GET(virtio_emulation_cap, \
		(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)

#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
	MLX5_GET64(virtio_emulation_cap, \
		(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)

#define MLX5_CAP_IPSEC(mdev, cap)\
	MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap)

enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};

enum {
	MLX5_IEEE_802_3_COUNTERS_GROUP	      = 0x0,
	MLX5_RFC_2863_COUNTERS_GROUP	      = 0x1,
	MLX5_RFC_2819_COUNTERS_GROUP	      = 0x2,
	MLX5_RFC_3635_COUNTERS_GROUP	      = 0x3,
	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
	MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP    = 0x12,
	MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13,
	MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
	MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
};

enum {
	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP       = 0x0,
};

static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
		return 0;
	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
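
/*
 * Worked example (illustrative): the device reports its P_Key table size
 * as a log value, e.g. mlx5_to_sw_pkey_sz(2) = 128 << 2 = 512 entries;
 * anything above MLX5_MAX_LOG_PKEY_TABLE (5) is invalid and maps to 0.
 */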

#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
				MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
				MLX5_BY_PASS_NUM_MULTICAST_PRIOS)

#endif /* MLX5_DEVICE_H */