1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #ifndef MLX5_DEVICE_H
34 #define MLX5_DEVICE_H
35 
36 #include <linux/types.h>
37 #include <rdma/ib_verbs.h>
38 #include <linux/mlx5/mlx5_ifc.h>
39 
40 #if defined(__LITTLE_ENDIAN)
41 #define MLX5_SET_HOST_ENDIANNESS	0
42 #elif defined(__BIG_ENDIAN)
43 #define MLX5_SET_HOST_ENDIANNESS	0x80
44 #else
45 #error Host endianness not defined
46 #endif
47 
48 /* helper macros */
49 #define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
50 #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
51 #define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
52 #define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
53 #define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
54 #define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
55 #define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
56 #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
57 #define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
58 #define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
59 #define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
60 #define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
61 #define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
62 
63 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
64 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
65 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
66 #define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
67 #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
68 #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
69 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
70 #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
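
/*
 * Worked example (illustrative, hypothetical field): for a field that
 * mlx5_ifc.h would place at bit offset 0x4a (74) with a width of 4 bits,
 * __mlx5_dw_off() yields dword 2 (74 / 32) and __mlx5_dw_bit_off() yields
 * 32 - 4 - (74 & 0x1f) = 18, i.e. the value is shifted left by 18 within
 * the big-endian dword before being merged under __mlx5_dw_mask().
 */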
71 
72 /* insert a value to a struct */
73 #define MLX5_SET(typ, p, fld, v) do { \
74 	u32 _v = v; \
75 	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
76 	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
77 	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
78 		     (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
79 		     << __mlx5_dw_bit_off(typ, fld))); \
80 } while (0)
81 
82 #define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
83 	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
84 	MLX5_SET(typ, p, fld[idx], v); \
85 } while (0)
86 
87 #define MLX5_SET_TO_ONES(typ, p, fld) do { \
88 	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
89 	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
90 	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
91 		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
92 		     << __mlx5_dw_bit_off(typ, fld))); \
93 } while (0)
94 
95 #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
96 __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
97 __mlx5_mask(typ, fld))
98 
99 #define MLX5_GET_PR(typ, p, fld) ({ \
100 	u32 ___t = MLX5_GET(typ, p, fld); \
101 	pr_debug(#fld " = 0x%x\n", ___t); \
102 	___t; \
103 })
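
/*
 * Usage sketch (illustrative, not part of the original header): command
 * buffers are normally declared in dwords with MLX5_ST_SZ_DW() and filled
 * with MLX5_SET(); the enable_hca_in/enable_hca_out layouts, their field
 * names and MLX5_CMD_OP_ENABLE_HCA are assumed to come from
 * linux/mlx5/mlx5_ifc.h, and func_id/status are assumed local variables:
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	MLX5_SET(enable_hca_in, in, function_id, func_id);
 *	(... execute the command, then ...)
 *	status = MLX5_GET(enable_hca_out, out, status);
 */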
104 
105 #define __MLX5_SET64(typ, p, fld, v) do { \
106 	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
107 	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
108 } while (0)
109 
110 #define MLX5_SET64(typ, p, fld, v) do { \
111 	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
112 	__MLX5_SET64(typ, p, fld, v); \
113 } while (0)
114 
115 #define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
116 	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
117 	__MLX5_SET64(typ, p, fld[idx], v); \
118 } while (0)
119 
120 #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
121 
122 #define MLX5_GET64_PR(typ, p, fld) ({ \
123 	u64 ___t = MLX5_GET64(typ, p, fld); \
124 	pr_debug(#fld " = 0x%llx\n", ___t); \
125 	___t; \
126 })
127 
128 #define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\
129 __mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
130 __mlx5_mask16(typ, fld))
131 
132 #define MLX5_SET16(typ, p, fld, v) do { \
133 	u16 _v = v; \
134 	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16);             \
135 	*((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
136 	cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
137 		     (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
138 		     << __mlx5_16_bit_off(typ, fld))); \
139 } while (0)
140 
141 /* Big endian getters */
142 #define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
143 	__mlx5_64_off(typ, fld)))
144 
145 #define MLX5_GET_BE(type_t, typ, p, fld) ({				  \
146 		type_t tmp;						  \
147 		switch (sizeof(tmp)) {					  \
148 		case sizeof(u8):					  \
149 			tmp = (__force type_t)MLX5_GET(typ, p, fld);	  \
150 			break;						  \
151 		case sizeof(u16):					  \
152 			tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
153 			break;						  \
154 		case sizeof(u32):					  \
155 			tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
156 			break;						  \
157 		case sizeof(u64):					  \
158 			tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
159 			break;						  \
160 			}						  \
161 		tmp;							  \
162 		})
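
/*
 * Note (illustrative): unlike MLX5_GET(), which returns a host-endian value,
 * MLX5_GET_BE() hands the extracted field back in big-endian byte order at
 * the requested width (type_t), which is convenient when the value is copied
 * straight into another big-endian hardware structure; the u8 and u64 cases
 * need no re-encoding, while u16/u32 go through cpu_to_be16()/cpu_to_be32().
 */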
163 
164 enum mlx5_inline_modes {
165 	MLX5_INLINE_MODE_NONE,
166 	MLX5_INLINE_MODE_L2,
167 	MLX5_INLINE_MODE_IP,
168 	MLX5_INLINE_MODE_TCP_UDP,
169 };
170 
171 enum {
172 	MLX5_MAX_COMMANDS		= 32,
173 	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
174 	MLX5_PCI_CMD_XPORT		= 7,
175 	MLX5_MKEY_BSF_OCTO_SIZE		= 4,
176 	MLX5_MAX_PSVS			= 4,
177 };
178 
179 enum {
180 	MLX5_EXTENDED_UD_AV		= 0x80000000,
181 };
182 
183 enum {
184 	MLX5_CQ_STATE_ARMED		= 9,
185 	MLX5_CQ_STATE_ALWAYS_ARMED	= 0xb,
186 	MLX5_CQ_STATE_FIRED		= 0xa,
187 };
188 
189 enum {
190 	MLX5_STAT_RATE_OFFSET	= 5,
191 };
192 
193 enum {
194 	MLX5_INLINE_SEG = 0x80000000,
195 };
196 
197 enum {
198 	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
199 };
200 
201 enum {
202 	MLX5_MIN_PKEY_TABLE_SIZE = 128,
203 	MLX5_MAX_LOG_PKEY_TABLE  = 5,
204 };
205 
206 enum {
207 	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
208 };
209 
210 enum {
211 	MLX5_PFAULT_SUBTYPE_WQE = 0,
212 	MLX5_PFAULT_SUBTYPE_RDMA = 1,
213 };
214 
215 enum {
216 	MLX5_PERM_LOCAL_READ	= 1 << 2,
217 	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
218 	MLX5_PERM_REMOTE_READ	= 1 << 4,
219 	MLX5_PERM_REMOTE_WRITE	= 1 << 5,
220 	MLX5_PERM_ATOMIC	= 1 << 6,
221 	MLX5_PERM_UMR_EN	= 1 << 7,
222 };
223 
224 enum {
225 	MLX5_PCIE_CTRL_SMALL_FENCE	= 1 << 0,
226 	MLX5_PCIE_CTRL_RELAXED_ORDERING	= 1 << 2,
227 	MLX5_PCIE_CTRL_NO_SNOOP		= 1 << 3,
228 	MLX5_PCIE_CTRL_TLP_PROCE_EN	= 1 << 6,
229 	MLX5_PCIE_CTRL_TPH_MASK		= 3 << 4,
230 };
231 
232 enum {
233 	MLX5_EN_RD	= (u64)1,
234 	MLX5_EN_WR	= (u64)2
235 };
236 
237 enum {
238 	MLX5_ADAPTER_PAGE_SHIFT		= 12,
239 	MLX5_ADAPTER_PAGE_SIZE		= 1 << MLX5_ADAPTER_PAGE_SHIFT,
240 };
241 
242 enum {
243 	MLX5_BFREGS_PER_UAR		= 4,
244 	MLX5_MAX_UARS			= 1 << 8,
245 	MLX5_NON_FP_BFREGS_PER_UAR	= 2,
246 	MLX5_FP_BFREGS_PER_UAR		= MLX5_BFREGS_PER_UAR -
247 					  MLX5_NON_FP_BFREGS_PER_UAR,
248 	MLX5_MAX_BFREGS			= MLX5_MAX_UARS *
249 					  MLX5_NON_FP_BFREGS_PER_UAR,
250 	MLX5_UARS_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
251 	MLX5_NON_FP_BFREGS_IN_PAGE	= MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
252 	MLX5_MIN_DYN_BFREGS		= 512,
253 	MLX5_MAX_DYN_BFREGS		= 1024,
254 };
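
/*
 * Worked values (illustrative): each UAR exposes MLX5_BFREGS_PER_UAR = 4
 * blue-flame registers, two of them non-fast-path, so MLX5_MAX_BFREGS works
 * out to 256 * 2 = 512; assuming a 4 KiB PAGE_SIZE, MLX5_UARS_IN_PAGE is 1
 * and MLX5_NON_FP_BFREGS_IN_PAGE is 2.
 */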
255 
256 enum {
257 	MLX5_MKEY_MASK_LEN		= 1ull << 0,
258 	MLX5_MKEY_MASK_PAGE_SIZE	= 1ull << 1,
259 	MLX5_MKEY_MASK_START_ADDR	= 1ull << 6,
260 	MLX5_MKEY_MASK_PD		= 1ull << 7,
261 	MLX5_MKEY_MASK_EN_RINVAL	= 1ull << 8,
262 	MLX5_MKEY_MASK_EN_SIGERR	= 1ull << 9,
263 	MLX5_MKEY_MASK_BSF_EN		= 1ull << 12,
264 	MLX5_MKEY_MASK_KEY		= 1ull << 13,
265 	MLX5_MKEY_MASK_QPN		= 1ull << 14,
266 	MLX5_MKEY_MASK_LR		= 1ull << 17,
267 	MLX5_MKEY_MASK_LW		= 1ull << 18,
268 	MLX5_MKEY_MASK_RR		= 1ull << 19,
269 	MLX5_MKEY_MASK_RW		= 1ull << 20,
270 	MLX5_MKEY_MASK_A		= 1ull << 21,
271 	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
272 	MLX5_MKEY_MASK_FREE		= 1ull << 29,
273 };
274 
275 enum {
276 	MLX5_UMR_TRANSLATION_OFFSET_EN	= (1 << 4),
277 
278 	MLX5_UMR_CHECK_NOT_FREE		= (1 << 5),
279 	MLX5_UMR_CHECK_FREE		= (2 << 5),
280 
281 	MLX5_UMR_INLINE			= (1 << 7),
282 };
283 
284 #define MLX5_UMR_MTT_ALIGNMENT 0x40
285 #define MLX5_UMR_MTT_MASK      (MLX5_UMR_MTT_ALIGNMENT - 1)
286 #define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
287 
288 #define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
289 
290 enum {
291 	MLX5_EVENT_QUEUE_TYPE_QP = 0,
292 	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
293 	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
294 	MLX5_EVENT_QUEUE_TYPE_DCT = 6,
295 };
296 
297 enum mlx5_event {
298 	MLX5_EVENT_TYPE_COMP		   = 0x0,
299 
300 	MLX5_EVENT_TYPE_PATH_MIG	   = 0x01,
301 	MLX5_EVENT_TYPE_COMM_EST	   = 0x02,
302 	MLX5_EVENT_TYPE_SQ_DRAINED	   = 0x03,
303 	MLX5_EVENT_TYPE_SRQ_LAST_WQE	   = 0x13,
304 	MLX5_EVENT_TYPE_SRQ_RQ_LIMIT	   = 0x14,
305 
306 	MLX5_EVENT_TYPE_CQ_ERROR	   = 0x04,
307 	MLX5_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
308 	MLX5_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
309 	MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
310 	MLX5_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
311 	MLX5_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,
312 
313 	MLX5_EVENT_TYPE_INTERNAL_ERROR	   = 0x08,
314 	MLX5_EVENT_TYPE_PORT_CHANGE	   = 0x09,
315 	MLX5_EVENT_TYPE_GPIO_EVENT	   = 0x15,
316 	MLX5_EVENT_TYPE_PORT_MODULE_EVENT  = 0x16,
317 	MLX5_EVENT_TYPE_REMOTE_CONFIG	   = 0x19,
318 	MLX5_EVENT_TYPE_GENERAL_EVENT	   = 0x22,
319 	MLX5_EVENT_TYPE_PPS_EVENT          = 0x25,
320 
321 	MLX5_EVENT_TYPE_DB_BF_CONGESTION   = 0x1a,
322 	MLX5_EVENT_TYPE_STALL_EVENT	   = 0x1b,
323 
324 	MLX5_EVENT_TYPE_CMD		   = 0x0a,
325 	MLX5_EVENT_TYPE_PAGE_REQUEST	   = 0xb,
326 
327 	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
328 	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE   = 0xd,
329 
330 	MLX5_EVENT_TYPE_DCT_DRAINED        = 0x1c,
331 
332 	MLX5_EVENT_TYPE_FPGA_ERROR         = 0x20,
333 };
334 
335 enum {
336 	MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
337 };
338 
339 enum {
340 	MLX5_PORT_CHANGE_SUBTYPE_DOWN		= 1,
341 	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE		= 4,
342 	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED	= 5,
343 	MLX5_PORT_CHANGE_SUBTYPE_LID		= 6,
344 	MLX5_PORT_CHANGE_SUBTYPE_PKEY		= 7,
345 	MLX5_PORT_CHANGE_SUBTYPE_GUID		= 8,
346 	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG	= 9,
347 };
348 
349 enum {
350 	MLX5_DEV_CAP_FLAG_XRC		= 1LL <<  3,
351 	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
352 	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
353 	MLX5_DEV_CAP_FLAG_APM		= 1LL << 17,
354 	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
355 	MLX5_DEV_CAP_FLAG_BLOCK_MCAST	= 1LL << 23,
356 	MLX5_DEV_CAP_FLAG_ON_DMND_PG	= 1LL << 24,
357 	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
358 	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
359 	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 37,
360 	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
361 	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
362 };
363 
364 enum {
365 	MLX5_ROCE_VERSION_1		= 0,
366 	MLX5_ROCE_VERSION_2		= 2,
367 };
368 
369 enum {
370 	MLX5_ROCE_VERSION_1_CAP		= 1 << MLX5_ROCE_VERSION_1,
371 	MLX5_ROCE_VERSION_2_CAP		= 1 << MLX5_ROCE_VERSION_2,
372 };
373 
374 enum {
375 	MLX5_ROCE_L3_TYPE_IPV4		= 0,
376 	MLX5_ROCE_L3_TYPE_IPV6		= 1,
377 };
378 
379 enum {
380 	MLX5_ROCE_L3_TYPE_IPV4_CAP	= 1 << 1,
381 	MLX5_ROCE_L3_TYPE_IPV6_CAP	= 1 << 2,
382 };
383 
384 enum {
385 	MLX5_OPCODE_NOP			= 0x00,
386 	MLX5_OPCODE_SEND_INVAL		= 0x01,
387 	MLX5_OPCODE_RDMA_WRITE		= 0x08,
388 	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
389 	MLX5_OPCODE_SEND		= 0x0a,
390 	MLX5_OPCODE_SEND_IMM		= 0x0b,
391 	MLX5_OPCODE_LSO			= 0x0e,
392 	MLX5_OPCODE_RDMA_READ		= 0x10,
393 	MLX5_OPCODE_ATOMIC_CS		= 0x11,
394 	MLX5_OPCODE_ATOMIC_FA		= 0x12,
395 	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
396 	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
397 	MLX5_OPCODE_BIND_MW		= 0x18,
398 	MLX5_OPCODE_CONFIG_CMD		= 0x1f,
399 
400 	MLX5_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
401 	MLX5_RECV_OPCODE_SEND		= 0x01,
402 	MLX5_RECV_OPCODE_SEND_IMM	= 0x02,
403 	MLX5_RECV_OPCODE_SEND_INVAL	= 0x03,
404 
405 	MLX5_CQE_OPCODE_ERROR		= 0x1e,
406 	MLX5_CQE_OPCODE_RESIZE		= 0x16,
407 
408 	MLX5_OPCODE_SET_PSV		= 0x20,
409 	MLX5_OPCODE_GET_PSV		= 0x21,
410 	MLX5_OPCODE_CHECK_PSV		= 0x22,
411 	MLX5_OPCODE_RGET_PSV		= 0x26,
412 	MLX5_OPCODE_RCHECK_PSV		= 0x27,
413 
414 	MLX5_OPCODE_UMR			= 0x25,
415 
416 };
417 
418 enum {
419 	MLX5_SET_PORT_RESET_QKEY	= 0,
420 	MLX5_SET_PORT_GUID0		= 16,
421 	MLX5_SET_PORT_NODE_GUID		= 17,
422 	MLX5_SET_PORT_SYS_GUID		= 18,
423 	MLX5_SET_PORT_GID_TABLE		= 19,
424 	MLX5_SET_PORT_PKEY_TABLE	= 20,
425 };
426 
427 enum {
428 	MLX5_BW_NO_LIMIT   = 0,
429 	MLX5_100_MBPS_UNIT = 3,
430 	MLX5_GBPS_UNIT	   = 4,
431 };
432 
433 enum {
434 	MLX5_MAX_PAGE_SHIFT		= 31
435 };
436 
437 enum {
438 	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
439 };
440 
441 enum {
442 	/*
443 	 * Max wqe size for rdma read is 512 bytes, so this
444 	 * limits our max_sge_rd as the wqe needs to fit:
445 	 * - ctrl segment (16 bytes)
446 	 * - rdma segment (16 bytes)
447 	 * - scatter elements (16 bytes each)
448 	 */
449 	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
450 };
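
/*
 * Worked out (illustrative): a 512-byte RDMA-read WQE minus the 16-byte ctrl
 * segment and the 16-byte rdma segment leaves 480 bytes, i.e. 480 / 16 = 30
 * scatter elements, so MLX5_MAX_SGE_RD evaluates to 30.
 */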
451 
452 enum mlx5_odp_transport_cap_bits {
453 	MLX5_ODP_SUPPORT_SEND	 = 1 << 31,
454 	MLX5_ODP_SUPPORT_RECV	 = 1 << 30,
455 	MLX5_ODP_SUPPORT_WRITE	 = 1 << 29,
456 	MLX5_ODP_SUPPORT_READ	 = 1 << 28,
457 };
458 
459 struct mlx5_odp_caps {
460 	char reserved[0x10];
461 	struct {
462 		__be32			rc_odp_caps;
463 		__be32			uc_odp_caps;
464 		__be32			ud_odp_caps;
465 	} per_transport_caps;
466 	char reserved2[0xe4];
467 };
468 
469 struct mlx5_cmd_layout {
470 	u8		type;
471 	u8		rsvd0[3];
472 	__be32		inlen;
473 	__be64		in_ptr;
474 	__be32		in[4];
475 	__be32		out[4];
476 	__be64		out_ptr;
477 	__be32		outlen;
478 	u8		token;
479 	u8		sig;
480 	u8		rsvd1;
481 	u8		status_own;
482 };
483 
484 struct health_buffer {
485 	__be32		assert_var[5];
486 	__be32		rsvd0[3];
487 	__be32		assert_exit_ptr;
488 	__be32		assert_callra;
489 	__be32		rsvd1[2];
490 	__be32		fw_ver;
491 	__be32		hw_id;
492 	__be32		rsvd2;
493 	u8		irisc_index;
494 	u8		synd;
495 	__be16		ext_synd;
496 };
497 
498 struct mlx5_init_seg {
499 	__be32			fw_rev;
500 	__be32			cmdif_rev_fw_sub;
501 	__be32			rsvd0[2];
502 	__be32			cmdq_addr_h;
503 	__be32			cmdq_addr_l_sz;
504 	__be32			cmd_dbell;
505 	__be32			rsvd1[120];
506 	__be32			initializing;
507 	struct health_buffer	health;
508 	__be32			rsvd2[880];
509 	__be32			internal_timer_h;
510 	__be32			internal_timer_l;
511 	__be32			rsvd3[2];
512 	__be32			health_counter;
513 	__be32			rsvd4[1019];
514 	__be64			ieee1588_clk;
515 	__be32			ieee1588_clk_type;
516 	__be32			clr_intx;
517 };
518 
519 struct mlx5_eqe_comp {
520 	__be32	reserved[6];
521 	__be32	cqn;
522 };
523 
524 struct mlx5_eqe_qp_srq {
525 	__be32	reserved1[5];
526 	u8	type;
527 	u8	reserved2[3];
528 	__be32	qp_srq_n;
529 };
530 
531 struct mlx5_eqe_cq_err {
532 	__be32	cqn;
533 	u8	reserved1[7];
534 	u8	syndrome;
535 };
536 
537 struct mlx5_eqe_port_state {
538 	u8	reserved0[8];
539 	u8	port;
540 };
541 
542 struct mlx5_eqe_gpio {
543 	__be32	reserved0[2];
544 	__be64	gpio_event;
545 };
546 
547 struct mlx5_eqe_congestion {
548 	u8	type;
549 	u8	rsvd0;
550 	u8	congestion_level;
551 };
552 
553 struct mlx5_eqe_stall_vl {
554 	u8	rsvd0[3];
555 	u8	port_vl;
556 };
557 
558 struct mlx5_eqe_cmd {
559 	__be32	vector;
560 	__be32	rsvd[6];
561 };
562 
563 struct mlx5_eqe_page_req {
564 	u8		rsvd0[2];
565 	__be16		func_id;
566 	__be32		num_pages;
567 	__be32		rsvd1[5];
568 };
569 
570 struct mlx5_eqe_page_fault {
571 	__be32 bytes_committed;
572 	union {
573 		struct {
574 			u16     reserved1;
575 			__be16  wqe_index;
576 			u16	reserved2;
577 			__be16  packet_length;
578 			__be32  token;
579 			u8	reserved4[8];
580 			__be32  pftype_wq;
581 		} __packed wqe;
582 		struct {
583 			__be32  r_key;
584 			u16	reserved1;
585 			__be16  packet_length;
586 			__be32  rdma_op_len;
587 			__be64  rdma_va;
588 			__be32  pftype_token;
589 		} __packed rdma;
590 	} __packed;
591 } __packed;
592 
593 struct mlx5_eqe_vport_change {
594 	u8		rsvd0[2];
595 	__be16		vport_num;
596 	__be32		rsvd1[6];
597 } __packed;
598 
599 struct mlx5_eqe_port_module {
600 	u8        reserved_at_0[1];
601 	u8        module;
602 	u8        reserved_at_2[1];
603 	u8        module_status;
604 	u8        reserved_at_4[2];
605 	u8        error_type;
606 } __packed;
607 
608 struct mlx5_eqe_pps {
609 	u8		rsvd0[3];
610 	u8		pin;
611 	u8		rsvd1[4];
612 	union {
613 		struct {
614 			__be32		time_sec;
615 			__be32		time_nsec;
616 		};
617 		struct {
618 			__be64		time_stamp;
619 		};
620 	};
621 	u8		rsvd2[12];
622 } __packed;
623 
624 struct mlx5_eqe_dct {
625 	__be32  reserved[6];
626 	__be32  dctn;
627 };
628 
629 union ev_data {
630 	__be32				raw[7];
631 	struct mlx5_eqe_cmd		cmd;
632 	struct mlx5_eqe_comp		comp;
633 	struct mlx5_eqe_qp_srq		qp_srq;
634 	struct mlx5_eqe_cq_err		cq_err;
635 	struct mlx5_eqe_port_state	port;
636 	struct mlx5_eqe_gpio		gpio;
637 	struct mlx5_eqe_congestion	cong;
638 	struct mlx5_eqe_stall_vl	stall_vl;
639 	struct mlx5_eqe_page_req	req_pages;
640 	struct mlx5_eqe_page_fault	page_fault;
641 	struct mlx5_eqe_vport_change	vport_change;
642 	struct mlx5_eqe_port_module	port_module;
643 	struct mlx5_eqe_pps		pps;
644 	struct mlx5_eqe_dct             dct;
645 } __packed;
646 
647 struct mlx5_eqe {
648 	u8		rsvd0;
649 	u8		type;
650 	u8		rsvd1;
651 	u8		sub_type;
652 	__be32		rsvd2[7];
653 	union ev_data	data;
654 	__be16		rsvd3;
655 	u8		signature;
656 	u8		owner;
657 } __packed;
658 
659 struct mlx5_cmd_prot_block {
660 	u8		data[MLX5_CMD_DATA_BLOCK_SIZE];
661 	u8		rsvd0[48];
662 	__be64		next;
663 	__be32		block_num;
664 	u8		rsvd1;
665 	u8		token;
666 	u8		ctrl_sig;
667 	u8		sig;
668 };
669 
670 enum {
671 	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
672 };
673 
674 struct mlx5_err_cqe {
675 	u8	rsvd0[32];
676 	__be32	srqn;
677 	u8	rsvd1[18];
678 	u8	vendor_err_synd;
679 	u8	syndrome;
680 	__be32	s_wqe_opcode_qpn;
681 	__be16	wqe_counter;
682 	u8	signature;
683 	u8	op_own;
684 };
685 
686 struct mlx5_cqe64 {
687 	u8		outer_l3_tunneled;
688 	u8		rsvd0;
689 	__be16		wqe_id;
690 	u8		lro_tcppsh_abort_dupack;
691 	u8		lro_min_ttl;
692 	__be16		lro_tcp_win;
693 	__be32		lro_ack_seq_num;
694 	__be32		rss_hash_result;
695 	u8		rss_hash_type;
696 	u8		ml_path;
697 	u8		rsvd20[2];
698 	__be16		check_sum;
699 	__be16		slid;
700 	__be32		flags_rqpn;
701 	u8		hds_ip_ext;
702 	u8		l4_l3_hdr_type;
703 	__be16		vlan_info;
704 	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
705 	__be32		imm_inval_pkey;
706 	u8		rsvd40[4];
707 	__be32		byte_cnt;
708 	__be32		timestamp_h;
709 	__be32		timestamp_l;
710 	__be32		sop_drop_qpn;
711 	__be16		wqe_counter;
712 	u8		signature;
713 	u8		op_own;
714 };
715 
716 struct mlx5_mini_cqe8 {
717 	union {
718 		__be32 rx_hash_result;
719 		struct {
720 			__be16 checksum;
721 			__be16 rsvd;
722 		};
723 		struct {
724 			__be16 wqe_counter;
725 			u8  s_wqe_opcode;
726 			u8  reserved;
727 		} s_wqe_info;
728 	};
729 	__be32 byte_cnt;
730 };
731 
732 enum {
733 	MLX5_NO_INLINE_DATA,
734 	MLX5_INLINE_DATA32_SEG,
735 	MLX5_INLINE_DATA64_SEG,
736 	MLX5_COMPRESSED,
737 };
738 
739 enum {
740 	MLX5_CQE_FORMAT_CSUM = 0x1,
741 };
742 
743 #define MLX5_MINI_CQE_ARRAY_SIZE 8
744 
745 static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
746 {
747 	return (cqe->op_own >> 2) & 0x3;
748 }
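
/*
 * Note (illustrative): the two format bits extracted here index the
 * MLX5_NO_INLINE_DATA .. MLX5_COMPRESSED values above; MLX5_COMPRESSED is
 * commonly taken to mean the CQE carries an array of struct mlx5_mini_cqe8
 * entries rather than a full completion.
 */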
749 
750 static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
751 {
752 	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
753 }
754 
755 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
756 {
757 	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
758 }
759 
760 static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
761 {
762 	return (cqe->l4_l3_hdr_type >> 2) & 0x3;
763 }
764 
765 static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
766 {
767 	return cqe->outer_l3_tunneled & 0x1;
768 }
769 
770 static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
771 {
772 	return !!(cqe->l4_l3_hdr_type & 0x1);
773 }
774 
775 static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
776 {
777 	u32 hi, lo;
778 
779 	hi = be32_to_cpu(cqe->timestamp_h);
780 	lo = be32_to_cpu(cqe->timestamp_l);
781 
782 	return (u64)lo | ((u64)hi << 32);
783 }
784 
785 #define MLX5_MPWQE_LOG_NUM_STRIDES_BASE	(9)
786 #define MLX5_MPWQE_LOG_STRIDE_SZ_BASE	(6)
787 
788 struct mpwrq_cqe_bc {
789 	__be16	filler_consumed_strides;
790 	__be16	byte_cnt;
791 };
792 
793 static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
794 {
795 	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
796 
797 	return be16_to_cpu(bc->byte_cnt);
798 }
799 
800 static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
801 {
802 	return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
803 }
804 
805 static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
806 {
807 	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
808 
809 	return mpwrq_get_cqe_bc_consumed_strides(bc);
810 }
811 
812 static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
813 {
814 	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
815 
816 	return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
817 }
818 
819 static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
820 {
821 	return be16_to_cpu(cqe->wqe_counter);
822 }
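
/*
 * Layout note (illustrative): the helpers above overlay struct mpwrq_cqe_bc
 * on cqe->byte_cnt; bit 15 of filler_consumed_strides marks a filler CQE and
 * bits [14:0] carry the number of consumed strides, hence the 0x8000 and
 * 0x7fff masks.
 */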
823 
824 enum {
825 	CQE_L4_HDR_TYPE_NONE			= 0x0,
826 	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
827 	CQE_L4_HDR_TYPE_UDP			= 0x2,
828 	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
829 	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
830 };
831 
832 enum {
833 	CQE_RSS_HTYPE_IP	= 0x3 << 2,
834 	/* cqe->rss_hash_type[3:2] - IP destination selected for hash
835 	 * (00 = none,  01 = IPv4, 10 = IPv6, 11 = Reserved)
836 	 */
837 	CQE_RSS_HTYPE_L4	= 0x3 << 6,
838 	/* cqe->rss_hash_type[7:6] - L4 destination selected for hash
839 	 * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
840 	 */
841 };
842 
843 enum {
844 	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH	= 0x0,
845 	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6	= 0x1,
846 	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4	= 0x2,
847 };
848 
849 enum {
850 	CQE_L2_OK	= 1 << 0,
851 	CQE_L3_OK	= 1 << 1,
852 	CQE_L4_OK	= 1 << 2,
853 };
854 
855 struct mlx5_sig_err_cqe {
856 	u8		rsvd0[16];
857 	__be32		expected_trans_sig;
858 	__be32		actual_trans_sig;
859 	__be32		expected_reftag;
860 	__be32		actual_reftag;
861 	__be16		syndrome;
862 	u8		rsvd22[2];
863 	__be32		mkey;
864 	__be64		err_offset;
865 	u8		rsvd30[8];
866 	__be32		qpn;
867 	u8		rsvd38[2];
868 	u8		signature;
869 	u8		op_own;
870 };
871 
872 struct mlx5_wqe_srq_next_seg {
873 	u8			rsvd0[2];
874 	__be16			next_wqe_index;
875 	u8			signature;
876 	u8			rsvd1[11];
877 };
878 
879 union mlx5_ext_cqe {
880 	struct ib_grh	grh;
881 	u8		inl[64];
882 };
883 
884 struct mlx5_cqe128 {
885 	union mlx5_ext_cqe	inl_grh;
886 	struct mlx5_cqe64	cqe64;
887 };
888 
889 enum {
890 	MLX5_MKEY_STATUS_FREE = 1 << 6,
891 };
892 
893 enum {
894 	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
895 	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
896 	MLX5_MKEY_BSF_EN	= 1 << 30,
897 	MLX5_MKEY_LEN64		= 1 << 31,
898 };
899 
900 struct mlx5_mkey_seg {
901 	/* This is a two-bit field occupying bits 31-30.
902 	 * bit 31 is always 0,
903 	 * bit 30 is zero for regular MRs and 1 (e.g. free) for UMRs that do not have translation
904 	 */
905 	u8		status;
906 	u8		pcie_control;
907 	u8		flags;
908 	u8		version;
909 	__be32		qpn_mkey7_0;
910 	u8		rsvd1[4];
911 	__be32		flags_pd;
912 	__be64		start_addr;
913 	__be64		len;
914 	__be32		bsfs_octo_size;
915 	u8		rsvd2[16];
916 	__be32		xlt_oct_size;
917 	u8		rsvd3[3];
918 	u8		log2_page_size;
919 	u8		rsvd4[4];
920 };
921 
922 #define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
923 
924 enum {
925 	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 <<  0
926 };
927 
928 enum {
929 	VPORT_STATE_DOWN		= 0x0,
930 	VPORT_STATE_UP			= 0x1,
931 };
932 
933 enum {
934 	MLX5_ESW_VPORT_ADMIN_STATE_DOWN  = 0x0,
935 	MLX5_ESW_VPORT_ADMIN_STATE_UP    = 0x1,
936 	MLX5_ESW_VPORT_ADMIN_STATE_AUTO  = 0x2,
937 };
938 
939 enum {
940 	MLX5_L3_PROT_TYPE_IPV4		= 0,
941 	MLX5_L3_PROT_TYPE_IPV6		= 1,
942 };
943 
944 enum {
945 	MLX5_L4_PROT_TYPE_TCP		= 0,
946 	MLX5_L4_PROT_TYPE_UDP		= 1,
947 };
948 
949 enum {
950 	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
951 	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
952 	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
953 	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
954 	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
955 };
956 
957 enum {
958 	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
959 	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
960 	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
961 
962 };
963 
964 enum {
965 	MLX5_FLOW_TABLE_TYPE_NIC_RCV	= 0,
966 	MLX5_FLOW_TABLE_TYPE_ESWITCH	= 4,
967 };
968 
969 enum {
970 	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT	= 0,
971 	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE	= 1,
972 	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR		= 2,
973 };
974 
975 enum mlx5_list_type {
976 	MLX5_NVPRT_LIST_TYPE_UC   = 0x0,
977 	MLX5_NVPRT_LIST_TYPE_MC   = 0x1,
978 	MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
979 };
980 
981 enum {
982 	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
983 	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM    = 0x1,
984 };
985 
986 enum mlx5_wol_mode {
987 	MLX5_WOL_DISABLE        = 0,
988 	MLX5_WOL_SECURED_MAGIC  = 1 << 1,
989 	MLX5_WOL_MAGIC          = 1 << 2,
990 	MLX5_WOL_ARP            = 1 << 3,
991 	MLX5_WOL_BROADCAST      = 1 << 4,
992 	MLX5_WOL_MULTICAST      = 1 << 5,
993 	MLX5_WOL_UNICAST        = 1 << 6,
994 	MLX5_WOL_PHY_ACTIVITY   = 1 << 7,
995 };
996 
997 /* MLX5 DEV CAPs */
998 
999 /* TODO: EAT.ME */
1000 enum mlx5_cap_mode {
1001 	HCA_CAP_OPMOD_GET_MAX	= 0,
1002 	HCA_CAP_OPMOD_GET_CUR	= 1,
1003 };
1004 
1005 enum mlx5_cap_type {
1006 	MLX5_CAP_GENERAL = 0,
1007 	MLX5_CAP_ETHERNET_OFFLOADS,
1008 	MLX5_CAP_ODP,
1009 	MLX5_CAP_ATOMIC,
1010 	MLX5_CAP_ROCE,
1011 	MLX5_CAP_IPOIB_OFFLOADS,
1012 	MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
1013 	MLX5_CAP_FLOW_TABLE,
1014 	MLX5_CAP_ESWITCH_FLOW_TABLE,
1015 	MLX5_CAP_ESWITCH,
1016 	MLX5_CAP_RESERVED,
1017 	MLX5_CAP_VECTOR_CALC,
1018 	MLX5_CAP_QOS,
1019 	MLX5_CAP_DEBUG,
1020 	MLX5_CAP_RESERVED_14,
1021 	MLX5_CAP_DEV_MEM,
1022 	/* NUM OF CAP Types */
1023 	MLX5_CAP_NUM
1024 };
1025 
1026 enum mlx5_pcam_reg_groups {
1027 	MLX5_PCAM_REGS_5000_TO_507F                 = 0x0,
1028 };
1029 
1030 enum mlx5_pcam_feature_groups {
1031 	MLX5_PCAM_FEATURE_ENHANCED_FEATURES         = 0x0,
1032 };
1033 
1034 enum mlx5_mcam_reg_groups {
1035 	MLX5_MCAM_REGS_FIRST_128                    = 0x0,
1036 };
1037 
1038 enum mlx5_mcam_feature_groups {
1039 	MLX5_MCAM_FEATURE_ENHANCED_FEATURES         = 0x0,
1040 };
1041 
1042 enum mlx5_qcam_reg_groups {
1043 	MLX5_QCAM_REGS_FIRST_128                    = 0x0,
1044 };
1045 
1046 enum mlx5_qcam_feature_groups {
1047 	MLX5_QCAM_FEATURE_ENHANCED_FEATURES         = 0x0,
1048 };
1049 
1050 /* GET Dev Caps macros */
1051 #define MLX5_CAP_GEN(mdev, cap) \
1052 	MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
1053 
1054 #define MLX5_CAP_GEN_MAX(mdev, cap) \
1055 	MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
1056 
1057 #define MLX5_CAP_ETH(mdev, cap) \
1058 	MLX5_GET(per_protocol_networking_offload_caps,\
1059 		 mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1060 
1061 #define MLX5_CAP_ETH_MAX(mdev, cap) \
1062 	MLX5_GET(per_protocol_networking_offload_caps,\
1063 		 mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1064 
1065 #define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
1066 	MLX5_GET(per_protocol_networking_offload_caps,\
1067 		 mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)
1068 
1069 #define MLX5_CAP_ROCE(mdev, cap) \
1070 	MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
1071 
1072 #define MLX5_CAP_ROCE_MAX(mdev, cap) \
1073 	MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
1074 
1075 #define MLX5_CAP_ATOMIC(mdev, cap) \
1076 	MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
1077 
1078 #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
1079 	MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
1080 
1081 #define MLX5_CAP_FLOWTABLE(mdev, cap) \
1082 	MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
1083 
1084 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
1085 	MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
1086 
1087 #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
1088 	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
1089 
1090 #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
1091 	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
1092 
1093 #define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
1094 	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
1095 
1096 #define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
1097 	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
1098 
1099 #define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
1100 	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
1101 
1102 #define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
1103 	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
1104 
1105 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
1106 	MLX5_GET(flow_table_eswitch_cap, \
1107 		 mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1108 
1109 #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
1110 	MLX5_GET(flow_table_eswitch_cap, \
1111 		 mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1112 
1113 #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
1114 	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
1115 
1116 #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
1117 	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
1118 
1119 #define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
1120 	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
1121 
1122 #define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
1123 	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
1124 
1125 #define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
1126 	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
1127 
1128 #define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
1129 	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
1130 
1131 #define MLX5_CAP_ESW(mdev, cap) \
1132 	MLX5_GET(e_switch_cap, \
1133 		 mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
1134 
1135 #define MLX5_CAP_ESW_MAX(mdev, cap) \
1136 	MLX5_GET(e_switch_cap, \
1137 		 mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
1138 
1139 #define MLX5_CAP_ODP(mdev, cap)\
1140 	MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
1141 
1142 #define MLX5_CAP_VECTOR_CALC(mdev, cap) \
1143 	MLX5_GET(vector_calc_cap, \
1144 		 mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
1145 
1146 #define MLX5_CAP_QOS(mdev, cap)\
1147 	MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
1148 
1149 #define MLX5_CAP_DEBUG(mdev, cap)\
1150 	MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)
1151 
1152 #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
1153 	MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
1154 
1155 #define MLX5_CAP_MCAM_REG(mdev, reg) \
1156 	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg)
1157 
1158 #define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
1159 	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
1160 
1161 #define MLX5_CAP_QCAM_REG(mdev, fld) \
1162 	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)
1163 
1164 #define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
1165 	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)
1166 
1167 #define MLX5_CAP_FPGA(mdev, cap) \
1168 	MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
1169 
1170 #define MLX5_CAP64_FPGA(mdev, cap) \
1171 	MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
1172 
1173 #define MLX5_CAP_DEV_MEM(mdev, cap)\
1174 	MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
1175 
1176 #define MLX5_CAP64_DEV_MEM(mdev, cap)\
1177 	MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
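
/*
 * Usage sketch (illustrative): the cap helpers read fields straight out of
 * the cached current/maximum HCA capability pages, e.g. (assuming a
 * struct mlx5_core_dev *mdev, a local max_cqes, and the
 * cq_moderation/log_max_cq field names from mlx5_ifc.h):
 *
 *	if (MLX5_CAP_GEN(mdev, cq_moderation))
 *		max_cqes = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
 */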
1178 
1179 enum {
1180 	MLX5_CMD_STAT_OK			= 0x0,
1181 	MLX5_CMD_STAT_INT_ERR			= 0x1,
1182 	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
1183 	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
1184 	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
1185 	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
1186 	MLX5_CMD_STAT_RES_BUSY			= 0x6,
1187 	MLX5_CMD_STAT_LIM_ERR			= 0x8,
1188 	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
1189 	MLX5_CMD_STAT_IX_ERR			= 0xa,
1190 	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
1191 	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
1192 	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
1193 	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
1194 	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
1195 	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
1196 };
1197 
1198 enum {
1199 	MLX5_IEEE_802_3_COUNTERS_GROUP	      = 0x0,
1200 	MLX5_RFC_2863_COUNTERS_GROUP	      = 0x1,
1201 	MLX5_RFC_2819_COUNTERS_GROUP	      = 0x2,
1202 	MLX5_RFC_3635_COUNTERS_GROUP	      = 0x3,
1203 	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
1204 	MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
1205 	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
1206 	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP    = 0x12,
1207 	MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
1208 	MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
1209 };
1210 
1211 enum {
1212 	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP       = 0x0,
1213 };
1214 
1215 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1216 {
1217 	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
1218 		return 0;
1219 	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
1220 }
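
/*
 * Worked example (illustrative): the device reports the pkey table size as a
 * log2 factor on top of MLX5_MIN_PKEY_TABLE_SIZE, so pkey_sz = 0 maps to 128
 * entries and pkey_sz = MLX5_MAX_LOG_PKEY_TABLE (5) maps to 128 << 5 = 4096;
 * anything larger is rejected by returning 0.
 */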
1221 
1222 #define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
1223 #define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
1224 #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
1225 #define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
1226 				MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
1227 				MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
1228 
1229 #endif /* MLX5_DEVICE_H */
1230