/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DEVICE_H
#define MLX5_DEVICE_H

#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/mlx5_ifc.h>

#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0x80
#else
#error Host endianness not defined
#endif

/* helper macros */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)

#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))

/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
	u32 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)
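
/*
 * Worked example for the dword helpers above (illustrative arithmetic
 * only): a hypothetical 8-bit field at bit offset 0x48 of its structure
 * gives __mlx5_dw_off = 0x48 / 32 = 2 (the third big-endian dword) and
 * __mlx5_dw_bit_off = 32 - 8 - (0x48 & 0x1f) = 16, so the field occupies
 * bits [23:16] of that dword and __mlx5_mask evaluates to 0xff.
 */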

#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
	MLX5_SET(typ, p, fld[idx], v); \
} while (0)

#define MLX5_SET_TO_ONES(typ, p, fld) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) + \
	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
	__mlx5_mask(typ, fld))

#define MLX5_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})

#define __MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)

#define MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld, v); \
} while (0)

#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld[idx], v); \
} while (0)

#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))

#define MLX5_GET64_PR(typ, p, fld) ({ \
	u64 ___t = MLX5_GET64(typ, p, fld); \
	pr_debug(#fld " = 0x%llx\n", ___t); \
	___t; \
})

#define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) + \
	__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
	__mlx5_mask16(typ, fld))

#define MLX5_SET16(typ, p, fld, v) do { \
	u16 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16); \
	*((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
	cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
		     (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
		     << __mlx5_16_bit_off(typ, fld))); \
} while (0)

/* Big endian getters */
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) + \
	__mlx5_64_off(typ, fld)))

#define MLX5_GET_BE(type_t, typ, p, fld) ({				  \
		type_t tmp;						  \
		switch (sizeof(tmp)) {					  \
		case sizeof(u8):					  \
			tmp = (__force type_t)MLX5_GET(typ, p, fld);	  \
			break;						  \
		case sizeof(u16):					  \
			tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
			break;						  \
		case sizeof(u32):					  \
			tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
			break;						  \
		case sizeof(u64):					  \
			tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
			break;						  \
		}							  \
		tmp;							  \
		})
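
/*
 * Usage sketch for the accessors above (illustrative; create_qp_in, qpc
 * and their fields are layouts from mlx5_ifc.h, and MLX5_CMD_OP_CREATE_QP
 * is the matching command opcode):
 *
 *	u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
 *	void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 *
 *	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
 *	MLX5_SET(qpc, qpc, user_index, 0x1234);
 *	WARN_ON(MLX5_GET(qpc, qpc, user_index) != 0x1234);
 */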

enum mlx5_inline_modes {
	MLX5_INLINE_MODE_NONE,
	MLX5_INLINE_MODE_L2,
	MLX5_INLINE_MODE_IP,
	MLX5_INLINE_MODE_TCP_UDP,
};

enum {
	MLX5_MAX_COMMANDS		= 32,
	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
	MLX5_PCI_CMD_XPORT		= 7,
	MLX5_MKEY_BSF_OCTO_SIZE		= 4,
	MLX5_MAX_PSVS			= 4,
};

enum {
	MLX5_EXTENDED_UD_AV		= 0x80000000,
};

enum {
	MLX5_CQ_STATE_ARMED		= 9,
	MLX5_CQ_STATE_ALWAYS_ARMED	= 0xb,
	MLX5_CQ_STATE_FIRED		= 0xa,
};

enum {
	MLX5_STAT_RATE_OFFSET	= 5,
};

enum {
	MLX5_INLINE_SEG = 0x80000000,
};

enum {
	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
};

enum {
	MLX5_MIN_PKEY_TABLE_SIZE = 128,
	MLX5_MAX_LOG_PKEY_TABLE  = 5,
};

enum {
	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
};

enum {
	MLX5_PFAULT_SUBTYPE_WQE = 0,
	MLX5_PFAULT_SUBTYPE_RDMA = 1,
};

enum wqe_page_fault_type {
	MLX5_WQE_PF_TYPE_RMP			= 0,
	MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE	= 1,
	MLX5_WQE_PF_TYPE_RESP			= 2,
	MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC	= 3,
};

enum {
	MLX5_PERM_LOCAL_READ	= 1 << 2,
	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
	MLX5_PERM_REMOTE_READ	= 1 << 4,
	MLX5_PERM_REMOTE_WRITE	= 1 << 5,
	MLX5_PERM_ATOMIC	= 1 << 6,
	MLX5_PERM_UMR_EN	= 1 << 7,
};

enum {
	MLX5_PCIE_CTRL_SMALL_FENCE	= 1 << 0,
	MLX5_PCIE_CTRL_RELAXED_ORDERING	= 1 << 2,
	MLX5_PCIE_CTRL_NO_SNOOP		= 1 << 3,
	MLX5_PCIE_CTRL_TLP_PROCE_EN	= 1 << 6,
	MLX5_PCIE_CTRL_TPH_MASK		= 3 << 4,
};

enum {
	MLX5_EN_RD	= (u64)1,
	MLX5_EN_WR	= (u64)2
};

enum {
	MLX5_ADAPTER_PAGE_SHIFT		= 12,
	MLX5_ADAPTER_PAGE_SIZE		= 1 << MLX5_ADAPTER_PAGE_SHIFT,
};

enum {
	MLX5_BFREGS_PER_UAR		= 4,
	MLX5_MAX_UARS			= 1 << 8,
	MLX5_NON_FP_BFREGS_PER_UAR	= 2,
	MLX5_FP_BFREGS_PER_UAR		= MLX5_BFREGS_PER_UAR -
					  MLX5_NON_FP_BFREGS_PER_UAR,
	MLX5_MAX_BFREGS			= MLX5_MAX_UARS *
					  MLX5_NON_FP_BFREGS_PER_UAR,
	MLX5_UARS_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
	MLX5_NON_FP_BFREGS_IN_PAGE	= MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
	MLX5_MIN_DYN_BFREGS		= 512,
	MLX5_MAX_DYN_BFREGS		= 1024,
};

enum {
	MLX5_MKEY_MASK_LEN		= 1ull << 0,
	MLX5_MKEY_MASK_PAGE_SIZE	= 1ull << 1,
	MLX5_MKEY_MASK_START_ADDR	= 1ull << 6,
	MLX5_MKEY_MASK_PD		= 1ull << 7,
	MLX5_MKEY_MASK_EN_RINVAL	= 1ull << 8,
	MLX5_MKEY_MASK_EN_SIGERR	= 1ull << 9,
	MLX5_MKEY_MASK_BSF_EN		= 1ull << 12,
	MLX5_MKEY_MASK_KEY		= 1ull << 13,
	MLX5_MKEY_MASK_QPN		= 1ull << 14,
	MLX5_MKEY_MASK_LR		= 1ull << 17,
	MLX5_MKEY_MASK_LW		= 1ull << 18,
	MLX5_MKEY_MASK_RR		= 1ull << 19,
	MLX5_MKEY_MASK_RW		= 1ull << 20,
	MLX5_MKEY_MASK_A		= 1ull << 21,
	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
	MLX5_MKEY_MASK_FREE		= 1ull << 29,
};

enum {
	MLX5_UMR_TRANSLATION_OFFSET_EN	= (1 << 4),

	MLX5_UMR_CHECK_NOT_FREE		= (1 << 5),
	MLX5_UMR_CHECK_FREE		= (2 << 5),

	MLX5_UMR_INLINE			= (1 << 7),
};

#define MLX5_UMR_MTT_ALIGNMENT 0x40
#define MLX5_UMR_MTT_MASK      (MLX5_UMR_MTT_ALIGNMENT - 1)
#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT

#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)

enum {
	MLX5_EVENT_QUEUE_TYPE_QP  = 0,
	MLX5_EVENT_QUEUE_TYPE_RQ  = 1,
	MLX5_EVENT_QUEUE_TYPE_SQ  = 2,
	MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};
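
/*
 * Worked example for MLX5_USER_INDEX_LEN above (assuming the qpc layout
 * in mlx5_ifc.h, where user_index is declared 0x18 = 24 bits wide):
 * MLX5_FLD_SZ_BYTES(qpc, user_index) = 24 / 8 = 3, so the macro
 * evaluates to 3 * 8 = 24 usable bits of driver-chosen index.
 */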

/* mlx5 components can subscribe to any one of these events via the
 * mlx5_eq_notifier_register API.
 */
enum mlx5_event {
	/* Special value to subscribe to any event */
	MLX5_EVENT_TYPE_NOTIFY_ANY	   = 0x0,
	/* HW events enum start: comp events are not subscribable */
	MLX5_EVENT_TYPE_COMP		   = 0x0,
	/* HW Async events enum start: subscribable events */
	MLX5_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX5_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX5_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX5_EVENT_TYPE_SRQ_LAST_WQE	   = 0x13,
	MLX5_EVENT_TYPE_SRQ_RQ_LIMIT	   = 0x14,

	MLX5_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX5_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX5_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX5_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX5_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,

	MLX5_EVENT_TYPE_INTERNAL_ERROR	   = 0x08,
	MLX5_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX5_EVENT_TYPE_GPIO_EVENT	   = 0x15,
	MLX5_EVENT_TYPE_PORT_MODULE_EVENT  = 0x16,
	MLX5_EVENT_TYPE_TEMP_WARN_EVENT	   = 0x17,
	MLX5_EVENT_TYPE_XRQ_ERROR	   = 0x18,
	MLX5_EVENT_TYPE_REMOTE_CONFIG	   = 0x19,
	MLX5_EVENT_TYPE_GENERAL_EVENT	   = 0x22,
	MLX5_EVENT_TYPE_MONITOR_COUNTER	   = 0x24,
	MLX5_EVENT_TYPE_PPS_EVENT	   = 0x25,

	MLX5_EVENT_TYPE_DB_BF_CONGESTION   = 0x1a,
	MLX5_EVENT_TYPE_STALL_EVENT	   = 0x1b,

	MLX5_EVENT_TYPE_CMD		   = 0x0a,
	MLX5_EVENT_TYPE_PAGE_REQUEST	   = 0xb,

	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE   = 0xd,

	MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,

	MLX5_EVENT_TYPE_DCT_DRAINED	   = 0x1c,
	MLX5_EVENT_TYPE_DCT_KEY_VIOLATION  = 0x1d,

	MLX5_EVENT_TYPE_FPGA_ERROR	   = 0x20,
	MLX5_EVENT_TYPE_FPGA_QP_ERROR	   = 0x21,

	MLX5_EVENT_TYPE_DEVICE_TRACER	   = 0x26,

	MLX5_EVENT_TYPE_MAX		   = 0x100,
};

enum {
	MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
	MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
};

enum {
	MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT		  = 0x1,
	MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT	  = 0x5,
	MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8,
};

enum {
	MLX5_PORT_CHANGE_SUBTYPE_DOWN		= 1,
	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE		= 4,
	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED	= 5,
	MLX5_PORT_CHANGE_SUBTYPE_LID		= 6,
	MLX5_PORT_CHANGE_SUBTYPE_PKEY		= 7,
	MLX5_PORT_CHANGE_SUBTYPE_GUID		= 8,
	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG	= 9,
};

enum {
	MLX5_DEV_CAP_FLAG_XRC		= 1LL << 3,
	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL << 8,
	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL << 9,
	MLX5_DEV_CAP_FLAG_APM		= 1LL << 17,
	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
	MLX5_DEV_CAP_FLAG_BLOCK_MCAST	= 1LL << 23,
	MLX5_DEV_CAP_FLAG_ON_DMND_PG	= 1LL << 24,
	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 37,
	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
};

enum {
	MLX5_ROCE_VERSION_1	= 0,
	MLX5_ROCE_VERSION_2	= 2,
};

enum {
	MLX5_ROCE_VERSION_1_CAP	= 1 << MLX5_ROCE_VERSION_1,
	MLX5_ROCE_VERSION_2_CAP	= 1 << MLX5_ROCE_VERSION_2,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4	= 0,
	MLX5_ROCE_L3_TYPE_IPV6	= 1,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4_CAP	= 1 << 1,
	MLX5_ROCE_L3_TYPE_IPV6_CAP	= 1 << 2,
};
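
/*
 * Subscription sketch for the event types above (illustrative; it
 * assumes the mlx5_nb helpers declared in linux/mlx5/eq.h, and the
 * handler name is hypothetical):
 *
 *	static struct mlx5_nb port_nb;
 *
 *	MLX5_NB_INIT(&port_nb, my_port_event_handler, PORT_CHANGE);
 *	mlx5_eq_notifier_register(mdev, &port_nb);
 */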

enum {
	MLX5_OPCODE_NOP			= 0x00,
	MLX5_OPCODE_SEND_INVAL		= 0x01,
	MLX5_OPCODE_RDMA_WRITE		= 0x08,
	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX5_OPCODE_SEND		= 0x0a,
	MLX5_OPCODE_SEND_IMM		= 0x0b,
	MLX5_OPCODE_LSO			= 0x0e,
	MLX5_OPCODE_RDMA_READ		= 0x10,
	MLX5_OPCODE_ATOMIC_CS		= 0x11,
	MLX5_OPCODE_ATOMIC_FA		= 0x12,
	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
	MLX5_OPCODE_BIND_MW		= 0x18,
	MLX5_OPCODE_CONFIG_CMD		= 0x1f,
	MLX5_OPCODE_ENHANCED_MPSW	= 0x29,

	MLX5_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX5_RECV_OPCODE_SEND		= 0x01,
	MLX5_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX5_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX5_CQE_OPCODE_ERROR		= 0x1e,
	MLX5_CQE_OPCODE_RESIZE		= 0x16,

	MLX5_OPCODE_SET_PSV		= 0x20,
	MLX5_OPCODE_GET_PSV		= 0x21,
	MLX5_OPCODE_CHECK_PSV		= 0x22,
	MLX5_OPCODE_DUMP		= 0x23,
	MLX5_OPCODE_RGET_PSV		= 0x26,
	MLX5_OPCODE_RCHECK_PSV		= 0x27,

	MLX5_OPCODE_UMR			= 0x25,
};

enum {
	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
	MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2,
};

enum {
	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
	MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
};

enum {
	MLX5_SET_PORT_RESET_QKEY	= 0,
	MLX5_SET_PORT_GUID0		= 16,
	MLX5_SET_PORT_NODE_GUID		= 17,
	MLX5_SET_PORT_SYS_GUID		= 18,
	MLX5_SET_PORT_GID_TABLE		= 19,
	MLX5_SET_PORT_PKEY_TABLE	= 20,
};

enum {
	MLX5_BW_NO_LIMIT   = 0,
	MLX5_100_MBPS_UNIT = 3,
	MLX5_GBPS_UNIT	   = 4,
};

enum {
	MLX5_MAX_PAGE_SHIFT		= 31
};

enum {
	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
};

enum {
	/*
	 * Max wqe size for rdma read is 512 bytes, so this
	 * limits our max_sge_rd as the wqe needs to fit:
	 * - ctrl segment (16 bytes)
	 * - rdma segment (16 bytes)
	 * - scatter elements (16 bytes each)
	 */
	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
};

enum mlx5_odp_transport_cap_bits {
	MLX5_ODP_SUPPORT_SEND	= 1 << 31,
	MLX5_ODP_SUPPORT_RECV	= 1 << 30,
	MLX5_ODP_SUPPORT_WRITE	= 1 << 29,
	MLX5_ODP_SUPPORT_READ	= 1 << 28,
};

struct mlx5_odp_caps {
	char reserved[0x10];
	struct {
		__be32	rc_odp_caps;
		__be32	uc_odp_caps;
		__be32	ud_odp_caps;
	} per_transport_caps;
	char reserved2[0xe4];
};
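
/*
 * Capability-check sketch for the ODP bits above (illustrative; "caps"
 * is a hypothetical, already-queried structure):
 *
 *	u32 rc = be32_to_cpu(caps->per_transport_caps.rc_odp_caps);
 *
 *	if (rc & MLX5_ODP_SUPPORT_WRITE)
 *		enable_odp_rc_write();
 */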

struct mlx5_cmd_layout {
	u8		type;
	u8		rsvd0[3];
	__be32		inlen;
	__be64		in_ptr;
	__be32		in[4];
	__be32		out[4];
	__be64		out_ptr;
	__be32		outlen;
	u8		token;
	u8		sig;
	u8		rsvd1;
	u8		status_own;
};

enum mlx5_fatal_assert_bit_offsets {
	MLX5_RFR_OFFSET = 31,
};

struct health_buffer {
	__be32		assert_var[5];
	__be32		rsvd0[3];
	__be32		assert_exit_ptr;
	__be32		assert_callra;
	__be32		rsvd1[2];
	__be32		fw_ver;
	__be32		hw_id;
	__be32		rfr;
	u8		irisc_index;
	u8		synd;
	__be16		ext_synd;
};

enum mlx5_initializing_bit_offsets {
	MLX5_FW_RESET_SUPPORTED_OFFSET = 30,
};

enum mlx5_cmd_addr_l_sz_offset {
	MLX5_NIC_IFC_OFFSET = 8,
};

struct mlx5_init_seg {
	__be32			fw_rev;
	__be32			cmdif_rev_fw_sub;
	__be32			rsvd0[2];
	__be32			cmdq_addr_h;
	__be32			cmdq_addr_l_sz;
	__be32			cmd_dbell;
	__be32			rsvd1[120];
	__be32			initializing;
	struct health_buffer	health;
	__be32			rsvd2[880];
	__be32			internal_timer_h;
	__be32			internal_timer_l;
	__be32			rsvd3[2];
	__be32			health_counter;
	__be32			rsvd4[1019];
	__be64			ieee1588_clk;
	__be32			ieee1588_clk_type;
	__be32			clr_intx;
};

struct mlx5_eqe_comp {
	__be32	reserved[6];
	__be32	cqn;
};

struct mlx5_eqe_qp_srq {
	__be32	reserved1[5];
	u8	type;
	u8	reserved2[3];
	__be32	qp_srq_n;
};

struct mlx5_eqe_cq_err {
	__be32	cqn;
	u8	reserved1[7];
	u8	syndrome;
};

struct mlx5_eqe_xrq_err {
	__be32	reserved1[5];
	__be32	type_xrqn;
	__be32	reserved2;
};

struct mlx5_eqe_port_state {
	u8	reserved0[8];
	u8	port;
};

struct mlx5_eqe_gpio {
	__be32	reserved0[2];
	__be64	gpio_event;
};

struct mlx5_eqe_congestion {
	u8	type;
	u8	rsvd0;
	u8	congestion_level;
};

struct mlx5_eqe_stall_vl {
	u8	rsvd0[3];
	u8	port_vl;
};

struct mlx5_eqe_cmd {
	__be32	vector;
	__be32	rsvd[6];
};

struct mlx5_eqe_page_req {
	__be16		ec_function;
	__be16		func_id;
	__be32		num_pages;
	__be32		rsvd1[5];
};

struct mlx5_eqe_page_fault {
	__be32 bytes_committed;
	union {
		struct {
			u16	reserved1;
			__be16	wqe_index;
			u16	reserved2;
			__be16	packet_length;
			__be32	token;
			u8	reserved4[8];
			__be32	pftype_wq;
		} __packed wqe;
		struct {
			__be32	r_key;
			u16	reserved1;
			__be16	packet_length;
			__be32	rdma_op_len;
			__be64	rdma_va;
			__be32	pftype_token;
		} __packed rdma;
	} __packed;
} __packed;

struct mlx5_eqe_vport_change {
	u8		rsvd0[2];
	__be16		vport_num;
	__be32		rsvd1[6];
} __packed;

struct mlx5_eqe_port_module {
	u8	reserved_at_0[1];
	u8	module;
	u8	reserved_at_2[1];
	u8	module_status;
	u8	reserved_at_4[2];
	u8	error_type;
} __packed;

struct mlx5_eqe_pps {
	u8		rsvd0[3];
	u8		pin;
	u8		rsvd1[4];
	union {
		struct {
			__be32	time_sec;
			__be32	time_nsec;
		};
		struct {
			__be64	time_stamp;
		};
	};
	u8		rsvd2[12];
} __packed;

struct mlx5_eqe_dct {
	__be32	reserved[6];
	__be32	dctn;
};

struct mlx5_eqe_temp_warning {
	__be64	sensor_warning_msb;
	__be64	sensor_warning_lsb;
} __packed;

#define SYNC_RST_STATE_MASK	0xf

enum sync_rst_state_type {
	MLX5_SYNC_RST_STATE_RESET_REQUEST	= 0x0,
	MLX5_SYNC_RST_STATE_RESET_NOW		= 0x1,
	MLX5_SYNC_RST_STATE_RESET_ABORT		= 0x2,
};

struct mlx5_eqe_sync_fw_update {
	u8 reserved_at_0[3];
	u8 sync_rst_state;
};

union ev_data {
	__be32				raw[7];
	struct mlx5_eqe_cmd		cmd;
	struct mlx5_eqe_comp		comp;
	struct mlx5_eqe_qp_srq		qp_srq;
	struct mlx5_eqe_cq_err		cq_err;
	struct mlx5_eqe_port_state	port;
	struct mlx5_eqe_gpio		gpio;
	struct mlx5_eqe_congestion	cong;
	struct mlx5_eqe_stall_vl	stall_vl;
	struct mlx5_eqe_page_req	req_pages;
	struct mlx5_eqe_page_fault	page_fault;
	struct mlx5_eqe_vport_change	vport_change;
	struct mlx5_eqe_port_module	port_module;
	struct mlx5_eqe_pps		pps;
	struct mlx5_eqe_dct		dct;
	struct mlx5_eqe_temp_warning	temp_warning;
	struct mlx5_eqe_xrq_err		xrq_err;
	struct mlx5_eqe_sync_fw_update	sync_fw_update;
} __packed;

struct mlx5_eqe {
	u8		rsvd0;
	u8		type;
	u8		rsvd1;
	u8		sub_type;
	__be32		rsvd2[7];
	union ev_data	data;
	__be16		rsvd3;
	u8		signature;
	u8		owner;
} __packed;

struct mlx5_cmd_prot_block {
	u8		data[MLX5_CMD_DATA_BLOCK_SIZE];
	u8		rsvd0[48];
	__be64		next;
	__be32		block_num;
	u8		rsvd1;
	u8		token;
	u8		ctrl_sig;
	u8		sig;
};

enum {
	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
};
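
/*
 * Ownership sketch for struct mlx5_eqe above (illustrative; ci and nent
 * are hypothetical consumer-index bookkeeping, nent a power of two):
 * hardware toggles the owner bit on every pass over the EQ, so an entry
 * belongs to software when its owner bit matches the consumer's pass.
 *
 *	struct mlx5_eqe *eqe = &eq_base[ci & (nent - 1)];
 *
 *	if (!((eqe->owner & 1) ^ !!(ci & nent)))
 *		process(eqe);
 */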

struct mlx5_err_cqe {
	u8	rsvd0[32];
	__be32	srqn;
	u8	rsvd1[18];
	u8	vendor_err_synd;
	u8	syndrome;
	__be32	s_wqe_opcode_qpn;
	__be16	wqe_counter;
	u8	signature;
	u8	op_own;
};

struct mlx5_cqe64 {
	u8		tls_outer_l3_tunneled;
	u8		rsvd0;
	__be16		wqe_id;
	u8		lro_tcppsh_abort_dupack;
	u8		lro_min_ttl;
	__be16		lro_tcp_win;
	__be32		lro_ack_seq_num;
	__be32		rss_hash_result;
	u8		rss_hash_type;
	u8		ml_path;
	u8		rsvd20[2];
	__be16		check_sum;
	__be16		slid;
	__be32		flags_rqpn;
	u8		hds_ip_ext;
	u8		l4_l3_hdr_type;
	__be16		vlan_info;
	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
	union {
		__be32 immediate;
		__be32 inval_rkey;
		__be32 pkey;
		__be32 ft_metadata;
	};
	u8		rsvd40[4];
	__be32		byte_cnt;
	__be32		timestamp_h;
	__be32		timestamp_l;
	__be32		sop_drop_qpn;
	__be16		wqe_counter;
	u8		signature;
	u8		op_own;
};

struct mlx5_mini_cqe8 {
	union {
		__be32 rx_hash_result;
		struct {
			__be16 checksum;
			__be16 rsvd;
		};
		struct {
			__be16 wqe_counter;
			u8  s_wqe_opcode;
			u8  reserved;
		} s_wqe_info;
	};
	__be32 byte_cnt;
};

enum {
	MLX5_NO_INLINE_DATA,
	MLX5_INLINE_DATA32_SEG,
	MLX5_INLINE_DATA64_SEG,
	MLX5_COMPRESSED,
};

enum {
	MLX5_CQE_FORMAT_CSUM = 0x1,
};

#define MLX5_MINI_CQE_ARRAY_SIZE 8

static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
{
	return (cqe->op_own >> 2) & 0x3;
}

static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
{
	return cqe->op_own >> 4;
}

static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
}

static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
}

static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_l3_hdr_type >> 2) & 0x3;
}

static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
	return cqe->tls_outer_l3_tunneled & 0x1;
}

static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
{
	return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
}

static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
	return cqe->l4_l3_hdr_type & 0x1;
}

static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
{
	u32 hi, lo;

	hi = be32_to_cpu(cqe->timestamp_h);
	lo = be32_to_cpu(cqe->timestamp_l);

	return (u64)lo | ((u64)hi << 32);
}

#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE	(9)
#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE	(6)

struct mpwrq_cqe_bc {
	__be16	filler_consumed_strides;
	__be16	byte_cnt;
};

static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return be16_to_cpu(bc->byte_cnt);
}

static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
{
	return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return mpwrq_get_cqe_bc_consumed_strides(bc);
}

static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->wqe_counter);
}
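
/*
 * CQE parsing sketch using the accessors above (illustrative; the two
 * handlers are hypothetical):
 *
 *	if (get_cqe_opcode(cqe) == MLX5_CQE_OPCODE_ERROR)
 *		handle_error_cqe((struct mlx5_err_cqe *)cqe);
 *	else if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)
 *		decompress_cqes(cqe);
 *	else
 *		ts = get_cqe_ts(cqe);
 */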

enum {
	CQE_L4_HDR_TYPE_NONE			= 0x0,
	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
	CQE_L4_HDR_TYPE_UDP			= 0x2,
	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
};

enum {
	CQE_RSS_HTYPE_IP	= 0x3 << 2,
	/* cqe->rss_hash_type[3:2] - IP destination selected for hash
	 * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
	 */
	CQE_RSS_HTYPE_L4	= 0x3 << 6,
	/* cqe->rss_hash_type[7:6] - L4 destination selected for hash
	 * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
	 */
};

enum {
	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH	= 0x0,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6	= 0x1,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4	= 0x2,
};

enum {
	CQE_L2_OK	= 1 << 0,
	CQE_L3_OK	= 1 << 1,
	CQE_L4_OK	= 1 << 2,
};

enum {
	CQE_TLS_OFFLOAD_NOT_DECRYPTED	= 0x0,
	CQE_TLS_OFFLOAD_DECRYPTED	= 0x1,
	CQE_TLS_OFFLOAD_RESYNC		= 0x2,
	CQE_TLS_OFFLOAD_ERROR		= 0x3,
};

struct mlx5_sig_err_cqe {
	u8		rsvd0[16];
	__be32		expected_trans_sig;
	__be32		actual_trans_sig;
	__be32		expected_reftag;
	__be32		actual_reftag;
	__be16		syndrome;
	u8		rsvd22[2];
	__be32		mkey;
	__be64		err_offset;
	u8		rsvd30[8];
	__be32		qpn;
	u8		rsvd38[2];
	u8		signature;
	u8		op_own;
};

struct mlx5_wqe_srq_next_seg {
	u8	rsvd0[2];
	__be16	next_wqe_index;
	u8	signature;
	u8	rsvd1[11];
};

union mlx5_ext_cqe {
	struct ib_grh	grh;
	u8		inl[64];
};

struct mlx5_cqe128 {
	union mlx5_ext_cqe	inl_grh;
	struct mlx5_cqe64	cqe64;
};

enum {
	MLX5_MKEY_STATUS_FREE = 1 << 6,
};

enum {
	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
	MLX5_MKEY_BSF_EN	= 1 << 30,
	MLX5_MKEY_LEN64		= 1 << 31,
};
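
/*
 * TLS offload sketch combining get_cqe_tls_offload() with the states
 * above (illustrative; the resync helper is hypothetical):
 *
 *	switch (get_cqe_tls_offload(cqe)) {
 *	case CQE_TLS_OFFLOAD_DECRYPTED:
 *		skb->decrypted = 1;
 *		break;
 *	case CQE_TLS_OFFLOAD_RESYNC:
 *		queue_tls_resync(cqe);
 *		break;
 *	}
 */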

struct mlx5_mkey_seg {
	/* This is a two bit field occupying bits 31-30.
	 * bit 31 is always 0,
	 * bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that do not have translation
	 */
	u8		status;
	u8		pcie_control;
	u8		flags;
	u8		version;
	__be32		qpn_mkey7_0;
	u8		rsvd1[4];
	__be32		flags_pd;
	__be64		start_addr;
	__be64		len;
	__be32		bsfs_octo_size;
	u8		rsvd2[16];
	__be32		xlt_oct_size;
	u8		rsvd3[3];
	u8		log2_page_size;
	u8		rsvd4[4];
};

#define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)

enum {
	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 << 0
};

enum {
	VPORT_STATE_DOWN	= 0x0,
	VPORT_STATE_UP		= 0x1,
};

enum {
	MLX5_VPORT_ADMIN_STATE_DOWN	= 0x0,
	MLX5_VPORT_ADMIN_STATE_UP	= 0x1,
	MLX5_VPORT_ADMIN_STATE_AUTO	= 0x2,
};

enum {
	MLX5_L3_PROT_TYPE_IPV4	= 0,
	MLX5_L3_PROT_TYPE_IPV6	= 1,
};

enum {
	MLX5_L4_PROT_TYPE_TCP	= 0,
	MLX5_L4_PROT_TYPE_UDP	= 1,
};

enum {
	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
};

enum {
	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
	MLX5_MATCH_MISC_PARAMETERS_2	= 1 << 3,
	MLX5_MATCH_MISC_PARAMETERS_3	= 1 << 4,
};

enum {
	MLX5_FLOW_TABLE_TYPE_NIC_RCV	= 0,
	MLX5_FLOW_TABLE_TYPE_ESWITCH	= 4,
};

enum {
	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT	= 0,
	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE	= 1,
	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR		= 2,
};

enum mlx5_list_type {
	MLX5_NVPRT_LIST_TYPE_UC   = 0x0,
	MLX5_NVPRT_LIST_TYPE_MC   = 0x1,
	MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
};

enum {
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM    = 0x1,
};

enum mlx5_wol_mode {
	MLX5_WOL_DISABLE	= 0,
	MLX5_WOL_SECURED_MAGIC	= 1 << 1,
	MLX5_WOL_MAGIC		= 1 << 2,
	MLX5_WOL_ARP		= 1 << 3,
	MLX5_WOL_BROADCAST	= 1 << 4,
	MLX5_WOL_MULTICAST	= 1 << 5,
	MLX5_WOL_UNICAST	= 1 << 6,
	MLX5_WOL_PHY_ACTIVITY	= 1 << 7,
};

enum mlx5_mpls_supported_fields {
	MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0,
	MLX5_FIELD_SUPPORT_MPLS_EXP   = 1 << 1,
	MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2,
	MLX5_FIELD_SUPPORT_MPLS_TTL   = 1 << 3
};

enum mlx5_flex_parser_protos {
	MLX5_FLEX_PROTO_GENEVE	    = 1 << 3,
	MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
	MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
};

/* MLX5 DEV CAPs */

/* TODO: EAT.ME */
enum mlx5_cap_mode {
	HCA_CAP_OPMOD_GET_MAX	= 0,
	HCA_CAP_OPMOD_GET_CUR	= 1,
};

enum mlx5_cap_type {
	MLX5_CAP_GENERAL = 0,
	MLX5_CAP_ETHERNET_OFFLOADS,
	MLX5_CAP_ODP,
	MLX5_CAP_ATOMIC,
	MLX5_CAP_ROCE,
	MLX5_CAP_IPOIB_OFFLOADS,
	MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
	MLX5_CAP_FLOW_TABLE,
	MLX5_CAP_ESWITCH_FLOW_TABLE,
	MLX5_CAP_ESWITCH,
	MLX5_CAP_RESERVED,
	MLX5_CAP_VECTOR_CALC,
	MLX5_CAP_QOS,
	MLX5_CAP_DEBUG,
	MLX5_CAP_RESERVED_14,
	MLX5_CAP_DEV_MEM,
	MLX5_CAP_RESERVED_16,
	MLX5_CAP_TLS,
	MLX5_CAP_VDPA_EMULATION = 0x13,
	MLX5_CAP_DEV_EVENT = 0x14,
	MLX5_CAP_IPSEC,
	/* NUM OF CAP Types */
	MLX5_CAP_NUM
};
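
/*
 * Capability-query sketch (illustrative): QUERY_HCA_CAP takes an op_mod
 * combining a capability type above with a mode, conventionally
 *
 *	u16 opmod = (MLX5_CAP_ETHERNET_OFFLOADS << 1) | HCA_CAP_OPMOD_GET_CUR;
 *
 *	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
 *
 * where query_hca_cap_in is the mlx5_ifc.h layout for the command.
 */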

enum mlx5_pcam_reg_groups {
	MLX5_PCAM_REGS_5000_TO_507F	= 0x0,
};

enum mlx5_pcam_feature_groups {
	MLX5_PCAM_FEATURE_ENHANCED_FEATURES	= 0x0,
};

enum mlx5_mcam_reg_groups {
	MLX5_MCAM_REGS_FIRST_128	= 0x0,
	MLX5_MCAM_REGS_0x9080_0x90FF	= 0x1,
	MLX5_MCAM_REGS_0x9100_0x917F	= 0x2,
	MLX5_MCAM_REGS_NUM		= 0x3,
};

enum mlx5_mcam_feature_groups {
	MLX5_MCAM_FEATURE_ENHANCED_FEATURES	= 0x0,
};

enum mlx5_qcam_reg_groups {
	MLX5_QCAM_REGS_FIRST_128	= 0x0,
};

enum mlx5_qcam_feature_groups {
	MLX5_QCAM_FEATURE_ENHANCED_FEATURES	= 0x0,
};

/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_GEN_64(mdev, cap) \
	MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_GEN_MAX(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_ETH(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_ETH_MAX(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)

#define MLX5_CAP_ROCE(mdev, cap) \
	MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ROCE_MAX(mdev, cap) \
	MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ATOMIC(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
	MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)

#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
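
/*
 * Usage sketch for the GET Dev Caps macros above (illustrative;
 * log_max_qp and csum_cap are fields of the corresponding mlx5_ifc.h
 * capability layouts):
 *
 *	int max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
 *
 *	if (MLX5_CAP_ETH(mdev, csum_cap))
 *		netdev->hw_features |= NETIF_F_RXCSUM;
 */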

#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)

#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET64(flow_table_eswitch_cap, \
		   (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_MAX(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP_ODP(mdev, cap)\
	MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)

#define MLX5_CAP_ODP_MAX(mdev, cap)\
	MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap)

#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
	MLX5_GET(vector_calc_cap, \
		 mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)

#define MLX5_CAP_QOS(mdev, cap)\
	MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)

#define MLX5_CAP_DEBUG(mdev, cap)\
	MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)

#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
	MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)

#define MLX5_CAP_PCAM_REG(mdev, reg) \
	MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)

#define MLX5_CAP_MCAM_REG(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
		 mng_access_reg_cap_mask.access_regs.reg)

#define MLX5_CAP_MCAM_REG1(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
		 mng_access_reg_cap_mask.access_regs1.reg)

#define MLX5_CAP_MCAM_REG2(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
		 mng_access_reg_cap_mask.access_regs2.reg)

#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)

#define MLX5_CAP_QCAM_REG(mdev, fld) \
	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)

#define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)

#define MLX5_CAP_FPGA(mdev, cap) \
	MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)

#define MLX5_CAP64_FPGA(mdev, cap) \
	MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)

#define MLX5_CAP_DEV_MEM(mdev, cap)\
	MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)

#define MLX5_CAP64_DEV_MEM(mdev, cap)\
	MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)

#define MLX5_CAP_TLS(mdev, cap) \
	MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap)

#define MLX5_CAP_DEV_EVENT(mdev, cap)\
	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)

#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
	MLX5_GET(device_virtio_emulation_cap, \
		 (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)

#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
	MLX5_GET64(device_virtio_emulation_cap, \
		   (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)

#define MLX5_CAP_IPSEC(mdev, cap)\
	MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap)

enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};

enum {
	MLX5_IEEE_802_3_COUNTERS_GROUP		= 0x0,
	MLX5_RFC_2863_COUNTERS_GROUP		= 0x1,
	MLX5_RFC_2819_COUNTERS_GROUP		= 0x2,
	MLX5_RFC_3635_COUNTERS_GROUP		= 0x3,
	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP	= 0x5,
	MLX5_PER_PRIORITY_COUNTERS_GROUP	= 0x10,
	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP	= 0x11,
	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP	= 0x12,
	MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP	= 0x13,
	MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP	= 0x16,
	MLX5_INFINIBAND_PORT_COUNTERS_GROUP	= 0x20,
};

enum {
	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP	= 0x0,
};

/* Convert a firmware pkey table size exponent to the number of entries:
 * the table grows as MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz, up to
 * MLX5_MAX_LOG_PKEY_TABLE.
 */
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
		return 0;
	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}

#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
				MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
				MLX5_BY_PASS_NUM_MULTICAST_PRIOS)

#endif /* MLX5_DEVICE_H */