/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DEVICE_H
#define MLX5_DEVICE_H

#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/mlx5_ifc.h>

#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0x80
#else
#error Host endianness not defined
#endif

/*
 * Helper macros.
 *
 * Firmware command layouts are described by the mlx5_ifc_*_bits structs in
 * mlx5_ifc.h, where each field is expressed as a bit array.  The macros below
 * derive a field's bit size and bit offset purely from that type description
 * (via a NULL-based offsetof trick on the _bits struct), so no layout
 * constants have to be duplicated here.
 */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
/* Shift of the field within its 32-bit dword, counted from the LSB. */
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)

#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))

/*
 * Insert a value into a field of a command buffer.
 *
 * The buffer (p) holds the device-format (big-endian) layout; the dword
 * containing the field is read, converted to CPU order, merged with the
 * masked value and written back in big-endian.  The BUILD_BUG_ON catches
 * layout structs whose size is not a whole number of dwords.
 */
#define MLX5_SET(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

/* Like MLX5_SET() but sets every bit of the field to 1. */
#define MLX5_SET_TO_ONES(typ, p, fld) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

/* Extract a field value (CPU byte order) from a big-endian command buffer. */
#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))

/* MLX5_GET() plus a pr_debug() trace of the extracted value. */
#define MLX5_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})

/* Write a 64-bit field; only valid for naturally aligned 64-bit fields. */
#define MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)

#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))

#define MLX5_GET64_PR(typ, p, fld) ({ \
	u64 ___t = MLX5_GET64(typ, p, fld); \
	pr_debug(#fld " = 0x%llx\n", ___t); \
	___t; \
})

/* Big endian getters — return the field still in device (big-endian) order. */
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
	__mlx5_64_off(typ, fld)))

/*
 * Typed big-endian getter: picks the extraction width from sizeof(type_t).
 * u8 needs no swap; u16/u32 are converted back to big-endian after MLX5_GET();
 * u64 is read directly in device order.
 */
#define MLX5_GET_BE(type_t, typ, p, fld) ({				  \
		type_t tmp;						  \
		switch (sizeof(tmp)) {					  \
		case sizeof(u8):					  \
			tmp = (__force type_t)MLX5_GET(typ, p, fld);	  \
			break;						  \
		case sizeof(u16):					  \
			tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
			break;						  \
		case sizeof(u32):					  \
			tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
			break;						  \
		case sizeof(u64):					  \
			tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
			break;						  \
			}						  \
		tmp;							  \
		})

/* Command-interface geometry limits. */
enum {
	MLX5_MAX_COMMANDS		= 32,
	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
	MLX5_PCI_CMD_XPORT		= 7,
	MLX5_MKEY_BSF_OCTO_SIZE		= 4,
	MLX5_MAX_PSVS			= 4,
};

enum {
	MLX5_EXTENDED_UD_AV		= 0x80000000,
};

/* Hardware CQ states. */
enum {
	MLX5_CQ_STATE_ARMED		= 9,
	MLX5_CQ_STATE_ALWAYS_ARMED	= 0xb,
	MLX5_CQ_STATE_FIRED		= 0xa,
};

enum {
	MLX5_STAT_RATE_OFFSET	= 5,
};

enum {
	MLX5_INLINE_SEG	= 0x80000000,
};

enum {
	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
};

enum {
	MLX5_MIN_PKEY_TABLE_SIZE = 128,
	MLX5_MAX_LOG_PKEY_TABLE  = 5,
};

enum {
	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
};

/* Page-fault event subtypes (on-demand paging). */
enum {
	MLX5_PFAULT_SUBTYPE_WQE = 0,
	MLX5_PFAULT_SUBTYPE_RDMA = 1,
};

/* Memory-key access permission bits. */
enum {
	MLX5_PERM_LOCAL_READ	= 1 << 2,
	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
	MLX5_PERM_REMOTE_READ	= 1 << 4,
	MLX5_PERM_REMOTE_WRITE	= 1 << 5,
	MLX5_PERM_ATOMIC	= 1 << 6,
	MLX5_PERM_UMR_EN	= 1 << 7,
};

/* Bits of the mkey_seg pcie_control byte. */
enum {
	MLX5_PCIE_CTRL_SMALL_FENCE	= 1 << 0,
	MLX5_PCIE_CTRL_RELAXED_ORDERING	= 1 << 2,
	MLX5_PCIE_CTRL_NO_SNOOP		= 1 << 3,
	MLX5_PCIE_CTRL_TLP_PROCE_EN	= 1 << 6,
	MLX5_PCIE_CTRL_TPH_MASK		= 3 << 4,
};

/* Memory-key translation access modes. */
enum {
	MLX5_ACCESS_MODE_PA	= 0,
	MLX5_ACCESS_MODE_MTT	= 1,
	MLX5_ACCESS_MODE_KLM	= 2
};

enum {
	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
	MLX5_MKEY_BSF_EN	= 1 << 30,
	MLX5_MKEY_LEN64		= 1 << 31,
};

enum {
	MLX5_EN_RD	= (u64)1,
	MLX5_EN_WR	= (u64)2
};

/* UAR/blue-flame register accounting. */
enum {
	MLX5_BF_REGS_PER_PAGE		= 4,
	MLX5_MAX_UAR_PAGES		= 1 << 8,
	MLX5_NON_FP_BF_REGS_PER_PAGE	= 2,
	MLX5_MAX_UUARS	= MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
};

/* Field-select mask bits for UMR/mkey modification. */
enum {
	MLX5_MKEY_MASK_LEN		= 1ull << 0,
	MLX5_MKEY_MASK_PAGE_SIZE	= 1ull << 1,
	MLX5_MKEY_MASK_START_ADDR	= 1ull << 6,
	MLX5_MKEY_MASK_PD		= 1ull << 7,
	MLX5_MKEY_MASK_EN_RINVAL	= 1ull << 8,
	MLX5_MKEY_MASK_EN_SIGERR	= 1ull << 9,
	MLX5_MKEY_MASK_BSF_EN		= 1ull << 12,
	MLX5_MKEY_MASK_KEY		= 1ull << 13,
	MLX5_MKEY_MASK_QPN		= 1ull << 14,
	MLX5_MKEY_MASK_LR		= 1ull << 17,
	MLX5_MKEY_MASK_LW		= 1ull << 18,
	MLX5_MKEY_MASK_RR		= 1ull << 19,
	MLX5_MKEY_MASK_RW		= 1ull << 20,
	MLX5_MKEY_MASK_A		= 1ull << 21,
	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
	MLX5_MKEY_MASK_FREE		= 1ull << 29,
};

enum {
	MLX5_UMR_TRANSLATION_OFFSET_EN	= (1 << 4),

	MLX5_UMR_CHECK_NOT_FREE		= (1 << 5),
	MLX5_UMR_CHECK_FREE		= (2 << 5),

	MLX5_UMR_INLINE			= (1 << 7),
};

#define MLX5_UMR_MTT_ALIGNMENT 0x40
#define MLX5_UMR_MTT_MASK      (MLX5_UMR_MTT_ALIGNMENT - 1)
#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT

#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)

enum {
	MLX5_EVENT_QUEUE_TYPE_QP = 0,
	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
};

/* EQE event types, grouped by event class. */
enum mlx5_event {
	MLX5_EVENT_TYPE_COMP		   = 0x0,

	MLX5_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX5_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX5_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX5_EVENT_TYPE_SRQ_LAST_WQE	   = 0x13,
	MLX5_EVENT_TYPE_SRQ_RQ_LIMIT	   = 0x14,

	MLX5_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX5_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX5_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX5_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX5_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,

	MLX5_EVENT_TYPE_INTERNAL_ERROR	   = 0x08,
	MLX5_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX5_EVENT_TYPE_GPIO_EVENT	   = 0x15,
	MLX5_EVENT_TYPE_REMOTE_CONFIG	   = 0x19,

	MLX5_EVENT_TYPE_DB_BF_CONGESTION   = 0x1a,
	MLX5_EVENT_TYPE_STALL_EVENT	   = 0x1b,

	MLX5_EVENT_TYPE_CMD		   = 0x0a,
	MLX5_EVENT_TYPE_PAGE_REQUEST	   = 0xb,

	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE   = 0xd,
};

/* Subtypes of MLX5_EVENT_TYPE_PORT_CHANGE. */
enum {
	MLX5_PORT_CHANGE_SUBTYPE_DOWN		= 1,
	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE		= 4,
	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED	= 5,
	MLX5_PORT_CHANGE_SUBTYPE_LID		= 6,
	MLX5_PORT_CHANGE_SUBTYPE_PKEY		= 7,
	MLX5_PORT_CHANGE_SUBTYPE_GUID		= 8,
	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG	= 9,
};

/* Device capability flag bits (64-bit field, hence 1LL). */
enum {
	MLX5_DEV_CAP_FLAG_XRC		= 1LL <<  3,
	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
	MLX5_DEV_CAP_FLAG_APM		= 1LL << 17,
	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
	MLX5_DEV_CAP_FLAG_BLOCK_MCAST	= 1LL << 23,
	MLX5_DEV_CAP_FLAG_ON_DMND_PG	= 1LL << 24,
	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 37,
	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
};

enum {
	MLX5_ROCE_VERSION_1		= 0,
	MLX5_ROCE_VERSION_2		= 2,
};

enum {
	MLX5_ROCE_VERSION_1_CAP		= 1 << MLX5_ROCE_VERSION_1,
	MLX5_ROCE_VERSION_2_CAP		= 1 << MLX5_ROCE_VERSION_2,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4		= 0,
	MLX5_ROCE_L3_TYPE_IPV6		= 1,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4_CAP	= 1 << 1,
	MLX5_ROCE_L3_TYPE_IPV6_CAP	= 1 << 2,
};

/* WQE/CQE opcodes as encoded by the hardware. */
enum {
	MLX5_OPCODE_NOP			= 0x00,
	MLX5_OPCODE_SEND_INVAL		= 0x01,
	MLX5_OPCODE_RDMA_WRITE		= 0x08,
	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX5_OPCODE_SEND		= 0x0a,
	MLX5_OPCODE_SEND_IMM		= 0x0b,
	MLX5_OPCODE_LSO			= 0x0e,
	MLX5_OPCODE_RDMA_READ		= 0x10,
	MLX5_OPCODE_ATOMIC_CS		= 0x11,
	MLX5_OPCODE_ATOMIC_FA		= 0x12,
	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
	MLX5_OPCODE_BIND_MW		= 0x18,
	MLX5_OPCODE_CONFIG_CMD		= 0x1f,

	MLX5_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX5_RECV_OPCODE_SEND		= 0x01,
	MLX5_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX5_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX5_CQE_OPCODE_ERROR		= 0x1e,
	MLX5_CQE_OPCODE_RESIZE		= 0x16,

	MLX5_OPCODE_SET_PSV		= 0x20,
	MLX5_OPCODE_GET_PSV		= 0x21,
	MLX5_OPCODE_CHECK_PSV		= 0x22,
	MLX5_OPCODE_RGET_PSV		= 0x26,
	MLX5_OPCODE_RCHECK_PSV		= 0x27,

	MLX5_OPCODE_UMR			= 0x25,

};

enum {
	MLX5_SET_PORT_RESET_QKEY	= 0,
	MLX5_SET_PORT_GUID0		= 16,
	MLX5_SET_PORT_NODE_GUID		= 17,
	MLX5_SET_PORT_SYS_GUID		= 18,
	MLX5_SET_PORT_GID_TABLE		= 19,
	MLX5_SET_PORT_PKEY_TABLE	= 20,
};

/* Rate-limit units for vport bandwidth shaping. */
enum {
	MLX5_BW_NO_LIMIT   = 0,
	MLX5_100_MBPS_UNIT = 3,
	MLX5_GBPS_UNIT	   = 4,
};

enum {
	MLX5_MAX_PAGE_SHIFT		= 31
};

/* Device pages are always 4KB regardless of host PAGE_SIZE. */
enum {
	MLX5_ADAPTER_PAGE_SHIFT		= 12,
	MLX5_ADAPTER_PAGE_SIZE		= 1 << MLX5_ADAPTER_PAGE_SHIFT,
};

enum {
	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
};

/* Common header prepended to every command mailbox input. */
struct mlx5_inbox_hdr {
	__be16		opcode;
	u8		rsvd[4];
	__be16		opmod;
};

/* Common header of every command mailbox output. */
struct mlx5_outbox_hdr {
	u8		status;
	u8		rsvd[3];
	__be32		syndrome;
};

struct mlx5_cmd_query_adapter_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_cmd_query_adapter_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[24];
	u8			intapin;
	u8			rsvd1[13];
	__be16			vsd_vendor_id;
	u8			vsd[208];
	u8			vsd_psid[16];
};

/* Per-transport on-demand-paging capability bits. */
enum mlx5_odp_transport_cap_bits {
	MLX5_ODP_SUPPORT_SEND	 = 1 << 31,
	MLX5_ODP_SUPPORT_RECV	 = 1 << 30,
	MLX5_ODP_SUPPORT_WRITE	 = 1 << 29,
	MLX5_ODP_SUPPORT_READ	 = 1 << 28,
};

struct mlx5_odp_caps {
	char reserved[0x10];
	struct {
		__be32			rc_odp_caps;
		__be32			uc_odp_caps;
		__be32			ud_odp_caps;
	} per_transport_caps;
	char reserved2[0xe4];
};

struct mlx5_cmd_init_hca_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd0[2];
	__be16			profile;
	u8			rsvd1[4];
};

struct mlx5_cmd_init_hca_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_cmd_teardown_hca_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd0[2];
	__be16			profile;
	u8			rsvd1[4];
};

struct mlx5_cmd_teardown_hca_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

/* One entry of the hardware command queue (layout fixed by the device). */
struct mlx5_cmd_layout {
	u8		type;
	u8		rsvd0[3];
	__be32		inlen;
	__be64		in_ptr;
	__be32		in[4];
	__be32		out[4];
	__be64		out_ptr;
	__be32		outlen;
	u8		token;
	u8		sig;
	u8		rsvd1;
	u8		status_own;
};

/* Firmware health reporting area, mapped from the init segment. */
struct health_buffer {
	__be32		assert_var[5];
	__be32		rsvd0[3];
	__be32		assert_exit_ptr;
	__be32		assert_callra;
	__be32		rsvd1[2];
	__be32		fw_ver;
	__be32		hw_id;
	__be32		rsvd2;
	u8		irisc_index;
	u8		synd;
	__be16		ext_synd;
};

/* BAR0 initialization segment layout. */
struct mlx5_init_seg {
	__be32			fw_rev;
	__be32			cmdif_rev_fw_sub;
	__be32			rsvd0[2];
	__be32			cmdq_addr_h;
	__be32			cmdq_addr_l_sz;
	__be32			cmd_dbell;
	__be32			rsvd1[120];
	__be32			initializing;
	struct health_buffer	health;
	__be32			rsvd2[880];
	__be32			internal_timer_h;
	__be32			internal_timer_l;
	__be32			rsvd3[2];
	__be32			health_counter;
	__be32			rsvd4[1019];
	__be64			ieee1588_clk;
	__be32			ieee1588_clk_type;
	__be32			clr_intx;
};

struct mlx5_eqe_comp {
	__be32	reserved[6];
	__be32	cqn;
};

struct mlx5_eqe_qp_srq {
	__be32	reserved1[5];
	u8	type;
	u8	reserved2[3];
	__be32	qp_srq_n;
};

struct mlx5_eqe_cq_err {
	__be32	cqn;
	u8	reserved1[7];
	u8	syndrome;
};

struct mlx5_eqe_port_state {
	u8	reserved0[8];
	u8	port;
};

struct mlx5_eqe_gpio {
	__be32	reserved0[2];
	__be64	gpio_event;
};

struct mlx5_eqe_congestion {
	u8	type;
	u8	rsvd0;
	u8	congestion_level;
};

struct mlx5_eqe_stall_vl {
	u8	rsvd0[3];
	u8	port_vl;
};

struct mlx5_eqe_cmd {
	__be32	vector;
	__be32	rsvd[6];
};

struct mlx5_eqe_page_req {
	u8		rsvd0[2];
	__be16		func_id;
	__be32		num_pages;
	__be32		rsvd1[5];
};

/* Page-fault EQE; the union is selected by the event subtype (WQE vs RDMA). */
struct mlx5_eqe_page_fault {
	__be32 bytes_committed;
	union {
		struct {
			u16     reserved1;
			__be16  wqe_index;
			u16	reserved2;
			__be16  packet_length;
			u8	reserved3[12];
		} __packed wqe;
		struct {
			__be32  r_key;
			u16     reserved1;
			__be16  packet_length;
			__be32  rdma_op_len;
			__be64  rdma_va;
		} __packed rdma;
	} __packed;
	__be32 flags_qpn;
} __packed;

struct mlx5_eqe_vport_change {
	u8		rsvd0[2];
	__be16		vport_num;
	__be32		rsvd1[6];
} __packed;

/* Payload of an EQE, selected by mlx5_eqe.type. */
union ev_data {
	__be32				raw[7];
	struct mlx5_eqe_cmd		cmd;
	struct mlx5_eqe_comp		comp;
	struct mlx5_eqe_qp_srq		qp_srq;
	struct mlx5_eqe_cq_err		cq_err;
	struct mlx5_eqe_port_state	port;
	struct mlx5_eqe_gpio		gpio;
	struct mlx5_eqe_congestion	cong;
	struct mlx5_eqe_stall_vl	stall_vl;
	struct mlx5_eqe_page_req	req_pages;
	struct mlx5_eqe_page_fault	page_fault;
	struct mlx5_eqe_vport_change	vport_change;
} __packed;

/* Event queue entry as written by the device. */
struct mlx5_eqe {
	u8		rsvd0;
	u8		type;
	u8		rsvd1;
	u8		sub_type;
	__be32		rsvd2[7];
	union ev_data	data;
	__be16		rsvd3;
	u8		signature;
	u8		owner;
} __packed;

/* Block of a chained command mailbox buffer. */
struct mlx5_cmd_prot_block {
	u8		data[MLX5_CMD_DATA_BLOCK_SIZE];
	u8		rsvd0[48];
	__be64		next;
	__be32		block_num;
	u8		rsvd1;
	u8		token;
	u8		ctrl_sig;
	u8		sig;
};

enum {
	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
};

/* Error completion entry. */
struct mlx5_err_cqe {
	u8	rsvd0[32];
	__be32	srqn;
	u8	rsvd1[18];
	u8	vendor_err_synd;
	u8	syndrome;
	__be32	s_wqe_opcode_qpn;
	__be16	wqe_counter;
	u8	signature;
	u8	op_own;
};

/* 64-byte completion queue entry. */
struct mlx5_cqe64 {
	u8		rsvd0[4];
	u8		lro_tcppsh_abort_dupack;
	u8		lro_min_ttl;
	__be16		lro_tcp_win;
	__be32		lro_ack_seq_num;
	__be32		rss_hash_result;
	u8		rss_hash_type;
	u8		ml_path;
	u8		rsvd20[2];
	__be16		check_sum;
	__be16		slid;
	__be32		flags_rqpn;
	u8		hds_ip_ext;
	u8		l4_hdr_type_etc;
	__be16		vlan_info;
	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
	__be32		imm_inval_pkey;
	u8		rsvd40[4];
	__be32		byte_cnt;
	__be32		timestamp_h;
	__be32		timestamp_l;
	__be32		sop_drop_qpn;
	__be16		wqe_counter;
	u8		signature;
	u8		op_own;
};

/* Extract the LRO TCP PSH indication (bit 6) from the CQE. */
static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
}

/* Extract the L4 header type (bits [6:4] of l4_hdr_type_etc); see CQE_L4_HDR_TYPE_*. */
static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_hdr_type_etc >> 4) & 0x7;
}

/* Non-zero if the completed packet carried a VLAN tag (bit 0 of l4_hdr_type_etc). */
static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
	return !!(cqe->l4_hdr_type_etc & 0x1);
}

/* Combine the split 32-bit halves into the 64-bit CQE timestamp. */
static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
{
	u32 hi, lo;

	hi = be32_to_cpu(cqe->timestamp_h);
	lo = be32_to_cpu(cqe->timestamp_l);

	return (u64)lo | ((u64)hi << 32);
}

enum {
	CQE_L4_HDR_TYPE_NONE			= 0x0,
	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
	CQE_L4_HDR_TYPE_UDP			= 0x2,
	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
};

enum {
	CQE_RSS_HTYPE_IP	= 0x3 << 6,
	CQE_RSS_HTYPE_L4	= 0x3 << 2,
};

enum {
	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH	= 0x0,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6	= 0x1,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4	= 0x2,
};

/* Checksum-OK flags reported in the CQE. */
enum {
	CQE_L2_OK	= 1 << 0,
	CQE_L3_OK	= 1 << 1,
	CQE_L4_OK	= 1 << 2,
};

/* Signature (T10-DIF) error completion entry. */
struct mlx5_sig_err_cqe {
	u8		rsvd0[16];
	__be32		expected_trans_sig;
	__be32		actual_trans_sig;
	__be32		expected_reftag;
	__be32		actual_reftag;
	__be16		syndrome;
	u8		rsvd22[2];
	__be32		mkey;
	__be64		err_offset;
	u8		rsvd30[8];
	__be32		qpn;
	u8		rsvd38[2];
	u8		signature;
	u8		op_own;
};

struct mlx5_wqe_srq_next_seg {
	u8			rsvd0[2];
	__be16			next_wqe_index;
	u8			signature;
	u8			rsvd1[11];
};

union mlx5_ext_cqe {
	struct ib_grh	grh;
	u8		inl[64];
};

/* 128-byte CQE: inline data / GRH followed by the regular 64-byte CQE. */
struct mlx5_cqe128 {
	union mlx5_ext_cqe	inl_grh;
	struct mlx5_cqe64	cqe64;
};

struct mlx5_srq_ctx {
	u8			state_log_sz;
	u8			rsvd0[3];
	__be32			flags_xrcd;
	__be32			pgoff_cqn;
	u8			rsvd1[4];
	u8			log_pg_sz;
	u8			rsvd2[7];
	__be32			pd;
	__be16			lwm;
	__be16			wqe_cnt;
	u8			rsvd3[8];
	__be64			db_record;
};

struct mlx5_create_srq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			input_srqn;
	u8			rsvd0[4];
	struct mlx5_srq_ctx	ctx;
	u8			rsvd1[208];
	__be64			pas[0];
};

struct mlx5_create_srq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			srqn;
	u8			rsvd[4];
};

struct mlx5_destroy_srq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			srqn;
	u8			rsvd[4];
};

struct mlx5_destroy_srq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_query_srq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			srqn;
	u8			rsvd0[4];
};

struct mlx5_query_srq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
	struct mlx5_srq_ctx	ctx;
	u8			rsvd1[32];
	__be64			pas[0];
};

struct mlx5_arm_srq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			srqn;
	__be16			rsvd;
	__be16			lwm;
};

struct mlx5_arm_srq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_cq_context {
	u8			status;
	u8			cqe_sz_flags;
	u8			st;
	u8			rsvd3;
	u8			rsvd4[6];
	__be16			page_offset;
	__be32			log_sz_usr_page;
	__be16			cq_period;
	__be16			cq_max_count;
	__be16			rsvd20;
	__be16			c_eqn;
	u8			log_pg_sz;
	u8			rsvd25[7];
	__be32			last_notified_index;
	__be32			solicit_producer_index;
	__be32			consumer_counter;
	__be32			producer_counter;
	u8			rsvd48[8];
	__be64			db_record_addr;
};

struct mlx5_create_cq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			input_cqn;
	u8			rsvdx[4];
	struct mlx5_cq_context	ctx;
	u8			rsvd6[192];
	__be64			pas[0];
};

struct mlx5_create_cq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			cqn;
	u8			rsvd0[4];
};

struct mlx5_destroy_cq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			cqn;
	u8			rsvd0[4];
};

struct mlx5_destroy_cq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
};

struct mlx5_query_cq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			cqn;
	u8			rsvd0[4];
};

struct mlx5_query_cq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
	struct mlx5_cq_context	ctx;
	u8			rsvd6[16];
	__be64			pas[0];
};

struct mlx5_modify_cq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			cqn;
	__be32			field_select;
	struct mlx5_cq_context	ctx;
	u8			rsvd[192];
	__be64			pas[0];
};

struct mlx5_modify_cq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_enable_hca_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_enable_hca_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_disable_hca_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_disable_hca_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

/*
 * NOTE(review): the field names "page_pffset" and "produser_counter" below
 * are misspelled ("page_offset" / "producer_counter"), but they are part of
 * the driver's API and cannot be renamed here without breaking callers.
 */
struct mlx5_eq_context {
	u8			status;
	u8			ec_oi;
	u8			st;
	u8			rsvd2[7];
	__be16			page_pffset;
	__be32			log_sz_usr_page;
	u8			rsvd3[7];
	u8			intr;
	u8			log_page_size;
	u8			rsvd4[15];
	__be32			consumer_counter;
	__be32			produser_counter;
	u8			rsvd5[16];
};

struct mlx5_create_eq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd0[3];
	u8			input_eqn;
	u8			rsvd1[4];
	struct mlx5_eq_context	ctx;
	u8			rsvd2[8];
	__be64			events_mask;
	u8			rsvd3[176];
	__be64			pas[0];
};

struct mlx5_create_eq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[3];
	u8			eq_number;
	u8			rsvd1[4];
};

struct mlx5_destroy_eq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd0[3];
	u8			eqn;
	u8			rsvd1[4];
};

struct mlx5_destroy_eq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_map_eq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be64			mask;
	u8			mu;
	u8			rsvd0[2];
	u8			eqn;
	u8			rsvd1[24];
};

struct mlx5_map_eq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_query_eq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd0[3];
	u8			eqn;
	u8			rsvd1[4];
};

struct mlx5_query_eq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
	struct mlx5_eq_context	ctx;
};

enum {
	MLX5_MKEY_STATUS_FREE = 1 << 6,
};

/* Hardware memory-key (MKEY) context segment. */
struct mlx5_mkey_seg {
	/* This is a two bit field occupying bits 31-30.
	 * bit 31 is always 0,
	 * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have translation
	 */
	u8		status;
	u8		pcie_control;
	u8		flags;
	u8		version;
	__be32		qpn_mkey7_0;
	u8		rsvd1[4];
	__be32		flags_pd;
	__be64		start_addr;
	__be64		len;
	__be32		bsfs_octo_size;
	u8		rsvd2[16];
	__be32		xlt_oct_size;
	u8		rsvd3[3];
	u8		log2_page_size;
	u8		rsvd4[4];
};

struct mlx5_query_special_ctxs_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_query_special_ctxs_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			dump_fill_mkey;
	__be32			reserved_lkey;
};

struct mlx5_create_mkey_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			input_mkey_index;
	__be32			flags;
	struct mlx5_mkey_seg	seg;
	u8			rsvd1[16];
	__be32			xlat_oct_act_size;
	__be32			rsvd2;
	u8			rsvd3[168];
	__be64			pas[0];
};

struct mlx5_create_mkey_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			mkey;
	u8			rsvd[4];
};

struct mlx5_destroy_mkey_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			mkey;
	u8			rsvd[4];
};

struct mlx5_destroy_mkey_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_query_mkey_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			mkey;
};

struct mlx5_query_mkey_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be64			pas[0];
};

struct mlx5_modify_mkey_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			mkey;
	__be64			pas[0];
};

struct mlx5_modify_mkey_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_dump_mkey_mbox_in {
	struct mlx5_inbox_hdr	hdr;
};

struct mlx5_dump_mkey_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			mkey;
};

struct mlx5_mad_ifc_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be16			remote_lid;
	u8			rsvd0;
	u8			port;
	u8			rsvd1[4];
	u8			data[256];
};

struct mlx5_mad_ifc_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
	u8			data[256];
};

struct mlx5_access_reg_mbox_in {
	struct mlx5_inbox_hdr		hdr;
	u8				rsvd0[2];
	__be16				register_id;
	__be32				arg;
	__be32				data[0];
};

struct mlx5_access_reg_mbox_out {
	struct mlx5_outbox_hdr		hdr;
	u8				rsvd[8];
	__be32				data[0];
};

#define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)

enum {
	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 <<  0
};

struct mlx5_allocate_psv_in {
	struct mlx5_inbox_hdr   hdr;
	__be32			npsv_pd;
	__be32			rsvd_psv0;
};

struct mlx5_allocate_psv_out {
	struct mlx5_outbox_hdr  hdr;
	u8			rsvd[8];
	__be32			psv_idx[4];
};

struct mlx5_destroy_psv_in {
	struct mlx5_inbox_hdr	hdr;
	__be32                  psv_number;
	u8                      rsvd[4];
};

struct mlx5_destroy_psv_out {
	struct mlx5_outbox_hdr  hdr;
	u8                      rsvd[8];
};

#define MLX5_CMD_OP_MAX 0x920

enum {
	VPORT_STATE_DOWN		= 0x0,
	VPORT_STATE_UP			= 0x1,
};

enum {
	MLX5_ESW_VPORT_ADMIN_STATE_DOWN  = 0x0,
	MLX5_ESW_VPORT_ADMIN_STATE_UP    = 0x1,
	MLX5_ESW_VPORT_ADMIN_STATE_AUTO  = 0x2,
};

enum {
	MLX5_L3_PROT_TYPE_IPV4		= 0,
	MLX5_L3_PROT_TYPE_IPV6		= 1,
};

enum {
	MLX5_L4_PROT_TYPE_TCP		= 0,
	MLX5_L4_PROT_TYPE_UDP		= 1,
};

/* RSS hash field selectors. */
enum {
	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
};

enum {
	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
	MLX5_MATCH_INNER_HEADERS	= 1 << 2,

};

enum {
	MLX5_FLOW_TABLE_TYPE_NIC_RCV	= 0,
	MLX5_FLOW_TABLE_TYPE_ESWITCH	= 4,
};

enum {
	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT	= 0,
	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE	= 1,
	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR		= 2,
};

enum mlx5_list_type {
	MLX5_NVPRT_LIST_TYPE_UC   = 0x0,
	MLX5_NVPRT_LIST_TYPE_MC   = 0x1,
	MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
};

enum {
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM    = 0x1,
};

/* Wake-on-LAN trigger modes. */
enum mlx5_wol_mode {
	MLX5_WOL_DISABLE        = 0,
	MLX5_WOL_SECURED_MAGIC  = 1 << 1,
	MLX5_WOL_MAGIC          = 1 << 2,
	MLX5_WOL_ARP            = 1 << 3,
	MLX5_WOL_BROADCAST      = 1 << 4,
	MLX5_WOL_MULTICAST      = 1 << 5,
	MLX5_WOL_UNICAST        = 1 << 6,
	MLX5_WOL_PHY_ACTIVITY   = 1 << 7,
};

/* MLX5 DEV CAPs */

/* TODO: EAT.ME */
enum mlx5_cap_mode {
	HCA_CAP_OPMOD_GET_MAX	= 0,
	HCA_CAP_OPMOD_GET_CUR	= 1,
};

/* Capability groups queried from firmware; indexes hca_caps_cur/max arrays. */
enum mlx5_cap_type {
	MLX5_CAP_GENERAL = 0,
	MLX5_CAP_ETHERNET_OFFLOADS,
	MLX5_CAP_ODP,
	MLX5_CAP_ATOMIC,
	MLX5_CAP_ROCE,
	MLX5_CAP_IPOIB_OFFLOADS,
	MLX5_CAP_EOIB_OFFLOADS,
	MLX5_CAP_FLOW_TABLE,
	MLX5_CAP_ESWITCH_FLOW_TABLE,
	MLX5_CAP_ESWITCH,
	MLX5_CAP_RESERVED,
	MLX5_CAP_VECTOR_CALC,
	/* NUM OF CAP Types */
	MLX5_CAP_NUM
};

/*
 * GET Dev Caps macros.
 *
 * The plain form reads the currently enabled capability value
 * (hca_caps_cur); the _MAX form reads the device's maximum supported
 * value (hca_caps_max).
 */
#define MLX5_CAP_GEN(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_GEN_MAX(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_ETH(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_ETH_MAX(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_ROCE(mdev, cap) \
	MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ROCE_MAX(mdev, cap) \
	MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ATOMIC(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP_ESW_MAX(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP_ODP(mdev, cap)\
	MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)

#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
	MLX5_GET(vector_calc_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)

/* Command return statuses as reported in mlx5_outbox_hdr.status. */
enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};

/* PPCNT counter group selectors. */
enum {
	MLX5_IEEE_802_3_COUNTERS_GROUP	      = 0x0,
	MLX5_RFC_2863_COUNTERS_GROUP	      = 0x1,
	MLX5_RFC_2819_COUNTERS_GROUP	      = 0x2,
	MLX5_RFC_3635_COUNTERS_GROUP	      = 0x3,
	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
	MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
	MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
};

/*
 * Convert the device's log2-encoded pkey table size to the number of
 * entries.  Returns 0 when the encoded value exceeds the supported
 * maximum (MLX5_MAX_LOG_PKEY_TABLE).
 */
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
		return 0;
	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}

#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
				MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
				MLX5_BY_PASS_NUM_MULTICAST_PRIOS)

#endif /* MLX5_DEVICE_H */