/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_DEVICE_H
#define MLX4_DEVICE_H

#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/radix-tree.h>

#include <linux/atomic.h>

#define MAX_MSIX_P_PORT		17
#define MAX_MSIX		64
#define MSIX_LEGACY_SZ		4
#define MIN_MSIX_P_PORT		5

enum {
	MLX4_FLAG_MSI_X		= 1 << 0,
	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1,
};

enum {
	MLX4_MAX_PORTS		= 2
};

enum {
	MLX4_BOARD_ID_LEN	= 64
};

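/*
 * Device capability flags.  The firmware reports these and the driver
 * keeps them in mlx4_caps.flags (see struct mlx4_caps below), so a
 * feature is normally gated on its bit before use.  A minimal sketch:
 *
 *	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
 *		... IB-over-Ethernet (RoCE) paths may be enabled ...
 */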
enum {
	MLX4_DEV_CAP_FLAG_RC		= 1LL << 0,
	MLX4_DEV_CAP_FLAG_UC		= 1LL << 1,
	MLX4_DEV_CAP_FLAG_UD		= 1LL << 2,
	MLX4_DEV_CAP_FLAG_SRQ		= 1LL << 6,
	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1LL << 7,
	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL << 8,
	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL << 9,
	MLX4_DEV_CAP_FLAG_DPDP		= 1LL << 12,
	MLX4_DEV_CAP_FLAG_BLH		= 1LL << 15,
	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1LL << 16,
	MLX4_DEV_CAP_FLAG_APM		= 1LL << 17,
	MLX4_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1LL << 19,
	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1LL << 20,
	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1LL << 21,
	MLX4_DEV_CAP_FLAG_IBOE		= 1LL << 30,
	MLX4_DEV_CAP_FLAG_UC_LOOPBACK	= 1LL << 32,
	MLX4_DEV_CAP_FLAG_FCS_KEEP	= 1LL << 34,
	MLX4_DEV_CAP_FLAG_WOL		= 1LL << 38,
	MLX4_DEV_CAP_FLAG_UDP_RSS	= 1LL << 40,
	MLX4_DEV_CAP_FLAG_VEP_UC_STEER	= 1LL << 41,
	MLX4_DEV_CAP_FLAG_VEP_MC_STEER	= 1LL << 42,
	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48
};

enum {
	MLX4_BMME_FLAG_LOCAL_INV	= 1 << 6,
	MLX4_BMME_FLAG_REMOTE_INV	= 1 << 7,
	MLX4_BMME_FLAG_TYPE_2_WIN	= 1 << 9,
	MLX4_BMME_FLAG_RESERVED_LKEY	= 1 << 10,
	MLX4_BMME_FLAG_FAST_REG_WR	= 1 << 11,
};

enum mlx4_event {
	MLX4_EVENT_TYPE_COMP		   = 0x00,
	MLX4_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX4_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX4_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE	   = 0x13,
	MLX4_EVENT_TYPE_SRQ_LIMIT	   = 0x14,
	MLX4_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX4_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX4_EVENT_TYPE_EEC_CATAS_ERROR	   = 0x06,
	MLX4_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX4_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX4_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,
	MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MLX4_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX4_EVENT_TYPE_EQ_OVERFLOW	   = 0x0f,
	MLX4_EVENT_TYPE_ECC_DETECT	   = 0x0e,
	MLX4_EVENT_TYPE_CMD		   = 0x0a
};

enum {
	MLX4_PORT_CHANGE_SUBTYPE_DOWN	= 1,
	MLX4_PORT_CHANGE_SUBTYPE_ACTIVE	= 4
};

enum {
	MLX4_PERM_LOCAL_READ	= 1 << 10,
	MLX4_PERM_LOCAL_WRITE	= 1 << 11,
	MLX4_PERM_REMOTE_READ	= 1 << 12,
	MLX4_PERM_REMOTE_WRITE	= 1 << 13,
	MLX4_PERM_ATOMIC	= 1 << 14
};

enum {
	MLX4_OPCODE_NOP			= 0x00,
	MLX4_OPCODE_SEND_INVAL		= 0x01,
	MLX4_OPCODE_RDMA_WRITE		= 0x08,
	MLX4_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX4_OPCODE_SEND		= 0x0a,
	MLX4_OPCODE_SEND_IMM		= 0x0b,
	MLX4_OPCODE_LSO			= 0x0e,
	MLX4_OPCODE_RDMA_READ		= 0x10,
	MLX4_OPCODE_ATOMIC_CS		= 0x11,
	MLX4_OPCODE_ATOMIC_FA		= 0x12,
	MLX4_OPCODE_MASKED_ATOMIC_CS	= 0x14,
	MLX4_OPCODE_MASKED_ATOMIC_FA	= 0x15,
	MLX4_OPCODE_BIND_MW		= 0x18,
	MLX4_OPCODE_FMR			= 0x19,
	MLX4_OPCODE_LOCAL_INVAL		= 0x1b,
	MLX4_OPCODE_CONFIG_CMD		= 0x1f,

	MLX4_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX4_RECV_OPCODE_SEND		= 0x01,
	MLX4_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX4_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX4_CQE_OPCODE_ERROR		= 0x1e,
	MLX4_CQE_OPCODE_RESIZE		= 0x16,
};

enum {
	MLX4_STAT_RATE_OFFSET	= 5
};

enum mlx4_protocol {
	MLX4_PROT_IB_IPV6 = 0,
	MLX4_PROT_ETH,
	MLX4_PROT_IB_IPV4,
	MLX4_PROT_FCOE
};

enum {
	MLX4_MTT_FLAG_PRESENT	= 1
};

enum mlx4_qp_region {
	MLX4_QP_REGION_FW = 0,
	MLX4_QP_REGION_ETH_ADDR,
	MLX4_QP_REGION_FC_ADDR,
	MLX4_QP_REGION_FC_EXCH,
	MLX4_NUM_QP_REGION
};

enum mlx4_port_type {
	MLX4_PORT_TYPE_IB	= 1,
	MLX4_PORT_TYPE_ETH	= 2,
	MLX4_PORT_TYPE_AUTO	= 3
};

enum mlx4_special_vlan_idx {
	MLX4_NO_VLAN_IDX	= 0,
	MLX4_VLAN_MISS_IDX,
	MLX4_VLAN_REGULAR
};

enum mlx4_steer_type {
	MLX4_MC_STEER = 0,
	MLX4_UC_STEER,
	MLX4_NUM_STEERS
};

enum {
	MLX4_NUM_FEXCH		= 64 * 1024,
};

enum {
	MLX4_MAX_FAST_REG_PAGES = 511,
};

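/*
 * mlx4_fw_ver() packs a firmware version into the 64-bit layout used by
 * mlx4_caps.fw_ver: major in bits 63:32, minor in bits 31:16 and
 * subminor in bits 15:0, so versions compare as plain integers.
 * Illustrative sketch (the version numbers below are arbitrary):
 *
 *	if (dev->caps.fw_ver < mlx4_fw_ver(2, 9, 1000))
 *		... fall back to behaviour that older firmware supports ...
 */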
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
	return (major << 32) | (minor << 16) | subminor;
}

struct mlx4_caps {
	u64			fw_ver;
	int			num_ports;
	int			vl_cap[MLX4_MAX_PORTS + 1];
	int			ib_mtu_cap[MLX4_MAX_PORTS + 1];
	__be32			ib_port_def_cap[MLX4_MAX_PORTS + 1];
	u64			def_mac[MLX4_MAX_PORTS + 1];
	int			eth_mtu_cap[MLX4_MAX_PORTS + 1];
	int			gid_table_len[MLX4_MAX_PORTS + 1];
	int			pkey_table_len[MLX4_MAX_PORTS + 1];
	int			trans_type[MLX4_MAX_PORTS + 1];
	int			vendor_oui[MLX4_MAX_PORTS + 1];
	int			wavelength[MLX4_MAX_PORTS + 1];
	u64			trans_code[MLX4_MAX_PORTS + 1];
	int			local_ca_ack_delay;
	int			num_uars;
	int			bf_reg_size;
	int			bf_regs_per_page;
	int			max_sq_sg;
	int			max_rq_sg;
	int			num_qps;
	int			max_wqes;
	int			max_sq_desc_sz;
	int			max_rq_desc_sz;
	int			max_qp_init_rdma;
	int			max_qp_dest_rdma;
	int			sqp_start;
	int			num_srqs;
	int			max_srq_wqes;
	int			max_srq_sge;
	int			reserved_srqs;
	int			num_cqs;
	int			max_cqes;
	int			reserved_cqs;
	int			num_eqs;
	int			reserved_eqs;
	int			num_comp_vectors;
	int			comp_pool;
	int			num_mpts;
	int			num_mtt_segs;
	int			mtts_per_seg;
	int			fmr_reserved_mtts;
	int			reserved_mtts;
	int			reserved_mrws;
	int			reserved_uars;
	int			num_mgms;
	int			num_amgms;
	int			reserved_mcgs;
	int			num_qp_per_mgm;
	int			num_pds;
	int			reserved_pds;
	int			mtt_entry_sz;
	u32			max_msg_sz;
	u32			page_size_cap;
	u64			flags;
	u32			bmme_flags;
	u32			reserved_lkey;
	u16			stat_rate_support;
	u8			port_width_cap[MLX4_MAX_PORTS + 1];
	int			max_gso_sz;
	int			reserved_qps_cnt[MLX4_NUM_QP_REGION];
	int			reserved_qps;
	int			reserved_qps_base[MLX4_NUM_QP_REGION];
	int			log_num_macs;
	int			log_num_vlans;
	int			log_num_prios;
	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
	u8			supported_type[MLX4_MAX_PORTS + 1];
	u32			port_mask;
	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
	u32			max_counters;
};

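/*
 * An mlx4_buf is the driver-side memory behind a hardware queue.  It is
 * either one contiguous DMA allocation (only "direct" is used and nbufs
 * is 1) or a set of page-sized chunks described by page_list; callers go
 * through mlx4_buf_offset() below rather than dereferencing either form
 * directly.
 */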
struct mlx4_buf_list {
	void			*buf;
	dma_addr_t		map;
};

struct mlx4_buf {
	struct mlx4_buf_list	direct;
	struct mlx4_buf_list	*page_list;
	int			nbufs;
	int			npages;
	int			page_shift;
};

struct mlx4_mtt {
	u32			first_seg;
	int			order;
	int			page_shift;
};

enum {
	MLX4_DB_PER_PAGE = PAGE_SIZE / 4
};

struct mlx4_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
	DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
	unsigned long		*bits[2];
	__be32			*db_page;
	dma_addr_t		db_dma;
};

struct mlx4_ib_user_db_page;

struct mlx4_db {
	__be32			*db;
	union {
		struct mlx4_db_pgdir		*pgdir;
		struct mlx4_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
	int			order;
};

struct mlx4_hwq_resources {
	struct mlx4_db		db;
	struct mlx4_mtt		mtt;
	struct mlx4_buf		buf;
};

struct mlx4_mr {
	struct mlx4_mtt		mtt;
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
	u32			access;
	int			enabled;
};

struct mlx4_fmr {
	struct mlx4_mr		mr;
	struct mlx4_mpt_entry	*mpt;
	__be64			*mtts;
	dma_addr_t		dma_handle;
	int			max_pages;
	int			max_maps;
	int			maps;
	u8			page_shift;
};

struct mlx4_uar {
	unsigned long		pfn;
	int			index;
	struct list_head	bf_list;
	unsigned		free_bf_bmap;
	void __iomem		*map;
	void __iomem		*bf_map;
};

struct mlx4_bf {
	unsigned long		offset;
	int			buf_size;
	struct mlx4_uar		*uar;
	void __iomem		*reg;
};

struct mlx4_cq {
	void (*comp)		(struct mlx4_cq *);
	void (*event)		(struct mlx4_cq *, enum mlx4_event);

	struct mlx4_uar		*uar;

	u32			cons_index;

	__be32			*set_ci_db;
	__be32			*arm_db;
	int			arm_sn;

	int			cqn;
	unsigned		vector;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_qp {
	void (*event)		(struct mlx4_qp *, enum mlx4_event);

	int			qpn;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_srq {
	void (*event)		(struct mlx4_srq *, enum mlx4_event);

	int			srqn;
	int			max;
	int			max_gs;
	int			wqe_shift;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_av {
	__be32			port_pd;
	u8			reserved1;
	u8			g_slid;
	__be16			dlid;
	u8			reserved2;
	u8			gid_index;
	u8			stat_rate;
	u8			hop_limit;
	__be32			sl_tclass_flowlabel;
	u8			dgid[16];
};

struct mlx4_eth_av {
	__be32		port_pd;
	u8		reserved1;
	u8		smac_idx;
	u16		reserved2;
	u8		reserved3;
	u8		gid_index;
	u8		stat_rate;
	u8		hop_limit;
	__be32		sl_tclass_flowlabel;
	u8		dgid[16];
	u32		reserved4[2];
	__be16		vlan;
	u8		mac[6];
};

union mlx4_ext_av {
	struct mlx4_av		ib;
	struct mlx4_eth_av	eth;
};

struct mlx4_counter {
	u8		reserved1[3];
	u8		counter_mode;
	__be32		num_ifc;
	u32		reserved2[2];
	__be64		rx_frames;
	__be64		rx_bytes;
	__be64		tx_frames;
	__be64		tx_bytes;
};

struct mlx4_dev {
	struct pci_dev		*pdev;
	unsigned long		flags;
	struct mlx4_caps	caps;
	struct radix_tree_root	qp_table_tree;
	u8			rev_id;
	char			board_id[MLX4_BOARD_ID_LEN];
};

struct mlx4_init_port_param {
	int			set_guid0;
	int			set_node_guid;
	int			set_si_guid;
	u16			mtu;
	int			port_width_cap;
	u16			vl_cap;
	u16			max_gid;
	u16			max_pkey;
	u64			guid0;
	u64			node_guid;
	u64			si_guid;
};

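/*
 * Port iterators.  Ports are numbered from 1, which is also why the
 * per-port arrays in struct mlx4_caps are sized MLX4_MAX_PORTS + 1.
 * mlx4_foreach_port() visits only the ports of the requested type
 * (decided by caps.port_mask), while mlx4_foreach_ib_transport_port()
 * also takes Ethernet ports when the device has the IBoE capability.
 * Illustrative use:
 *
 *	int port;
 *
 *	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
 *		... per-port Ethernet setup for "port" ...
 */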
#define mlx4_foreach_port(port, dev, type)				\
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
		if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
		     ~(dev)->caps.port_mask) & 1 << ((port) - 1))

#define mlx4_foreach_ib_transport_port(port, dev)			\
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
		if (((dev)->caps.port_mask & 1 << ((port) - 1)) ||	\
		    ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))


int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return buf->direct.buf + offset;
	else
		return buf->page_list[offset >> PAGE_SHIFT].buf +
			(offset & (PAGE_SIZE - 1));
}

int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);

int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf);
void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt);
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf);

int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);

int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct);
void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
		       int size);

int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);

int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);

int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
		   u64 db_rec, struct mlx4_srq *srq);
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);

int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);

int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol protocol);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol protocol);
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);

int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn);
int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap);

int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);

int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey);
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);

int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);

#endif /* MLX4_DEVICE_H */