/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_DEVICE_H
#define MLX4_DEVICE_H

#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/radix-tree.h>

#include <linux/atomic.h>

#define MAX_MSIX_P_PORT		17
#define MAX_MSIX		64
#define MSIX_LEGACY_SZ		4
#define MIN_MSIX_P_PORT		5

enum {
	MLX4_FLAG_MSI_X		= 1 << 0,
	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1,
	MLX4_FLAG_MASTER	= 1 << 2,
	MLX4_FLAG_SLAVE		= 1 << 3,
	MLX4_FLAG_SRIOV		= 1 << 4,
};

enum {
	MLX4_MAX_PORTS		= 2
};

enum {
	MLX4_BOARD_ID_LEN = 64
};

enum {
	MLX4_MAX_NUM_PF		= 16,
	MLX4_MAX_NUM_VF		= 64,
	MLX4_MFUNC_MAX		= 80,
	MLX4_MAX_EQ_NUM		= 1024,
	MLX4_MFUNC_EQ_NUM	= 4,
	MLX4_MFUNC_MAX_EQES     = 8,
	MLX4_MFUNC_EQE_MASK     = (MLX4_MFUNC_MAX_EQES - 1)
};

enum {
	MLX4_DEV_CAP_FLAG_RC		= 1LL <<  0,
	MLX4_DEV_CAP_FLAG_UC		= 1LL <<  1,
	MLX4_DEV_CAP_FLAG_UD		= 1LL <<  2,
	MLX4_DEV_CAP_FLAG_XRC		= 1LL <<  3,
	MLX4_DEV_CAP_FLAG_SRQ		= 1LL <<  6,
	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1LL <<  7,
	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
	MLX4_DEV_CAP_FLAG_DPDP		= 1LL << 12,
	MLX4_DEV_CAP_FLAG_BLH		= 1LL << 15,
	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1LL << 16,
	MLX4_DEV_CAP_FLAG_APM		= 1LL << 17,
	MLX4_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1LL << 19,
	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1LL << 20,
	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1LL << 21,
	MLX4_DEV_CAP_FLAG_IBOE		= 1LL << 30,
	MLX4_DEV_CAP_FLAG_UC_LOOPBACK	= 1LL << 32,
	MLX4_DEV_CAP_FLAG_FCS_KEEP	= 1LL << 34,
	MLX4_DEV_CAP_FLAG_WOL_PORT1	= 1LL << 37,
	MLX4_DEV_CAP_FLAG_WOL_PORT2	= 1LL << 38,
	MLX4_DEV_CAP_FLAG_UDP_RSS	= 1LL << 40,
	MLX4_DEV_CAP_FLAG_VEP_UC_STEER	= 1LL << 41,
	MLX4_DEV_CAP_FLAG_VEP_MC_STEER	= 1LL << 42,
	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48,
	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55
};

enum {
	MLX4_DEV_CAP_FLAG2_RSS			= 1LL <<  0,
	MLX4_DEV_CAP_FLAG2_RSS_TOP		= 1LL <<  1,
	MLX4_DEV_CAP_FLAG2_RSS_XOR		= 1LL <<  2
};

#define MLX4_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)

enum {
	MLX4_BMME_FLAG_LOCAL_INV	= 1 <<  6,
	MLX4_BMME_FLAG_REMOTE_INV	= 1 <<  7,
	MLX4_BMME_FLAG_TYPE_2_WIN	= 1 <<  9,
	MLX4_BMME_FLAG_RESERVED_LKEY	= 1 << 10,
	MLX4_BMME_FLAG_FAST_REG_WR	= 1 << 11,
};

enum mlx4_event {
	MLX4_EVENT_TYPE_COMP		   = 0x00,
	MLX4_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX4_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX4_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE	   = 0x13,
	MLX4_EVENT_TYPE_SRQ_LIMIT	   = 0x14,
	MLX4_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX4_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX4_EVENT_TYPE_EEC_CATAS_ERROR	   = 0x06,
	MLX4_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX4_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX4_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,
	MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MLX4_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX4_EVENT_TYPE_EQ_OVERFLOW	   = 0x0f,
	MLX4_EVENT_TYPE_ECC_DETECT	   = 0x0e,
	MLX4_EVENT_TYPE_CMD		   = 0x0a,
	MLX4_EVENT_TYPE_VEP_UPDATE	   = 0x19,
	MLX4_EVENT_TYPE_COMM_CHANNEL	   = 0x18,
	MLX4_EVENT_TYPE_FATAL_WARNING	   = 0x1b,
	MLX4_EVENT_TYPE_FLR_EVENT	   = 0x1c,
	MLX4_EVENT_TYPE_NONE		   = 0xff,
};

enum {
	MLX4_PORT_CHANGE_SUBTYPE_DOWN	= 1,
	MLX4_PORT_CHANGE_SUBTYPE_ACTIVE	= 4
};

enum {
	MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
};

enum {
	MLX4_PERM_LOCAL_READ	= 1 << 10,
	MLX4_PERM_LOCAL_WRITE	= 1 << 11,
	MLX4_PERM_REMOTE_READ	= 1 << 12,
	MLX4_PERM_REMOTE_WRITE	= 1 << 13,
	MLX4_PERM_ATOMIC	= 1 << 14
};

enum {
	MLX4_OPCODE_NOP			= 0x00,
	MLX4_OPCODE_SEND_INVAL		= 0x01,
	MLX4_OPCODE_RDMA_WRITE		= 0x08,
	MLX4_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX4_OPCODE_SEND		= 0x0a,
	MLX4_OPCODE_SEND_IMM		= 0x0b,
	MLX4_OPCODE_LSO			= 0x0e,
	MLX4_OPCODE_RDMA_READ		= 0x10,
	MLX4_OPCODE_ATOMIC_CS		= 0x11,
	MLX4_OPCODE_ATOMIC_FA		= 0x12,
	MLX4_OPCODE_MASKED_ATOMIC_CS	= 0x14,
	MLX4_OPCODE_MASKED_ATOMIC_FA	= 0x15,
	MLX4_OPCODE_BIND_MW		= 0x18,
	MLX4_OPCODE_FMR			= 0x19,
	MLX4_OPCODE_LOCAL_INVAL		= 0x1b,
	MLX4_OPCODE_CONFIG_CMD		= 0x1f,

	MLX4_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX4_RECV_OPCODE_SEND		= 0x01,
	MLX4_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX4_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX4_CQE_OPCODE_ERROR		= 0x1e,
	MLX4_CQE_OPCODE_RESIZE		= 0x16,
};

enum {
	MLX4_STAT_RATE_OFFSET	= 5
};

enum mlx4_protocol {
	MLX4_PROT_IB_IPV6 = 0,
	MLX4_PROT_ETH,
	MLX4_PROT_IB_IPV4,
	MLX4_PROT_FCOE
};

enum {
	MLX4_MTT_FLAG_PRESENT		= 1
};

enum mlx4_qp_region {
	MLX4_QP_REGION_FW = 0,
	MLX4_QP_REGION_ETH_ADDR,
	MLX4_QP_REGION_FC_ADDR,
	MLX4_QP_REGION_FC_EXCH,
	MLX4_NUM_QP_REGION
};

enum mlx4_port_type {
	MLX4_PORT_TYPE_NONE	= 0,
	MLX4_PORT_TYPE_IB	= 1,
	MLX4_PORT_TYPE_ETH	= 2,
	MLX4_PORT_TYPE_AUTO	= 3
};

enum mlx4_special_vlan_idx {
	MLX4_NO_VLAN_IDX        = 0,
	MLX4_VLAN_MISS_IDX,
	MLX4_VLAN_REGULAR
};

enum mlx4_steer_type {
	MLX4_MC_STEER = 0,
	MLX4_UC_STEER,
	MLX4_NUM_STEERS
};

enum {
	MLX4_NUM_FEXCH          = 64 * 1024,
};

enum {
	MLX4_MAX_FAST_REG_PAGES = 511,
};

static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
	return (major << 32) | (minor << 16) | subminor;
}
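
/*
 * For illustration only (a sketch, not taken from the driver sources):
 * mlx4_fw_ver() packs "major.minor.subminor" into the 64-bit layout
 * kept in mlx4_caps.fw_ver, so firmware versions can be compared
 * numerically.  Gating a feature on a hypothetical minimum version
 * 2.9.1000 would look like:
 *
 *	if (dev->caps.fw_ver < mlx4_fw_ver(2, 9, 1000))
 *		return -EOPNOTSUPP;
 *
 * where mlx4_fw_ver(2, 9, 1000) evaluates to 0x00000002000903e8.
 */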

struct mlx4_phys_caps {
	u32			num_phys_eqs;
};

struct mlx4_caps {
	u64			fw_ver;
	u32			function;
	int			num_ports;
	int			vl_cap[MLX4_MAX_PORTS + 1];
	int			ib_mtu_cap[MLX4_MAX_PORTS + 1];
	__be32			ib_port_def_cap[MLX4_MAX_PORTS + 1];
	u64			def_mac[MLX4_MAX_PORTS + 1];
	int			eth_mtu_cap[MLX4_MAX_PORTS + 1];
	int			gid_table_len[MLX4_MAX_PORTS + 1];
	int			pkey_table_len[MLX4_MAX_PORTS + 1];
	int			trans_type[MLX4_MAX_PORTS + 1];
	int			vendor_oui[MLX4_MAX_PORTS + 1];
	int			wavelength[MLX4_MAX_PORTS + 1];
	u64			trans_code[MLX4_MAX_PORTS + 1];
	int			local_ca_ack_delay;
	int			num_uars;
	u32			uar_page_size;
	int			bf_reg_size;
	int			bf_regs_per_page;
	int			max_sq_sg;
	int			max_rq_sg;
	int			num_qps;
	int			max_wqes;
	int			max_sq_desc_sz;
	int			max_rq_desc_sz;
	int			max_qp_init_rdma;
	int			max_qp_dest_rdma;
	int			sqp_start;
	int			num_srqs;
	int			max_srq_wqes;
	int			max_srq_sge;
	int			reserved_srqs;
	int			num_cqs;
	int			max_cqes;
	int			reserved_cqs;
	int			num_eqs;
	int			reserved_eqs;
	int			num_comp_vectors;
	int			comp_pool;
	int			num_mpts;
	int			max_fmr_maps;
	int			num_mtts;
	int			fmr_reserved_mtts;
	int			reserved_mtts;
	int			reserved_mrws;
	int			reserved_uars;
	int			num_mgms;
	int			num_amgms;
	int			reserved_mcgs;
	int			num_qp_per_mgm;
	int			num_pds;
	int			reserved_pds;
	int			max_xrcds;
	int			reserved_xrcds;
	int			mtt_entry_sz;
	u32			max_msg_sz;
	u32			page_size_cap;
	u64			flags;
	u64			flags2;
	u32			bmme_flags;
	u32			reserved_lkey;
	u16			stat_rate_support;
	u8			port_width_cap[MLX4_MAX_PORTS + 1];
	int			max_gso_sz;
	int			max_rss_tbl_sz;
	int                     reserved_qps_cnt[MLX4_NUM_QP_REGION];
	int			reserved_qps;
	int                     reserved_qps_base[MLX4_NUM_QP_REGION];
	int                     log_num_macs;
	int                     log_num_vlans;
	int                     log_num_prios;
	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
	u8			supported_type[MLX4_MAX_PORTS + 1];
	u8                      suggested_type[MLX4_MAX_PORTS + 1];
	u8                      default_sense[MLX4_MAX_PORTS + 1];
	u32			port_mask[MLX4_MAX_PORTS + 1];
	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
	u32			max_counters;
	u8			port_ib_mtu[MLX4_MAX_PORTS + 1];
};
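
/*
 * The per-port arrays above are sized MLX4_MAX_PORTS + 1 because mlx4
 * ports are numbered starting at 1 (see mlx4_foreach_port() below);
 * index 0 is unused.
 */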

struct mlx4_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx4_buf {
	struct mlx4_buf_list	direct;
	struct mlx4_buf_list   *page_list;
	int			nbufs;
	int			npages;
	int			page_shift;
};

struct mlx4_mtt {
	u32			offset;
	int			order;
	int			page_shift;
};

enum {
	MLX4_DB_PER_PAGE = PAGE_SIZE / 4
};
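
/*
 * A doorbell record is a single __be32, so one page holds
 * PAGE_SIZE / 4 of them.  The order0/order1 bitmaps below track free
 * runs of one and two consecutive records, which is what the 'order'
 * argument of mlx4_db_alloc() selects.
 */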

struct mlx4_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
	DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
	unsigned long	       *bits[2];
	__be32		       *db_page;
	dma_addr_t		db_dma;
};

struct mlx4_ib_user_db_page;

struct mlx4_db {
	__be32			*db;
	union {
		struct mlx4_db_pgdir		*pgdir;
		struct mlx4_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
	int			order;
};

struct mlx4_hwq_resources {
	struct mlx4_db		db;
	struct mlx4_mtt		mtt;
	struct mlx4_buf		buf;
};

struct mlx4_mr {
	struct mlx4_mtt		mtt;
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
	u32			access;
	int			enabled;
};

struct mlx4_fmr {
	struct mlx4_mr		mr;
	struct mlx4_mpt_entry  *mpt;
	__be64		       *mtts;
	dma_addr_t		dma_handle;
	int			max_pages;
	int			max_maps;
	int			maps;
	u8			page_shift;
};

struct mlx4_uar {
	unsigned long		pfn;
	int			index;
	struct list_head	bf_list;
	unsigned		free_bf_bmap;
	void __iomem	       *map;
	void __iomem	       *bf_map;
};

struct mlx4_bf {
	unsigned long		offset;
	int			buf_size;
	struct mlx4_uar	       *uar;
	void __iomem	       *reg;
};

struct mlx4_cq {
	void (*comp)		(struct mlx4_cq *);
	void (*event)		(struct mlx4_cq *, enum mlx4_event);

	struct mlx4_uar	       *uar;

	u32			cons_index;

	__be32		       *set_ci_db;
	__be32		       *arm_db;
	int			arm_sn;

	int			cqn;
	unsigned		vector;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_qp {
	void (*event)		(struct mlx4_qp *, enum mlx4_event);

	int			qpn;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_srq {
	void (*event)		(struct mlx4_srq *, enum mlx4_event);

	int			srqn;
	int			max;
	int			max_gs;
	int			wqe_shift;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_av {
	__be32			port_pd;
	u8			reserved1;
	u8			g_slid;
	__be16			dlid;
	u8			reserved2;
	u8			gid_index;
	u8			stat_rate;
	u8			hop_limit;
	__be32			sl_tclass_flowlabel;
	u8			dgid[16];
};

struct mlx4_eth_av {
	__be32		port_pd;
	u8		reserved1;
	u8		smac_idx;
	u16		reserved2;
	u8		reserved3;
	u8		gid_index;
	u8		stat_rate;
	u8		hop_limit;
	__be32		sl_tclass_flowlabel;
	u8		dgid[16];
	u32		reserved4[2];
	__be16		vlan;
	u8		mac[6];
};

union mlx4_ext_av {
	struct mlx4_av		ib;
	struct mlx4_eth_av	eth;
};

struct mlx4_counter {
	u8	reserved1[3];
	u8	counter_mode;
	__be32	num_ifc;
	u32	reserved2[2];
	__be64	rx_frames;
	__be64	rx_bytes;
	__be64	tx_frames;
	__be64	tx_bytes;
};

struct mlx4_dev {
	struct pci_dev	       *pdev;
	unsigned long		flags;
	unsigned long		num_slaves;
	struct mlx4_caps	caps;
	struct mlx4_phys_caps	phys_caps;
	struct radix_tree_root	qp_table_tree;
	u8			rev_id;
	char			board_id[MLX4_BOARD_ID_LEN];
	int			num_vfs;
};

struct mlx4_init_port_param {
	int			set_guid0;
	int			set_node_guid;
	int			set_si_guid;
	u16			mtu;
	int			port_width_cap;
	u16			vl_cap;
	u16			max_gid;
	u16			max_pkey;
	u64			guid0;
	u64			node_guid;
	u64			si_guid;
};

#define mlx4_foreach_port(port, dev, type)				\
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
		if ((type) == (dev)->caps.port_mask[(port)])

#define mlx4_foreach_ib_transport_port(port, dev)                         \
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	  \
		if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
			((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
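
/*
 * Usage sketch for the iterators above (handle_eth_port() is only a
 * placeholder):
 *
 *	int port;
 *
 *	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
 *		handle_eth_port(dev, port);
 *
 * Ports are numbered 1..caps.num_ports, and the loop body runs only
 * for ports whose port_mask entry matches the requested type.
 */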

static inline int mlx4_is_master(struct mlx4_dev *dev)
{
	return dev->flags & MLX4_FLAG_MASTER;
}

static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
{
	return (qpn < dev->caps.sqp_start + 8);
}

static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
{
	return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
}

static inline int mlx4_is_slave(struct mlx4_dev *dev)
{
	return dev->flags & MLX4_FLAG_SLAVE;
}
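
/*
 * mlx4_is_master(), mlx4_is_mfunc() and mlx4_is_slave() describe
 * multi-function (SR-IOV) operation: the PF driver sets
 * MLX4_FLAG_MASTER, VF drivers set MLX4_FLAG_SLAVE, and
 * mlx4_is_mfunc() is true in either case.  A typical guard for a
 * PF-only path is the sketch "if (mlx4_is_slave(dev)) return -EPERM;".
 */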

int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return buf->direct.buf + offset;
	else
		return buf->page_list[offset >> PAGE_SHIFT].buf +
			(offset & (PAGE_SIZE - 1));
}
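
/*
 * mlx4_buf_offset() translates a byte offset into a kernel virtual
 * address: when the buffer has a usable contiguous mapping (64-bit
 * kernels, or a single-chunk allocation) it indexes the direct
 * mapping, otherwise it selects the page from page_list and adds the
 * offset within that page.
 */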

int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);

int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf);
void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt);
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf);
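
/*
 * Typical registration flow, as a simplified sketch: allocate the
 * region with mlx4_mr_alloc(), populate its translation table with
 * mlx4_write_mtt() (or mlx4_buf_write_mtt() for a buffer from
 * mlx4_buf_alloc()), then make it usable with mlx4_mr_enable();
 * mlx4_mr_free() undoes the allocation.
 */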

int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);

int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct);
void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
		       int size);

int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);

int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);

int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,
		   struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq);
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);

int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);

int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot);
int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			enum mlx4_protocol prot);
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol protocol);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol protocol);
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);

int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc);
int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
		u8 *pg, u16 *ratelimit);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);

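/*
 * FMR life cycle, as a simplified sketch: mlx4_fmr_alloc() and
 * mlx4_fmr_enable() set a fast memory region up once; each mapping is
 * then established with mlx4_map_phys_fmr() and torn down with
 * mlx4_fmr_unmap() (at most max_maps mappings between unmaps), and
 * mlx4_fmr_free() releases the region.  Unmaps are typically followed
 * by mlx4_SYNC_TPT() to flush the translation and protection tables.
 */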
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey);
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);

int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
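
/*
 * Illustrative counter usage (sketch only):
 *
 *	u32 idx;
 *
 *	if (!mlx4_counter_alloc(dev, &idx)) {
 *		... attach idx to a QP or port context ...
 *		mlx4_counter_free(dev, idx);
 *	}
 */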

#endif /* MLX4_DEVICE_H */