/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_DEVICE_H
#define MLX4_DEVICE_H

#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/radix-tree.h>

#include <asm/atomic.h>

enum {
	MLX4_FLAG_MSI_X		= 1 << 0,
	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1,
};
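
/*
 * The flags above are kept in mlx4_dev.flags and tested with plain bit
 * masks.  Illustrative sketch only (not taken from the driver sources):
 *
 *	if (dev->flags & MLX4_FLAG_MSI_X)
 *		pr_info("device is using MSI-X interrupts\n");
 */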

enum {
	MLX4_MAX_PORTS		= 2
};

enum {
	MLX4_BOARD_ID_LEN = 64
};

enum {
	MLX4_DEV_CAP_FLAG_RC		= 1 <<  0,
	MLX4_DEV_CAP_FLAG_UC		= 1 <<  1,
	MLX4_DEV_CAP_FLAG_UD		= 1 <<  2,
	MLX4_DEV_CAP_FLAG_SRQ		= 1 <<  6,
	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1 <<  7,
	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1 <<  8,
	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1 <<  9,
	MLX4_DEV_CAP_FLAG_DPDP		= 1 << 12,
	MLX4_DEV_CAP_FLAG_BLH		= 1 << 15,
	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1 << 16,
	MLX4_DEV_CAP_FLAG_APM		= 1 << 17,
	MLX4_DEV_CAP_FLAG_ATOMIC	= 1 << 18,
	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1 << 19,
	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1 << 20,
	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1 << 21
};

enum {
	MLX4_BMME_FLAG_LOCAL_INV	= 1 <<  6,
	MLX4_BMME_FLAG_REMOTE_INV	= 1 <<  7,
	MLX4_BMME_FLAG_TYPE_2_WIN	= 1 <<  9,
	MLX4_BMME_FLAG_RESERVED_LKEY	= 1 << 10,
	MLX4_BMME_FLAG_FAST_REG_WR	= 1 << 11,
};

enum mlx4_event {
	MLX4_EVENT_TYPE_COMP		   = 0x00,
	MLX4_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX4_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX4_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE	   = 0x13,
	MLX4_EVENT_TYPE_SRQ_LIMIT	   = 0x14,
	MLX4_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX4_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX4_EVENT_TYPE_EEC_CATAS_ERROR	   = 0x06,
	MLX4_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX4_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX4_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,
	MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MLX4_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX4_EVENT_TYPE_EQ_OVERFLOW	   = 0x0f,
	MLX4_EVENT_TYPE_ECC_DETECT	   = 0x0e,
	MLX4_EVENT_TYPE_CMD		   = 0x0a
};

enum {
	MLX4_PORT_CHANGE_SUBTYPE_DOWN	= 1,
	MLX4_PORT_CHANGE_SUBTYPE_ACTIVE	= 4
};

enum {
	MLX4_PERM_LOCAL_READ	= 1 << 10,
	MLX4_PERM_LOCAL_WRITE	= 1 << 11,
	MLX4_PERM_REMOTE_READ	= 1 << 12,
	MLX4_PERM_REMOTE_WRITE	= 1 << 13,
	MLX4_PERM_ATOMIC	= 1 << 14
};
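
/*
 * The MLX4_PERM_* bits form the "access" argument of mlx4_mr_alloc() and
 * mlx4_fmr_alloc() below.  Minimal sketch (the chosen flags, PD number and
 * sizes are assumptions for illustration only):
 *
 *	struct mlx4_mr mr;
 *	u32 access = MLX4_PERM_LOCAL_READ | MLX4_PERM_LOCAL_WRITE;
 *	int err = mlx4_mr_alloc(dev, pdn, iova, size, access,
 *				npages, page_shift, &mr);
 *	if (!err)
 *		err = mlx4_mr_enable(dev, &mr);
 */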

enum {
	MLX4_OPCODE_NOP			= 0x00,
	MLX4_OPCODE_SEND_INVAL		= 0x01,
	MLX4_OPCODE_RDMA_WRITE		= 0x08,
	MLX4_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX4_OPCODE_SEND		= 0x0a,
	MLX4_OPCODE_SEND_IMM		= 0x0b,
	MLX4_OPCODE_LSO			= 0x0e,
	MLX4_OPCODE_RDMA_READ		= 0x10,
	MLX4_OPCODE_ATOMIC_CS		= 0x11,
	MLX4_OPCODE_ATOMIC_FA		= 0x12,
	MLX4_OPCODE_MASKED_ATOMIC_CS	= 0x14,
	MLX4_OPCODE_MASKED_ATOMIC_FA	= 0x15,
	MLX4_OPCODE_BIND_MW		= 0x18,
	MLX4_OPCODE_FMR			= 0x19,
	MLX4_OPCODE_LOCAL_INVAL		= 0x1b,
	MLX4_OPCODE_CONFIG_CMD		= 0x1f,

	MLX4_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX4_RECV_OPCODE_SEND		= 0x01,
	MLX4_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX4_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX4_CQE_OPCODE_ERROR		= 0x1e,
	MLX4_CQE_OPCODE_RESIZE		= 0x16,
};

enum {
	MLX4_STAT_RATE_OFFSET	= 5
};

enum {
	MLX4_MTT_FLAG_PRESENT		= 1
};

enum mlx4_qp_region {
	MLX4_QP_REGION_FW = 0,
	MLX4_QP_REGION_ETH_ADDR,
	MLX4_QP_REGION_FC_ADDR,
	MLX4_QP_REGION_FC_EXCH,
	MLX4_NUM_QP_REGION
};

enum mlx4_port_type {
	MLX4_PORT_TYPE_IB	= 1,
	MLX4_PORT_TYPE_ETH	= 2,
	MLX4_PORT_TYPE_AUTO	= 3
};

enum mlx4_special_vlan_idx {
	MLX4_NO_VLAN_IDX        = 0,
	MLX4_VLAN_MISS_IDX,
	MLX4_VLAN_REGULAR
};

enum {
	MLX4_NUM_FEXCH          = 64 * 1024,
};

static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
	return (major << 32) | (minor << 16) | subminor;
}
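
/*
 * mlx4_caps.fw_ver below is packed in the same major/minor/subminor layout,
 * so firmware feature checks can compare it directly against mlx4_fw_ver().
 * Illustrative sketch only; the version number and the helper name
 * enable_optional_feature() are made up:
 *
 *	if (dev->caps.fw_ver >= mlx4_fw_ver(2, 5, 0))
 *		enable_optional_feature(dev);
 */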

struct mlx4_caps {
	u64			fw_ver;
	int			num_ports;
	int			vl_cap[MLX4_MAX_PORTS + 1];
	int			ib_mtu_cap[MLX4_MAX_PORTS + 1];
	__be32			ib_port_def_cap[MLX4_MAX_PORTS + 1];
	u64			def_mac[MLX4_MAX_PORTS + 1];
	int			eth_mtu_cap[MLX4_MAX_PORTS + 1];
	int			gid_table_len[MLX4_MAX_PORTS + 1];
	int			pkey_table_len[MLX4_MAX_PORTS + 1];
	int			trans_type[MLX4_MAX_PORTS + 1];
	int			vendor_oui[MLX4_MAX_PORTS + 1];
	int			wavelength[MLX4_MAX_PORTS + 1];
	u64			trans_code[MLX4_MAX_PORTS + 1];
	int			local_ca_ack_delay;
	int			num_uars;
	int			bf_reg_size;
	int			bf_regs_per_page;
	int			max_sq_sg;
	int			max_rq_sg;
	int			num_qps;
	int			max_wqes;
	int			max_sq_desc_sz;
	int			max_rq_desc_sz;
	int			max_qp_init_rdma;
	int			max_qp_dest_rdma;
	int			sqp_start;
	int			num_srqs;
	int			max_srq_wqes;
	int			max_srq_sge;
	int			reserved_srqs;
	int			num_cqs;
	int			max_cqes;
	int			reserved_cqs;
	int			num_eqs;
	int			reserved_eqs;
	int			num_comp_vectors;
	int			num_mpts;
	int			num_mtt_segs;
	int			mtts_per_seg;
	int			fmr_reserved_mtts;
	int			reserved_mtts;
	int			reserved_mrws;
	int			reserved_uars;
	int			num_mgms;
	int			num_amgms;
	int			reserved_mcgs;
	int			num_qp_per_mgm;
	int			num_pds;
	int			reserved_pds;
	int			mtt_entry_sz;
	u32			max_msg_sz;
	u32			page_size_cap;
	u32			flags;
	u32			bmme_flags;
	u32			reserved_lkey;
	u16			stat_rate_support;
	int			udp_rss;
	int			loopback_support;
	u8			port_width_cap[MLX4_MAX_PORTS + 1];
	int			max_gso_sz;
	int                     reserved_qps_cnt[MLX4_NUM_QP_REGION];
	int			reserved_qps;
	int                     reserved_qps_base[MLX4_NUM_QP_REGION];
	int                     log_num_macs;
	int                     log_num_vlans;
	int                     log_num_prios;
	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
	u8			supported_type[MLX4_MAX_PORTS + 1];
	u32			port_mask;
	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
};
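
/*
 * The per-port arrays in struct mlx4_caps are sized MLX4_MAX_PORTS + 1
 * because ports are numbered starting at 1; element 0 is unused.  Sketch of
 * the usual iteration pattern (illustrative only):
 *
 *	int port;
 *
 *	for (port = 1; port <= dev->caps.num_ports; port++)
 *		pr_info("port %d type %d\n", port, dev->caps.port_type[port]);
 */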

struct mlx4_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx4_buf {
	struct mlx4_buf_list	direct;
	struct mlx4_buf_list   *page_list;
	int			nbufs;
	int			npages;
	int			page_shift;
};

struct mlx4_mtt {
	u32			first_seg;
	int			order;
	int			page_shift;
};

enum {
	MLX4_DB_PER_PAGE = PAGE_SIZE / 4
};

struct mlx4_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
	DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
	unsigned long	       *bits[2];
	__be32		       *db_page;
	dma_addr_t		db_dma;
};

struct mlx4_ib_user_db_page;

struct mlx4_db {
	__be32			*db;
	union {
		struct mlx4_db_pgdir		*pgdir;
		struct mlx4_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
	int			order;
};

struct mlx4_hwq_resources {
	struct mlx4_db		db;
	struct mlx4_mtt		mtt;
	struct mlx4_buf		buf;
};

struct mlx4_mr {
	struct mlx4_mtt		mtt;
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
	u32			access;
	int			enabled;
};

struct mlx4_fmr {
	struct mlx4_mr		mr;
	struct mlx4_mpt_entry  *mpt;
	__be64		       *mtts;
	dma_addr_t		dma_handle;
	int			max_pages;
	int			max_maps;
	int			maps;
	u8			page_shift;
};

struct mlx4_uar {
	unsigned long		pfn;
	int			index;
};

struct mlx4_cq {
	void (*comp)		(struct mlx4_cq *);
	void (*event)		(struct mlx4_cq *, enum mlx4_event);

	struct mlx4_uar	       *uar;

	u32			cons_index;

	__be32		       *set_ci_db;
	__be32		       *arm_db;
	int			arm_sn;

	int			cqn;
	unsigned		vector;

	atomic_t		refcount;
	struct completion	free;
};
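
/*
 * comp() is invoked on completion events for the CQ and event() on
 * asynchronous events (e.g. MLX4_EVENT_TYPE_CQ_ERROR); refcount/free let the
 * core wait for outstanding handlers before the CQ is destroyed.  Consumers
 * typically install their handlers right after mlx4_cq_alloc().  Sketch with
 * hypothetical handler names:
 *
 *	cq->comp  = my_cq_completion;
 *	cq->event = my_cq_async_event;
 */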

struct mlx4_qp {
	void (*event)		(struct mlx4_qp *, enum mlx4_event);

	int			qpn;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_srq {
	void (*event)		(struct mlx4_srq *, enum mlx4_event);

	int			srqn;
	int			max;
	int			max_gs;
	int			wqe_shift;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_av {
	__be32			port_pd;
	u8			reserved1;
	u8			g_slid;
	__be16			dlid;
	u8			reserved2;
	u8			gid_index;
	u8			stat_rate;
	u8			hop_limit;
	__be32			sl_tclass_flowlabel;
	u8			dgid[16];
};

struct mlx4_dev {
	struct pci_dev	       *pdev;
	unsigned long		flags;
	struct mlx4_caps	caps;
	struct radix_tree_root	qp_table_tree;
	u32			rev_id;
	char			board_id[MLX4_BOARD_ID_LEN];
};

struct mlx4_init_port_param {
	int			set_guid0;
	int			set_node_guid;
	int			set_si_guid;
	u16			mtu;
	int			port_width_cap;
	u16			vl_cap;
	u16			max_gid;
	u16			max_pkey;
	u64			guid0;
	u64			node_guid;
	u64			si_guid;
};

#define mlx4_foreach_port(port, dev, type)				\
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
		if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
		     ~(dev)->caps.port_mask) & 1 << ((port) - 1))
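
/*
 * mlx4_foreach_port() visits only the ports of the requested type: bit
 * (port - 1) of caps.port_mask is set for IB ports, so the mask is inverted
 * when walking Ethernet ports.  Usage sketch (setup_eth_port() is a
 * hypothetical callback):
 *
 *	int port;
 *
 *	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
 *		setup_eth_port(dev, port);
 */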

int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return buf->direct.buf + offset;
	else
		return buf->page_list[offset >> PAGE_SHIFT].buf +
			(offset & (PAGE_SIZE - 1));
}
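
/*
 * mlx4_buf_offset() relies on mlx4_buf_alloc() providing a virtually
 * contiguous mapping (direct.buf) on 64-bit kernels or when the buffer fits
 * in a single chunk; otherwise the offset is resolved page by page through
 * page_list.  Sketch of fetching a descriptor at a byte offset (the stride
 * and index values are illustrative):
 *
 *	void *wqe = mlx4_buf_offset(&wqres.buf, index * stride);
 */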

int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);

int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt);
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf);

int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);

int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct);
void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
		       int size);
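
/*
 * mlx4_alloc_hwq_res() bundles the usual buffer + MTT + doorbell setup for a
 * hardware queue into one call.  Allocation sketch (the size and max_direct
 * values are assumptions for illustration):
 *
 *	struct mlx4_hwq_resources wqres;
 *	int err = mlx4_alloc_hwq_res(dev, &wqres, size, 2 * PAGE_SIZE);
 *	...
 *	mlx4_free_hwq_res(dev, &wqres, size);
 */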

int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
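
/*
 * A minimal CQ creation sketch, assuming the buffer, MTT and doorbell have
 * already been set up (e.g. via mlx4_alloc_hwq_res()); the vector and
 * collapsed arguments shown are illustrative:
 *
 *	err = mlx4_cq_alloc(dev, nent, &wqres.mtt, &uar, wqres.db.dma,
 *			    &cq, 0, 0);
 */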

int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
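
/*
 * QP numbers are handed out as aligned ranges.  Reservation sketch (the
 * count and alignment are illustrative):
 *
 *	int base_qpn;
 *	int err = mlx4_qp_reserve_range(dev, 64, 64, &base_qpn);
 *	...
 *	mlx4_qp_release_range(dev, base_qpn, 64);
 */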

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);

int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
		   u64 db_rec, struct mlx4_srq *srq);
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);

int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);

int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);

int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
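
/*
 * Unicast MAC registration sketch: the MAC is passed as a u64 and the
 * returned index is what mlx4_unregister_mac() expects back.  The port
 * number and MAC value are made up for illustration:
 *
 *	int index;
 *	int err = mlx4_register_mac(dev, 1, 0x0002c9000102ULL, &index);
 *	...
 *	mlx4_unregister_mac(dev, 1, index);
 */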

int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);

int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey);
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);
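
/*
 * Typical FMR lifecycle, as suggested by the declarations above (a sketch
 * only; the access flags, sizes and page_shift are assumptions):
 *
 *	struct mlx4_fmr fmr;
 *	u32 lkey, rkey;
 *
 *	err = mlx4_fmr_alloc(dev, pdn,
 *			     MLX4_PERM_LOCAL_READ | MLX4_PERM_LOCAL_WRITE,
 *			     max_pages, max_maps, PAGE_SHIFT, &fmr);
 *	err = mlx4_fmr_enable(dev, &fmr);
 *	err = mlx4_map_phys_fmr(dev, &fmr, page_list, npages, iova,
 *				&lkey, &rkey);
 *	... map or remap up to max_maps times ...
 *	mlx4_fmr_unmap(dev, &fmr, &lkey, &rkey);
 *	err = mlx4_fmr_free(dev, &fmr);
 */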

#endif /* MLX4_DEVICE_H */