xref: /linux-6.15/include/linux/mlx4/device.h (revision 266fe2f2)
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_DEVICE_H
#define MLX4_DEVICE_H

#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/radix-tree.h>

#include <asm/atomic.h>

enum {
	MLX4_FLAG_MSI_X		= 1 << 0,
	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1,
};

enum {
	MLX4_MAX_PORTS		= 2
};

enum {
	MLX4_BOARD_ID_LEN = 64
};

enum {
	MLX4_DEV_CAP_FLAG_RC		= 1 <<  0,
	MLX4_DEV_CAP_FLAG_UC		= 1 <<  1,
	MLX4_DEV_CAP_FLAG_UD		= 1 <<  2,
	MLX4_DEV_CAP_FLAG_SRQ		= 1 <<  6,
	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1 <<  7,
	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1 <<  8,
	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1 <<  9,
	MLX4_DEV_CAP_FLAG_DPDP		= 1 << 12,
	MLX4_DEV_CAP_FLAG_BLH		= 1 << 15,
	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1 << 16,
	MLX4_DEV_CAP_FLAG_APM		= 1 << 17,
	MLX4_DEV_CAP_FLAG_ATOMIC	= 1 << 18,
	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1 << 19,
	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1 << 20,
	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1 << 21
};

enum {
	MLX4_BMME_FLAG_LOCAL_INV	= 1 <<  6,
	MLX4_BMME_FLAG_REMOTE_INV	= 1 <<  7,
	MLX4_BMME_FLAG_TYPE_2_WIN	= 1 <<  9,
	MLX4_BMME_FLAG_RESERVED_LKEY	= 1 << 10,
	MLX4_BMME_FLAG_FAST_REG_WR	= 1 << 11,
};

enum mlx4_event {
	MLX4_EVENT_TYPE_COMP		   = 0x00,
	MLX4_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX4_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX4_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE	   = 0x13,
	MLX4_EVENT_TYPE_SRQ_LIMIT	   = 0x14,
	MLX4_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX4_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX4_EVENT_TYPE_EEC_CATAS_ERROR	   = 0x06,
	MLX4_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX4_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX4_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,
	MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MLX4_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX4_EVENT_TYPE_EQ_OVERFLOW	   = 0x0f,
	MLX4_EVENT_TYPE_ECC_DETECT	   = 0x0e,
	MLX4_EVENT_TYPE_CMD		   = 0x0a
};

enum {
	MLX4_PORT_CHANGE_SUBTYPE_DOWN	= 1,
	MLX4_PORT_CHANGE_SUBTYPE_ACTIVE	= 4
};

enum {
	MLX4_PERM_LOCAL_READ	= 1 << 10,
	MLX4_PERM_LOCAL_WRITE	= 1 << 11,
	MLX4_PERM_REMOTE_READ	= 1 << 12,
	MLX4_PERM_REMOTE_WRITE	= 1 << 13,
	MLX4_PERM_ATOMIC	= 1 << 14
};

enum {
	MLX4_OPCODE_NOP			= 0x00,
	MLX4_OPCODE_SEND_INVAL		= 0x01,
	MLX4_OPCODE_RDMA_WRITE		= 0x08,
	MLX4_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX4_OPCODE_SEND		= 0x0a,
	MLX4_OPCODE_SEND_IMM		= 0x0b,
	MLX4_OPCODE_LSO			= 0x0e,
	MLX4_OPCODE_RDMA_READ		= 0x10,
	MLX4_OPCODE_ATOMIC_CS		= 0x11,
	MLX4_OPCODE_ATOMIC_FA		= 0x12,
	MLX4_OPCODE_ATOMIC_MASK_CS	= 0x14,
	MLX4_OPCODE_ATOMIC_MASK_FA	= 0x15,
	MLX4_OPCODE_BIND_MW		= 0x18,
	MLX4_OPCODE_FMR			= 0x19,
	MLX4_OPCODE_LOCAL_INVAL		= 0x1b,
	MLX4_OPCODE_CONFIG_CMD		= 0x1f,

	MLX4_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX4_RECV_OPCODE_SEND		= 0x01,
	MLX4_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX4_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX4_CQE_OPCODE_ERROR		= 0x1e,
	MLX4_CQE_OPCODE_RESIZE		= 0x16,
};

enum {
	MLX4_STAT_RATE_OFFSET	= 5
};

enum {
	MLX4_MTT_FLAG_PRESENT		= 1
};

enum mlx4_qp_region {
	MLX4_QP_REGION_FW = 0,
	MLX4_QP_REGION_ETH_ADDR,
	MLX4_QP_REGION_FC_ADDR,
	MLX4_QP_REGION_FC_EXCH,
	MLX4_NUM_QP_REGION
};

enum mlx4_port_type {
	MLX4_PORT_TYPE_IB	= 1,
	MLX4_PORT_TYPE_ETH	= 2,
	MLX4_PORT_TYPE_AUTO	= 3
};

enum mlx4_special_vlan_idx {
	MLX4_NO_VLAN_IDX        = 0,
	MLX4_VLAN_MISS_IDX,
	MLX4_VLAN_REGULAR
};

enum {
	MLX4_NUM_FEXCH          = 64 * 1024,
};

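/*
 * mlx4_fw_ver() packs a firmware version into the 64-bit encoding stored
 * in struct mlx4_caps.fw_ver: major in bits 63:32, minor in bits 31:16,
 * subminor in bits 15:0.  For example (illustrative values only),
 * firmware 2.9.1000 encodes as
 *
 *	mlx4_fw_ver(2, 9, 1000) == 0x00000002000903e8ULL
 *
 * since 9 << 16 == 0x90000 and 1000 == 0x3e8.
 */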
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
	return (major << 32) | (minor << 16) | subminor;
}

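/*
 * Device capabilities and limits, filled in from firmware when the device
 * is initialized.  mlx4 ports are numbered starting at 1, so the per-port
 * arrays below are sized MLX4_MAX_PORTS + 1 and index 0 is unused.
 */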
struct mlx4_caps {
	u64			fw_ver;
	int			num_ports;
	int			vl_cap[MLX4_MAX_PORTS + 1];
	int			ib_mtu_cap[MLX4_MAX_PORTS + 1];
	__be32			ib_port_def_cap[MLX4_MAX_PORTS + 1];
	u64			def_mac[MLX4_MAX_PORTS + 1];
	int			eth_mtu_cap[MLX4_MAX_PORTS + 1];
	int			gid_table_len[MLX4_MAX_PORTS + 1];
	int			pkey_table_len[MLX4_MAX_PORTS + 1];
	int			local_ca_ack_delay;
	int			num_uars;
	int			bf_reg_size;
	int			bf_regs_per_page;
	int			max_sq_sg;
	int			max_rq_sg;
	int			num_qps;
	int			max_wqes;
	int			max_sq_desc_sz;
	int			max_rq_desc_sz;
	int			max_qp_init_rdma;
	int			max_qp_dest_rdma;
	int			sqp_start;
	int			num_srqs;
	int			max_srq_wqes;
	int			max_srq_sge;
	int			reserved_srqs;
	int			num_cqs;
	int			max_cqes;
	int			reserved_cqs;
	int			num_eqs;
	int			reserved_eqs;
	int			num_comp_vectors;
	int			num_mpts;
	int			num_mtt_segs;
	int			mtts_per_seg;
	int			fmr_reserved_mtts;
	int			reserved_mtts;
	int			reserved_mrws;
	int			reserved_uars;
	int			num_mgms;
	int			num_amgms;
	int			reserved_mcgs;
	int			num_qp_per_mgm;
	int			num_pds;
	int			reserved_pds;
	int			mtt_entry_sz;
	u32			max_msg_sz;
	u32			page_size_cap;
	u32			flags;
	u32			bmme_flags;
	u32			reserved_lkey;
	u16			stat_rate_support;
	u8			port_width_cap[MLX4_MAX_PORTS + 1];
	int			max_gso_sz;
	int                     reserved_qps_cnt[MLX4_NUM_QP_REGION];
	int			reserved_qps;
	int                     reserved_qps_base[MLX4_NUM_QP_REGION];
	int                     log_num_macs;
	int                     log_num_vlans;
	int                     log_num_prios;
	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
	u8			supported_type[MLX4_MAX_PORTS + 1];
	u32			port_mask;
	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
};

struct mlx4_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx4_buf {
	struct mlx4_buf_list	direct;
	struct mlx4_buf_list   *page_list;
	int			nbufs;
	int			npages;
	int			page_shift;
};

struct mlx4_mtt {
	u32			first_seg;
	int			order;
	int			page_shift;
};

enum {
	MLX4_DB_PER_PAGE = PAGE_SIZE / 4
};
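
/*
 * Doorbell records are 4 bytes each, so one page holds PAGE_SIZE / 4 of
 * them.  A pgdir tracks the free records within one such page: the order0
 * bitmap covers single-record allocations and the order1 bitmap covers
 * aligned pairs, used when a consumer needs two adjacent records (for
 * example a CQ's set_ci and arm doorbells).
 */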

struct mlx4_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
	DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
	unsigned long	       *bits[2];
	__be32		       *db_page;
	dma_addr_t		db_dma;
};

struct mlx4_ib_user_db_page;

struct mlx4_db {
	__be32			*db;
	union {
		struct mlx4_db_pgdir		*pgdir;
		struct mlx4_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
	int			order;
};

struct mlx4_hwq_resources {
	struct mlx4_db		db;
	struct mlx4_mtt		mtt;
	struct mlx4_buf		buf;
};

struct mlx4_mr {
	struct mlx4_mtt		mtt;
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
	u32			access;
	int			enabled;
};

struct mlx4_fmr {
	struct mlx4_mr		mr;
	struct mlx4_mpt_entry  *mpt;
	__be64		       *mtts;
	dma_addr_t		dma_handle;
	int			max_pages;
	int			max_maps;
	int			maps;
	u8			page_shift;
};

struct mlx4_uar {
	unsigned long		pfn;
	int			index;
};

struct mlx4_cq {
	void (*comp)		(struct mlx4_cq *);
	void (*event)		(struct mlx4_cq *, enum mlx4_event);

	struct mlx4_uar	       *uar;

	u32			cons_index;

	__be32		       *set_ci_db;
	__be32		       *arm_db;
	int			arm_sn;

	int			cqn;
	unsigned		vector;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_qp {
	void (*event)		(struct mlx4_qp *, enum mlx4_event);

	int			qpn;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_srq {
	void (*event)		(struct mlx4_srq *, enum mlx4_event);

	int			srqn;
	int			max;
	int			max_gs;
	int			wqe_shift;

	atomic_t		refcount;
	struct completion	free;
};

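/*
 * Hardware-format address vector used for UD address handles.  port_pd
 * packs the port number into its top bits alongside the PD number, and
 * g_slid combines the GRH-present flag with the source path bits.
 */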
struct mlx4_av {
	__be32			port_pd;
	u8			reserved1;
	u8			g_slid;
	__be16			dlid;
	u8			reserved2;
	u8			gid_index;
	u8			stat_rate;
	u8			hop_limit;
	__be32			sl_tclass_flowlabel;
	u8			dgid[16];
};

struct mlx4_dev {
	struct pci_dev	       *pdev;
	unsigned long		flags;
	struct mlx4_caps	caps;
	struct radix_tree_root	qp_table_tree;
	u32			rev_id;
	char			board_id[MLX4_BOARD_ID_LEN];
};

struct mlx4_init_port_param {
	int			set_guid0;
	int			set_node_guid;
	int			set_si_guid;
	u16			mtu;
	int			port_width_cap;
	u16			vl_cap;
	u16			max_gid;
	u16			max_pkey;
	u64			guid0;
	u64			node_guid;
	u64			si_guid;
};

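/*
 * Iterate over the ports of @dev whose configured link type matches
 * @type (ports flagged in caps.port_mask are the IB ones).  For example,
 * with setup_eth_port() standing in as a hypothetical helper:
 *
 *	int port;
 *
 *	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
 *		setup_eth_port(dev, port);
 */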
#define mlx4_foreach_port(port, dev, type)				\
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
		if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
		     ~(dev)->caps.port_mask) & 1 << ((port) - 1))

int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
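/*
 * Return a kernel pointer @offset bytes into a buffer allocated with
 * mlx4_buf_alloc().  Small buffers are physically contiguous, and on
 * 64-bit kernels the driver also maps multi-page buffers into a single
 * virtual range, so direct.buf can be used directly; otherwise the
 * per-page list is indexed.
 */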
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return buf->direct.buf + offset;
	else
		return buf->page_list[offset >> PAGE_SHIFT].buf +
			(offset & (PAGE_SIZE - 1));
}

int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);

int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt);
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf);

int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);

int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct);
void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
		       int size);

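/*
 * Roughly, a consumer creates a CQ by allocating the CQE buffer, MTT and
 * doorbell record (for example via mlx4_alloc_hwq_res()), writing the
 * buffer pages into the MTT with mlx4_buf_write_mtt(), and then calling
 * mlx4_cq_alloc() with the MTT, a UAR and the doorbell DMA address:
 *
 *	err = mlx4_cq_alloc(dev, nent, &wqres.mtt, &uar,
 *			    wqres.db.dma, &cq, vector, 0);
 *
 * where wqres, uar, nent, cq and vector are the caller's own objects.
 */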
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);

int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);

int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
		   u64 db_rec, struct mlx4_srq *srq);
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);

int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);

int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);

int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);

int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);

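/*
 * The usual FMR calling sequence appears to be: mlx4_fmr_alloc() to
 * reserve the region and its MTTs, mlx4_fmr_enable() to make it usable,
 * mlx4_map_phys_fmr() to (re)map a page list and obtain fresh lkey/rkey
 * values (up to max_maps times), then mlx4_fmr_unmap() and
 * mlx4_fmr_free() to tear the region down again.
 */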
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey);
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);

#endif /* MLX4_DEVICE_H */