/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_DEVICE_H
#define MLX4_DEVICE_H

#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>

enum {
	MLX4_FLAG_MSI_X		= 1 << 0,
	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1,
};

enum {
	MLX4_MAX_PORTS		= 2
};

enum {
	MLX4_BOARD_ID_LEN = 64
};

enum {
	MLX4_DEV_CAP_FLAG_RC		= 1 <<  0,
	MLX4_DEV_CAP_FLAG_UC		= 1 <<  1,
	MLX4_DEV_CAP_FLAG_UD		= 1 <<  2,
	MLX4_DEV_CAP_FLAG_SRQ		= 1 <<  6,
	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1 <<  7,
	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1 <<  8,
	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1 <<  9,
	MLX4_DEV_CAP_FLAG_DPDP		= 1 << 12,
	MLX4_DEV_CAP_FLAG_BLH		= 1 << 15,
	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1 << 16,
	MLX4_DEV_CAP_FLAG_APM		= 1 << 17,
	MLX4_DEV_CAP_FLAG_ATOMIC	= 1 << 18,
	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1 << 19,
	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1 << 20,
	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1 << 21,
	MLX4_DEV_CAP_FLAG_IBOE		= 1 << 30
};
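
/*
 * Illustrative note (editorial, not from the original header): the
 * MLX4_DEV_CAP_FLAG_* values are single-bit masks that the driver keeps in
 * mlx4_caps.flags, so a consumer would typically test a capability with a
 * bitwise AND, e.g.:
 *
 *	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
 *		// device supports RDMA over Ethernet transport
 */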

enum {
	MLX4_BMME_FLAG_LOCAL_INV	= 1 <<  6,
	MLX4_BMME_FLAG_REMOTE_INV	= 1 <<  7,
	MLX4_BMME_FLAG_TYPE_2_WIN	= 1 <<  9,
	MLX4_BMME_FLAG_RESERVED_LKEY	= 1 << 10,
	MLX4_BMME_FLAG_FAST_REG_WR	= 1 << 11,
};

enum mlx4_event {
	MLX4_EVENT_TYPE_COMP		   = 0x00,
	MLX4_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX4_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX4_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE	   = 0x13,
	MLX4_EVENT_TYPE_SRQ_LIMIT	   = 0x14,
	MLX4_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX4_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX4_EVENT_TYPE_EEC_CATAS_ERROR	   = 0x06,
	MLX4_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX4_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX4_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,
	MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MLX4_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX4_EVENT_TYPE_EQ_OVERFLOW	   = 0x0f,
	MLX4_EVENT_TYPE_ECC_DETECT	   = 0x0e,
	MLX4_EVENT_TYPE_CMD		   = 0x0a
};

enum {
	MLX4_PORT_CHANGE_SUBTYPE_DOWN	= 1,
	MLX4_PORT_CHANGE_SUBTYPE_ACTIVE	= 4
};

enum {
	MLX4_PERM_LOCAL_READ	= 1 << 10,
	MLX4_PERM_LOCAL_WRITE	= 1 << 11,
	MLX4_PERM_REMOTE_READ	= 1 << 12,
	MLX4_PERM_REMOTE_WRITE	= 1 << 13,
	MLX4_PERM_ATOMIC	= 1 << 14
};
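
/*
 * Illustrative sketch (assumption, not part of the original header): the
 * MLX4_PERM_* bits are combined with bitwise OR to describe memory-region
 * access rights, for example when building the access argument passed to
 * mlx4_mr_alloc() below:
 *
 *	u32 access = MLX4_PERM_LOCAL_READ | MLX4_PERM_LOCAL_WRITE |
 *		     MLX4_PERM_REMOTE_WRITE;
 *	err = mlx4_mr_alloc(dev, pdn, iova, size, access,
 *			    npages, page_shift, &mr);
 */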

enum {
	MLX4_OPCODE_NOP			= 0x00,
	MLX4_OPCODE_SEND_INVAL		= 0x01,
	MLX4_OPCODE_RDMA_WRITE		= 0x08,
	MLX4_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX4_OPCODE_SEND		= 0x0a,
	MLX4_OPCODE_SEND_IMM		= 0x0b,
	MLX4_OPCODE_LSO			= 0x0e,
	MLX4_OPCODE_RDMA_READ		= 0x10,
	MLX4_OPCODE_ATOMIC_CS		= 0x11,
	MLX4_OPCODE_ATOMIC_FA		= 0x12,
	MLX4_OPCODE_MASKED_ATOMIC_CS	= 0x14,
	MLX4_OPCODE_MASKED_ATOMIC_FA	= 0x15,
	MLX4_OPCODE_BIND_MW		= 0x18,
	MLX4_OPCODE_FMR			= 0x19,
	MLX4_OPCODE_LOCAL_INVAL		= 0x1b,
	MLX4_OPCODE_CONFIG_CMD		= 0x1f,

	MLX4_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX4_RECV_OPCODE_SEND		= 0x01,
	MLX4_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX4_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX4_CQE_OPCODE_ERROR		= 0x1e,
	MLX4_CQE_OPCODE_RESIZE		= 0x16,
};

enum {
	MLX4_STAT_RATE_OFFSET	= 5
};

enum {
	MLX4_MTT_FLAG_PRESENT		= 1
};

enum mlx4_qp_region {
	MLX4_QP_REGION_FW = 0,
	MLX4_QP_REGION_ETH_ADDR,
	MLX4_QP_REGION_FC_ADDR,
	MLX4_QP_REGION_FC_EXCH,
	MLX4_NUM_QP_REGION
};

enum mlx4_port_type {
	MLX4_PORT_TYPE_IB	= 1,
	MLX4_PORT_TYPE_ETH	= 2,
	MLX4_PORT_TYPE_AUTO	= 3
};

enum mlx4_special_vlan_idx {
	MLX4_NO_VLAN_IDX        = 0,
	MLX4_VLAN_MISS_IDX,
	MLX4_VLAN_REGULAR
};

enum {
	MLX4_NUM_FEXCH          = 64 * 1024,
};

enum {
	MLX4_MAX_FAST_REG_PAGES = 511,
};

static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
	return (major << 32) | (minor << 16) | subminor;
}
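
/*
 * Example of the packing performed by mlx4_fw_ver() above (arithmetic only):
 * firmware version 2.7.0 becomes mlx4_fw_ver(2, 7, 0) == 0x0000000200070000,
 * i.e. the major number sits in bits 63:32, the minor in bits 31:16 and the
 * subminor in bits 15:0, so packed versions compare correctly as plain u64s.
 */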

struct mlx4_caps {
	u64			fw_ver;
	int			num_ports;
	int			vl_cap[MLX4_MAX_PORTS + 1];
	int			ib_mtu_cap[MLX4_MAX_PORTS + 1];
	__be32			ib_port_def_cap[MLX4_MAX_PORTS + 1];
	u64			def_mac[MLX4_MAX_PORTS + 1];
	int			eth_mtu_cap[MLX4_MAX_PORTS + 1];
	int			gid_table_len[MLX4_MAX_PORTS + 1];
	int			pkey_table_len[MLX4_MAX_PORTS + 1];
	int			trans_type[MLX4_MAX_PORTS + 1];
	int			vendor_oui[MLX4_MAX_PORTS + 1];
	int			wavelength[MLX4_MAX_PORTS + 1];
	u64			trans_code[MLX4_MAX_PORTS + 1];
	int			local_ca_ack_delay;
	int			num_uars;
	int			bf_reg_size;
	int			bf_regs_per_page;
	int			max_sq_sg;
	int			max_rq_sg;
	int			num_qps;
	int			max_wqes;
	int			max_sq_desc_sz;
	int			max_rq_desc_sz;
	int			max_qp_init_rdma;
	int			max_qp_dest_rdma;
	int			sqp_start;
	int			num_srqs;
	int			max_srq_wqes;
	int			max_srq_sge;
	int			reserved_srqs;
	int			num_cqs;
	int			max_cqes;
	int			reserved_cqs;
	int			num_eqs;
	int			reserved_eqs;
	int			num_comp_vectors;
	int			num_mpts;
	int			num_mtt_segs;
	int			mtts_per_seg;
	int			fmr_reserved_mtts;
	int			reserved_mtts;
	int			reserved_mrws;
	int			reserved_uars;
	int			num_mgms;
	int			num_amgms;
	int			reserved_mcgs;
	int			num_qp_per_mgm;
	int			num_pds;
	int			reserved_pds;
	int			mtt_entry_sz;
	u32			max_msg_sz;
	u32			page_size_cap;
	u32			flags;
	u32			bmme_flags;
	u32			reserved_lkey;
	u16			stat_rate_support;
	int			udp_rss;
	int			loopback_support;
	u8			port_width_cap[MLX4_MAX_PORTS + 1];
	int			max_gso_sz;
	int                     reserved_qps_cnt[MLX4_NUM_QP_REGION];
	int			reserved_qps;
	int                     reserved_qps_base[MLX4_NUM_QP_REGION];
	int                     log_num_macs;
	int                     log_num_vlans;
	int                     log_num_prios;
	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
	u8			supported_type[MLX4_MAX_PORTS + 1];
	u32			port_mask;
	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
};
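
/*
 * Note (editorial): the per-port arrays in struct mlx4_caps above are sized
 * MLX4_MAX_PORTS + 1 because mlx4 ports are numbered starting at 1 (see the
 * mlx4_foreach_port() iterators below); index 0 is unused, which lets callers
 * index the arrays directly with a 1-based port number.
 */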

struct mlx4_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx4_buf {
	struct mlx4_buf_list	direct;
	struct mlx4_buf_list   *page_list;
	int			nbufs;
	int			npages;
	int			page_shift;
};

struct mlx4_mtt {
	u32			first_seg;
	int			order;
	int			page_shift;
};

enum {
	MLX4_DB_PER_PAGE = PAGE_SIZE / 4
};

struct mlx4_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
	DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
	unsigned long	       *bits[2];
	__be32		       *db_page;
	dma_addr_t		db_dma;
};
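
/*
 * Descriptive note (assumption based on the layout above): each
 * mlx4_db_pgdir tracks one DMA-coherent page of doorbell records.  The page
 * is viewed as MLX4_DB_PER_PAGE (PAGE_SIZE / 4) __be32 slots; the order0
 * bitmap covers single 4-byte doorbells and the order1 bitmap covers aligned
 * pairs (8-byte doorbells), with bits[] pointing at the two bitmaps so that
 * mlx4_db_alloc() can select one by its "order" argument.
 */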

struct mlx4_ib_user_db_page;

struct mlx4_db {
	__be32			*db;
	union {
		struct mlx4_db_pgdir		*pgdir;
		struct mlx4_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
	int			order;
};

struct mlx4_hwq_resources {
	struct mlx4_db		db;
	struct mlx4_mtt		mtt;
	struct mlx4_buf		buf;
};

struct mlx4_mr {
	struct mlx4_mtt		mtt;
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
	u32			access;
	int			enabled;
};

struct mlx4_fmr {
	struct mlx4_mr		mr;
	struct mlx4_mpt_entry  *mpt;
	__be64		       *mtts;
	dma_addr_t		dma_handle;
	int			max_pages;
	int			max_maps;
	int			maps;
	u8			page_shift;
};

struct mlx4_uar {
	unsigned long		pfn;
	int			index;
};

struct mlx4_cq {
	void (*comp)		(struct mlx4_cq *);
	void (*event)		(struct mlx4_cq *, enum mlx4_event);

	struct mlx4_uar	       *uar;

	u32			cons_index;

	__be32		       *set_ci_db;
	__be32		       *arm_db;
	int			arm_sn;

	int			cqn;
	unsigned		vector;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_qp {
	void (*event)		(struct mlx4_qp *, enum mlx4_event);

	int			qpn;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_srq {
	void (*event)		(struct mlx4_srq *, enum mlx4_event);

	int			srqn;
	int			max;
	int			max_gs;
	int			wqe_shift;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_av {
	__be32			port_pd;
	u8			reserved1;
	u8			g_slid;
	__be16			dlid;
	u8			reserved2;
	u8			gid_index;
	u8			stat_rate;
	u8			hop_limit;
	__be32			sl_tclass_flowlabel;
	u8			dgid[16];
};

struct mlx4_eth_av {
	__be32		port_pd;
	u8		reserved1;
	u8		smac_idx;
	u16		reserved2;
	u8		reserved3;
	u8		gid_index;
	u8		stat_rate;
	u8		hop_limit;
	__be32		sl_tclass_flowlabel;
	u8		dgid[16];
	u32		reserved4[2];
	__be16		vlan;
	u8		mac[6];
};

union mlx4_ext_av {
	struct mlx4_av		ib;
	struct mlx4_eth_av	eth;
};

struct mlx4_dev {
	struct pci_dev	       *pdev;
	unsigned long		flags;
	struct mlx4_caps	caps;
	struct radix_tree_root	qp_table_tree;
	u32			rev_id;
	char			board_id[MLX4_BOARD_ID_LEN];
};

struct mlx4_init_port_param {
	int			set_guid0;
	int			set_node_guid;
	int			set_si_guid;
	u16			mtu;
	int			port_width_cap;
	u16			vl_cap;
	u16			max_gid;
	u16			max_pkey;
	u64			guid0;
	u64			node_guid;
	u64			si_guid;
};

#define mlx4_foreach_port(port, dev, type)				\
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
		if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
		     ~(dev)->caps.port_mask) & 1 << ((port) - 1))

#define mlx4_foreach_ib_transport_port(port, dev)			\
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
		if (((dev)->caps.port_mask & 1 << ((port) - 1)) ||	\
		    ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
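
/*
 * Illustrative usage (not from the original header): the iterators above walk
 * 1-based port numbers and filter them against caps.port_mask, so a caller
 * wanting only the Ethernet ports might write:
 *
 *	int port;
 *
 *	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
 *		setup_eth_port(dev, port);	// hypothetical helper
 */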


int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return buf->direct.buf + offset;
	else
		return buf->page_list[offset >> PAGE_SHIFT].buf +
			(offset & (PAGE_SIZE - 1));
}
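
/*
 * Note (editorial): mlx4_buf_offset() assumes the buffer was obtained from
 * mlx4_buf_alloc().  For single-chunk allocations (or, in this driver
 * generation, on 64-bit kernels where the chunks are mapped into one virtual
 * range) the contiguous direct.buf mapping is used; otherwise the offset is
 * resolved through the per-page page_list.  A hypothetical caller fetching
 * the n-th fixed-size descriptor might do:
 *
 *	void *desc = mlx4_buf_offset(&wqres->buf, n << desc_shift);
 */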

int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);

int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt);
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf);
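
/*
 * Illustrative sketch (assumption, not a verbatim in-tree sequence): a memory
 * region is typically brought up by allocating it, pointing its MTTs at the
 * backing buffer and then enabling it with firmware, e.g.:
 *
 *	err = mlx4_mr_alloc(dev, pdn, iova, size, access,
 *			    npages, page_shift, &mr);
 *	if (!err)
 *		err = mlx4_buf_write_mtt(dev, &mr.mtt, &buf);
 *	if (!err)
 *		err = mlx4_mr_enable(dev, &mr);
 *	...
 *	mlx4_mr_free(dev, &mr);
 */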

int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);

int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct);
void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
		       int size);

int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
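
/*
 * Illustrative sketch (assumption): completion queues are usually created on
 * top of a hardware-queue resource set, which supplies the buffer, MTTs and
 * doorbell record consumed by mlx4_cq_alloc().  With a caller-chosen entry
 * count, CQE size, UAR and interrupt vector this might look like:
 *
 *	err = mlx4_alloc_hwq_res(dev, &wqres, nent * cqe_size, PAGE_SIZE);
 *	if (!err)
 *		err = mlx4_cq_alloc(dev, nent, &wqres.mtt, &uar,
 *				    wqres.db.dma, &cq, vector, 0);
 */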

int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);

int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
		   u64 db_rec, struct mlx4_srq *srq);
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);

int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);

int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);

int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);

int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
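
/*
 * Note (editorial): mlx4_register_mac() and mlx4_register_vlan() return a
 * port-table index through *index; the caller is expected to keep that index
 * and pass it back to the matching mlx4_unregister_mac() or
 * mlx4_unregister_vlan() when the address is released.
 */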

int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey);
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
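
/*
 * Illustrative sketch (assumption): a fast memory region is allocated and
 * enabled once, then mapped and unmapped repeatedly (up to max_maps) before
 * being freed:
 *
 *	err = mlx4_fmr_alloc(dev, pdn, access, max_pages, max_maps,
 *			     page_shift, &fmr);
 *	if (!err)
 *		err = mlx4_fmr_enable(dev, &fmr);
 *	...
 *	err = mlx4_map_phys_fmr(dev, &fmr, page_list, npages, iova,
 *				&lkey, &rkey);
 *	...
 *	mlx4_fmr_unmap(dev, &fmr, &lkey, &rkey);
 *	mlx4_fmr_free(dev, &fmr);
 */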
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);

#endif /* MLX4_DEVICE_H */