/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_DEVICE_H
#define MLX4_DEVICE_H

#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/radix-tree.h>

#include <asm/atomic.h>

enum {
	MLX4_FLAG_MSI_X		= 1 << 0,
	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1,
};

enum {
	MLX4_MAX_PORTS		= 2
};

enum {
	MLX4_BOARD_ID_LEN = 64
};

enum {
	MLX4_DEV_CAP_FLAG_RC		= 1 <<  0,
	MLX4_DEV_CAP_FLAG_UC		= 1 <<  1,
	MLX4_DEV_CAP_FLAG_UD		= 1 <<  2,
	MLX4_DEV_CAP_FLAG_SRQ		= 1 <<  6,
	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1 <<  7,
	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1 <<  8,
	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1 <<  9,
	MLX4_DEV_CAP_FLAG_DPDP		= 1 << 12,
	MLX4_DEV_CAP_FLAG_BLH		= 1 << 15,
	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1 << 16,
	MLX4_DEV_CAP_FLAG_APM		= 1 << 17,
	MLX4_DEV_CAP_FLAG_ATOMIC	= 1 << 18,
	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1 << 19,
	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1 << 20,
	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1 << 21,
	MLX4_DEV_CAP_FLAG_IBOE		= 1 << 30
};

enum {
	MLX4_BMME_FLAG_LOCAL_INV	= 1 <<  6,
	MLX4_BMME_FLAG_REMOTE_INV	= 1 <<  7,
	MLX4_BMME_FLAG_TYPE_2_WIN	= 1 <<  9,
	MLX4_BMME_FLAG_RESERVED_LKEY	= 1 << 10,
	MLX4_BMME_FLAG_FAST_REG_WR	= 1 << 11,
};

enum mlx4_event {
	MLX4_EVENT_TYPE_COMP		   = 0x00,
	MLX4_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX4_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX4_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE	   = 0x13,
	MLX4_EVENT_TYPE_SRQ_LIMIT	   = 0x14,
	MLX4_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX4_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX4_EVENT_TYPE_EEC_CATAS_ERROR	   = 0x06,
	MLX4_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX4_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX4_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,
	MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MLX4_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX4_EVENT_TYPE_EQ_OVERFLOW	   = 0x0f,
	MLX4_EVENT_TYPE_ECC_DETECT	   = 0x0e,
	MLX4_EVENT_TYPE_CMD		   = 0x0a
};

enum {
	MLX4_PORT_CHANGE_SUBTYPE_DOWN	= 1,
	MLX4_PORT_CHANGE_SUBTYPE_ACTIVE	= 4
};

enum {
	MLX4_PERM_LOCAL_READ	= 1 << 10,
	MLX4_PERM_LOCAL_WRITE	= 1 << 11,
	MLX4_PERM_REMOTE_READ	= 1 << 12,
	MLX4_PERM_REMOTE_WRITE	= 1 << 13,
	MLX4_PERM_ATOMIC	= 1 << 14
};

enum {
	MLX4_OPCODE_NOP			= 0x00,
	MLX4_OPCODE_SEND_INVAL		= 0x01,
	MLX4_OPCODE_RDMA_WRITE		= 0x08,
	MLX4_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX4_OPCODE_SEND		= 0x0a,
	MLX4_OPCODE_SEND_IMM		= 0x0b,
	MLX4_OPCODE_LSO			= 0x0e,
	MLX4_OPCODE_RDMA_READ		= 0x10,
	MLX4_OPCODE_ATOMIC_CS		= 0x11,
	MLX4_OPCODE_ATOMIC_FA		= 0x12,
	MLX4_OPCODE_MASKED_ATOMIC_CS	= 0x14,
	MLX4_OPCODE_MASKED_ATOMIC_FA	= 0x15,
	MLX4_OPCODE_BIND_MW		= 0x18,
	MLX4_OPCODE_FMR			= 0x19,
	MLX4_OPCODE_LOCAL_INVAL		= 0x1b,
	MLX4_OPCODE_CONFIG_CMD		= 0x1f,

	MLX4_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX4_RECV_OPCODE_SEND		= 0x01,
	MLX4_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX4_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX4_CQE_OPCODE_ERROR		= 0x1e,
	MLX4_CQE_OPCODE_RESIZE		= 0x16,
};

enum {
	MLX4_STAT_RATE_OFFSET	= 5
};

enum mlx4_protocol {
	MLX4_PROTOCOL_IB,
	MLX4_PROTOCOL_EN,
};

enum {
	MLX4_MTT_FLAG_PRESENT		= 1
};

enum mlx4_qp_region {
	MLX4_QP_REGION_FW = 0,
	MLX4_QP_REGION_ETH_ADDR,
	MLX4_QP_REGION_FC_ADDR,
	MLX4_QP_REGION_FC_EXCH,
	MLX4_NUM_QP_REGION
};

enum mlx4_port_type {
	MLX4_PORT_TYPE_IB	= 1,
	MLX4_PORT_TYPE_ETH	= 2,
	MLX4_PORT_TYPE_AUTO	= 3
};

enum mlx4_special_vlan_idx {
	MLX4_NO_VLAN_IDX        = 0,
	MLX4_VLAN_MISS_IDX,
	MLX4_VLAN_REGULAR
};

enum {
	MLX4_NUM_FEXCH          = 64 * 1024,
};

enum {
	MLX4_MAX_FAST_REG_PAGES = 511,
};

static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
	return (major << 32) | (minor << 16) | subminor;
}

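/*
 * Illustrative note, not part of the original header: mlx4_fw_ver()
 * packs the firmware version with the major number in bits 63:32, the
 * minor number in bits 31:16 and the subminor in bits 15:0, so a
 * reported version such as mlx4_dev.caps.fw_ver can be unpacked as:
 *
 *	u64 fw     = dev->caps.fw_ver;
 *	u32 fw_maj = (u32)(fw >> 32);
 *	u16 fw_min = (u16)(fw >> 16);
 *	u16 fw_sub = (u16)fw;
 *
 *	pr_info("mlx4 firmware %u.%u.%u\n", fw_maj, fw_min, fw_sub);
 */
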
struct mlx4_caps {
	u64			fw_ver;
	int			num_ports;
	int			vl_cap[MLX4_MAX_PORTS + 1];
	int			ib_mtu_cap[MLX4_MAX_PORTS + 1];
	__be32			ib_port_def_cap[MLX4_MAX_PORTS + 1];
	u64			def_mac[MLX4_MAX_PORTS + 1];
	int			eth_mtu_cap[MLX4_MAX_PORTS + 1];
	int			gid_table_len[MLX4_MAX_PORTS + 1];
	int			pkey_table_len[MLX4_MAX_PORTS + 1];
	int			trans_type[MLX4_MAX_PORTS + 1];
	int			vendor_oui[MLX4_MAX_PORTS + 1];
	int			wavelength[MLX4_MAX_PORTS + 1];
	u64			trans_code[MLX4_MAX_PORTS + 1];
	int			local_ca_ack_delay;
	int			num_uars;
	int			bf_reg_size;
	int			bf_regs_per_page;
	int			max_sq_sg;
	int			max_rq_sg;
	int			num_qps;
	int			max_wqes;
	int			max_sq_desc_sz;
	int			max_rq_desc_sz;
	int			max_qp_init_rdma;
	int			max_qp_dest_rdma;
	int			sqp_start;
	int			num_srqs;
	int			max_srq_wqes;
	int			max_srq_sge;
	int			reserved_srqs;
	int			num_cqs;
	int			max_cqes;
	int			reserved_cqs;
	int			num_eqs;
	int			reserved_eqs;
	int			num_comp_vectors;
	int			num_mpts;
	int			num_mtt_segs;
	int			mtts_per_seg;
	int			fmr_reserved_mtts;
	int			reserved_mtts;
	int			reserved_mrws;
	int			reserved_uars;
	int			num_mgms;
	int			num_amgms;
	int			reserved_mcgs;
	int			num_qp_per_mgm;
	int			num_pds;
	int			reserved_pds;
	int			mtt_entry_sz;
	u32			max_msg_sz;
	u32			page_size_cap;
	u32			flags;
	u32			bmme_flags;
	u32			reserved_lkey;
	u16			stat_rate_support;
	int			udp_rss;
	int			loopback_support;
	u8			port_width_cap[MLX4_MAX_PORTS + 1];
	int			max_gso_sz;
	int                     reserved_qps_cnt[MLX4_NUM_QP_REGION];
	int			reserved_qps;
	int                     reserved_qps_base[MLX4_NUM_QP_REGION];
	int                     log_num_macs;
	int                     log_num_vlans;
	int                     log_num_prios;
	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
	u8			supported_type[MLX4_MAX_PORTS + 1];
	u32			port_mask;
	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
};

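/*
 * Illustrative note, not part of the original header: the per-port
 * arrays in struct mlx4_caps are sized MLX4_MAX_PORTS + 1 because mlx4
 * ports are numbered from 1 (see mlx4_foreach_port() below), so index 0
 * is unused.  A sketch of walking the per-port GID table lengths:
 *
 *	int port;
 *
 *	for (port = 1; port <= dev->caps.num_ports; port++)
 *		pr_info("port %d: gid_table_len %d\n",
 *			port, dev->caps.gid_table_len[port]);
 */
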
struct mlx4_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx4_buf {
	struct mlx4_buf_list	direct;
	struct mlx4_buf_list   *page_list;
	int			nbufs;
	int			npages;
	int			page_shift;
};

struct mlx4_mtt {
	u32			first_seg;
	int			order;
	int			page_shift;
};

enum {
	MLX4_DB_PER_PAGE = PAGE_SIZE / 4
};

struct mlx4_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
	DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
	unsigned long	       *bits[2];
	__be32		       *db_page;
	dma_addr_t		db_dma;
};

struct mlx4_ib_user_db_page;

struct mlx4_db {
	__be32			*db;
	union {
		struct mlx4_db_pgdir		*pgdir;
		struct mlx4_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
	int			order;
};

struct mlx4_hwq_resources {
	struct mlx4_db		db;
	struct mlx4_mtt		mtt;
	struct mlx4_buf		buf;
};

struct mlx4_mr {
	struct mlx4_mtt		mtt;
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
	u32			access;
	int			enabled;
};

struct mlx4_fmr {
	struct mlx4_mr		mr;
	struct mlx4_mpt_entry  *mpt;
	__be64		       *mtts;
	dma_addr_t		dma_handle;
	int			max_pages;
	int			max_maps;
	int			maps;
	u8			page_shift;
};

struct mlx4_uar {
	unsigned long		pfn;
	int			index;
};

struct mlx4_cq {
	void (*comp)		(struct mlx4_cq *);
	void (*event)		(struct mlx4_cq *, enum mlx4_event);

	struct mlx4_uar	       *uar;

	u32			cons_index;

	__be32		       *set_ci_db;
	__be32		       *arm_db;
	int			arm_sn;

	int			cqn;
	unsigned		vector;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_qp {
	void (*event)		(struct mlx4_qp *, enum mlx4_event);

	int			qpn;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_srq {
	void (*event)		(struct mlx4_srq *, enum mlx4_event);

	int			srqn;
	int			max;
	int			max_gs;
	int			wqe_shift;

	atomic_t		refcount;
	struct completion	free;
};

struct mlx4_av {
	__be32			port_pd;
	u8			reserved1;
	u8			g_slid;
	__be16			dlid;
	u8			reserved2;
	u8			gid_index;
	u8			stat_rate;
	u8			hop_limit;
	__be32			sl_tclass_flowlabel;
	u8			dgid[16];
};

struct mlx4_eth_av {
	__be32		port_pd;
	u8		reserved1;
	u8		smac_idx;
	u16		reserved2;
	u8		reserved3;
	u8		gid_index;
	u8		stat_rate;
	u8		hop_limit;
	__be32		sl_tclass_flowlabel;
	u8		dgid[16];
	u32		reserved4[2];
	__be16		vlan;
	u8		mac[6];
};

union mlx4_ext_av {
	struct mlx4_av		ib;
	struct mlx4_eth_av	eth;
};

struct mlx4_dev {
	struct pci_dev	       *pdev;
	unsigned long		flags;
	struct mlx4_caps	caps;
	struct radix_tree_root	qp_table_tree;
	u32			rev_id;
	char			board_id[MLX4_BOARD_ID_LEN];
};

struct mlx4_init_port_param {
	int			set_guid0;
	int			set_node_guid;
	int			set_si_guid;
	u16			mtu;
	int			port_width_cap;
	u16			vl_cap;
	u16			max_gid;
	u16			max_pkey;
	u64			guid0;
	u64			node_guid;
	u64			si_guid;
};

#define mlx4_foreach_port(port, dev, type)				\
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
		if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
		     ~(dev)->caps.port_mask) & 1 << ((port) - 1))

#define mlx4_foreach_ib_transport_port(port, dev)			\
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
		if (((dev)->caps.port_mask & 1 << ((port) - 1)) ||	\
		    ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))


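/*
 * Illustrative usage sketch, not part of the original header:
 * mlx4_foreach_port() visits only the ports whose configured type
 * matches the one asked for, e.g. every Ethernet port of a device:
 *
 *	int port;
 *
 *	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
 *		pr_info("port %d is configured as Ethernet\n", port);
 */
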
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return buf->direct.buf + offset;
	else
		return buf->page_list[offset >> PAGE_SHIFT].buf +
			(offset & (PAGE_SIZE - 1));
}

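/*
 * Illustrative usage sketch, not part of the original header; the size
 * and offset below are made up.  mlx4_buf_alloc() may satisfy the
 * request with one direct chunk or with a page list, and
 * mlx4_buf_offset() hides that difference from the caller:
 *
 *	struct mlx4_buf buf;
 *	void *wqe;
 *	int err;
 *
 *	err = mlx4_buf_alloc(dev, 4096, PAGE_SIZE, &buf);
 *	if (err)
 *		return err;
 *
 *	wqe = mlx4_buf_offset(&buf, 64);
 *	...
 *	mlx4_buf_free(dev, 4096, &buf);
 */
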
int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);

int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);

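/*
 * Illustrative usage sketch, not part of the original header: a
 * protection domain and a UAR are typically allocated once up front
 * and released in reverse order on teardown.
 *
 *	struct mlx4_uar uar;
 *	u32 pdn;
 *	int err;
 *
 *	err = mlx4_pd_alloc(dev, &pdn);
 *	if (err)
 *		return err;
 *
 *	err = mlx4_uar_alloc(dev, &uar);
 *	if (err) {
 *		mlx4_pd_free(dev, pdn);
 *		return err;
 *	}
 *	...
 *	mlx4_uar_free(dev, &uar);
 *	mlx4_pd_free(dev, pdn);
 */
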
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt);
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf);

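/*
 * Illustrative sketch, not part of the original header, of one way a
 * memory region could be set up from the declarations above:
 * mlx4_mr_alloc() reserves the key and its MTT range, mlx4_write_mtt()
 * fills in the page addresses, and mlx4_mr_enable() makes the region
 * usable by the HCA.  pdn, iova, size, npages and page_list are
 * assumed to come from the caller.
 *
 *	struct mlx4_mr mr;
 *	int err;
 *
 *	err = mlx4_mr_alloc(dev, pdn, iova, size,
 *			    MLX4_PERM_LOCAL_READ | MLX4_PERM_LOCAL_WRITE,
 *			    npages, PAGE_SHIFT, &mr);
 *	if (err)
 *		return err;
 *
 *	err = mlx4_write_mtt(dev, &mr.mtt, 0, npages, page_list);
 *	if (!err)
 *		err = mlx4_mr_enable(dev, &mr);
 *	if (err)
 *		mlx4_mr_free(dev, &mr);
 */
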
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);

int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct);
void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
		       int size);

int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);

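/*
 * Illustrative sketch, not part of the original header: a completion
 * queue needs backing memory, an MTT and a doorbell record, which is
 * what struct mlx4_hwq_resources bundles.  One plausible creation
 * flow, with nent, cqe_size and uar (from a prior mlx4_uar_alloc())
 * assumed to come from the caller:
 *
 *	struct mlx4_hwq_resources wqres;
 *	struct mlx4_cq cq;
 *	int err;
 *
 *	err = mlx4_alloc_hwq_res(dev, &wqres, nent * cqe_size, 2 * PAGE_SIZE);
 *	if (err)
 *		return err;
 *
 *	err = mlx4_cq_alloc(dev, nent, &wqres.mtt, &uar, wqres.db.dma,
 *			    &cq, 0, 0);
 *	if (err)
 *		mlx4_free_hwq_res(dev, &wqres, nent * cqe_size);
 */
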
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);

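/*
 * Illustrative sketch, not part of the original header: QP numbers are
 * handed out in aligned ranges, so even a single QP is obtained by
 * reserving a range of one and released in two steps:
 *
 *	struct mlx4_qp qp;
 *	int base, err;
 *
 *	err = mlx4_qp_reserve_range(dev, 1, 1, &base);
 *	if (err)
 *		return err;
 *
 *	err = mlx4_qp_alloc(dev, base, &qp);
 *	if (err) {
 *		mlx4_qp_release_range(dev, base, 1);
 *		return err;
 *	}
 *	...
 *	mlx4_qp_free(dev, &qp);
 *	mlx4_qp_release_range(dev, base, 1);
 */
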
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
		   u64 db_rec, struct mlx4_srq *srq);
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);

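/*
 * Illustrative sketch, not part of the original header: arming an SRQ
 * with a limit watermark is the usual way to request an
 * MLX4_EVENT_TYPE_SRQ_LIMIT event once the queue runs low; srq is
 * assumed to have been created with mlx4_srq_alloc() above, and the
 * watermark of 16 is arbitrary.
 *
 *	int watermark, err;
 *
 *	err = mlx4_srq_arm(dev, srq, 16);
 *	if (!err)
 *		err = mlx4_srq_query(dev, srq, &watermark);
 */
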
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);

int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol protocol);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol protocol);

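/*
 * Illustrative sketch, not part of the original header: attaching a QP
 * to a multicast GID without blocking loopback, then detaching it
 * again; qp and gid[16] are assumed to be set up by the caller.
 *
 *	err = mlx4_multicast_attach(dev, &qp, gid, 0, MLX4_PROTOCOL_IB);
 *	if (err)
 *		return err;
 *	...
 *	mlx4_multicast_detach(dev, &qp, gid, MLX4_PROTOCOL_IB);
 */
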
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);

int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);

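/*
 * Illustrative sketch, not part of the original header: a unicast MAC
 * and a VLAN are registered per port and later referenced by the
 * returned table index; port, mac and vid are assumed to come from the
 * caller.
 *
 *	int mac_index, vlan_index, err;
 *
 *	err = mlx4_register_mac(dev, port, mac, &mac_index);
 *	if (err)
 *		return err;
 *
 *	err = mlx4_register_vlan(dev, port, vid, &vlan_index);
 *	if (err) {
 *		mlx4_unregister_mac(dev, port, mac_index);
 *		return err;
 *	}
 *	...
 *	mlx4_unregister_vlan(dev, port, vlan_index);
 *	mlx4_unregister_mac(dev, port, mac_index);
 */
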
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey);
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);

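/*
 * Illustrative sketch, not part of the original header, of the FMR
 * life cycle suggested by the declarations above: allocate and enable
 * once, then map and unmap repeatedly before freeing.  pdn, npages,
 * page_list and iova are assumed to come from the caller; a max_maps
 * of 32 is arbitrary.
 *
 *	struct mlx4_fmr fmr;
 *	u32 lkey, rkey;
 *	int err;
 *
 *	err = mlx4_fmr_alloc(dev, pdn,
 *			     MLX4_PERM_LOCAL_READ | MLX4_PERM_LOCAL_WRITE,
 *			     npages, 32, PAGE_SHIFT, &fmr);
 *	if (err)
 *		return err;
 *
 *	err = mlx4_fmr_enable(dev, &fmr);
 *	if (!err)
 *		err = mlx4_map_phys_fmr(dev, &fmr, page_list, npages,
 *					iova, &lkey, &rkey);
 *	...
 *	mlx4_fmr_unmap(dev, &fmr, &lkey, &rkey);
 *	mlx4_fmr_free(dev, &fmr);
 */
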
#endif /* MLX4_DEVICE_H */