/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>

enum {
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,
};

enum {
	/* Two hours, for the sake of bringup. Commands must always
	 * complete, but this timeout value may need tuning.
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 7200 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,
};

enum {
	CMD_OWNER_SW		= 0x0,
	CMD_OWNER_HW		= 0x1,
	CMD_STATUS_SUCCESS	= 0,
};

enum mlx5_sqp_t {
	MLX5_SQP_SMI		= 0,
	MLX5_SQP_GSI		= 1,
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SNIFFER	= 3,
	MLX5_SQP_SYNC_UMR	= 4,
};

enum {
	MLX5_MAX_PORTS	= 2,
};

enum {
	MLX5_EQ_VEC_PAGES	 = 0,
	MLX5_EQ_VEC_CMD		 = 1,
	MLX5_EQ_VEC_ASYNC	 = 2,
	MLX5_EQ_VEC_COMP_BASE,
};

enum {
	MLX5_MAX_EQ_NAME	= 20
};

enum {
	MLX5_ATOMIC_MODE_IB_COMP	= 1 << 16,
	MLX5_ATOMIC_MODE_CX		= 2 << 16,
	MLX5_ATOMIC_MODE_8B		= 3 << 16,
	MLX5_ATOMIC_MODE_16B		= 4 << 16,
	MLX5_ATOMIC_MODE_32B		= 5 << 16,
	MLX5_ATOMIC_MODE_64B		= 6 << 16,
	MLX5_ATOMIC_MODE_128B		= 7 << 16,
	MLX5_ATOMIC_MODE_256B		= 8 << 16,
};

enum {
	MLX5_CMD_OP_QUERY_HCA_CAP		= 0x100,
	MLX5_CMD_OP_QUERY_ADAPTER		= 0x101,
	MLX5_CMD_OP_INIT_HCA			= 0x102,
	MLX5_CMD_OP_TEARDOWN_HCA		= 0x103,
	MLX5_CMD_OP_ENABLE_HCA			= 0x104,
	MLX5_CMD_OP_DISABLE_HCA			= 0x105,
	MLX5_CMD_OP_QUERY_PAGES			= 0x107,
	MLX5_CMD_OP_MANAGE_PAGES		= 0x108,
	MLX5_CMD_OP_SET_HCA_CAP		= 0x109,

	MLX5_CMD_OP_CREATE_MKEY			= 0x200,
	MLX5_CMD_OP_QUERY_MKEY			= 0x201,
	MLX5_CMD_OP_DESTROY_MKEY		= 0x202,
	MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS	= 0x203,

	MLX5_CMD_OP_CREATE_EQ			= 0x301,
	MLX5_CMD_OP_DESTROY_EQ			= 0x302,
	MLX5_CMD_OP_QUERY_EQ			= 0x303,

	MLX5_CMD_OP_CREATE_CQ			= 0x400,
	MLX5_CMD_OP_DESTROY_CQ			= 0x401,
	MLX5_CMD_OP_QUERY_CQ			= 0x402,
	MLX5_CMD_OP_MODIFY_CQ			= 0x403,

	MLX5_CMD_OP_CREATE_QP			= 0x500,
	MLX5_CMD_OP_DESTROY_QP			= 0x501,
	MLX5_CMD_OP_RST2INIT_QP			= 0x502,
	MLX5_CMD_OP_INIT2RTR_QP			= 0x503,
	MLX5_CMD_OP_RTR2RTS_QP			= 0x504,
	MLX5_CMD_OP_RTS2RTS_QP			= 0x505,
	MLX5_CMD_OP_SQERR2RTS_QP		= 0x506,
	MLX5_CMD_OP_2ERR_QP			= 0x507,
	MLX5_CMD_OP_RTS2SQD_QP			= 0x508,
	MLX5_CMD_OP_SQD2RTS_QP			= 0x509,
	MLX5_CMD_OP_2RST_QP			= 0x50a,
	MLX5_CMD_OP_QUERY_QP			= 0x50b,
	MLX5_CMD_OP_CONF_SQP			= 0x50c,
	MLX5_CMD_OP_MAD_IFC			= 0x50d,
	MLX5_CMD_OP_INIT2INIT_QP		= 0x50e,
	MLX5_CMD_OP_SUSPEND_QP			= 0x50f,
	MLX5_CMD_OP_UNSUSPEND_QP		= 0x510,
	MLX5_CMD_OP_SQD2SQD_QP			= 0x511,
	MLX5_CMD_OP_ALLOC_QP_COUNTER_SET	= 0x512,
	MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET	= 0x513,
	MLX5_CMD_OP_QUERY_QP_COUNTER_SET	= 0x514,

	MLX5_CMD_OP_CREATE_PSV			= 0x600,
	MLX5_CMD_OP_DESTROY_PSV			= 0x601,
	MLX5_CMD_OP_QUERY_PSV			= 0x602,
	MLX5_CMD_OP_QUERY_SIG_RULE_TABLE	= 0x603,
	MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE	= 0x604,

	MLX5_CMD_OP_CREATE_SRQ			= 0x700,
	MLX5_CMD_OP_DESTROY_SRQ			= 0x701,
	MLX5_CMD_OP_QUERY_SRQ			= 0x702,
	MLX5_CMD_OP_ARM_RQ			= 0x703,
	MLX5_CMD_OP_RESIZE_SRQ			= 0x704,

	MLX5_CMD_OP_ALLOC_PD			= 0x800,
	MLX5_CMD_OP_DEALLOC_PD			= 0x801,
	MLX5_CMD_OP_ALLOC_UAR			= 0x802,
	MLX5_CMD_OP_DEALLOC_UAR			= 0x803,
	MLX5_CMD_OP_ACCESS_REG			= 0x805,

	MLX5_CMD_OP_ATTACH_TO_MCG		= 0x806,
	MLX5_CMD_OP_DETACH_FROM_MCG		= 0x807,

	MLX5_CMD_OP_ALLOC_XRCD			= 0x80e,
	MLX5_CMD_OP_DEALLOC_XRCD		= 0x80f,

	MLX5_CMD_OP_MAX				= 0x810,
};

enum {
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PMLP		 = 0, /* TBD */
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
};

enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};

struct mlx5_field_desc {
	struct dentry	       *dent;
	int			i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	void		       *object;
	enum dbg_rsc_type	type;
	struct dentry	       *root;
	struct mlx5_field_desc	fields[];
};

enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
};

struct mlx5_uuar_info {
	struct mlx5_uar	       *uars;
	int			num_uars;
	int			num_low_latency_uuars;
	unsigned long	       *bitmap;
	unsigned int	       *count;
	struct mlx5_bf	       *bfs;

	/* protect uuar allocation data structs */
	struct mutex		lock;
};

struct mlx5_bf {
	void __iomem	       *reg;
	void __iomem	       *regreg;
	int			buf_size;
	struct mlx5_uar	       *uar;
	unsigned long		offset;
	int			need_lock;
	/* protect blue flame buffer selection when needed */
	spinlock_t		lock;

	/* serialize 64 bit writes when done as two 32 bit accesses */
	spinlock_t		lock32;
	int			uuarn;
};

struct mlx5_cmd_first {
	__be32		data[4];
};

struct mlx5_cmd_msg {
	struct list_head		list;
	struct cache_ent	       *cache;
	u32				len;
	struct mlx5_cmd_first		first;
	struct mlx5_cmd_mailbox	       *next;
};

struct mlx5_cmd_debug {
	struct dentry	       *dbg_root;
	struct dentry	       *dbg_in;
	struct dentry	       *dbg_out;
	struct dentry	       *dbg_outlen;
	struct dentry	       *dbg_status;
	struct dentry	       *dbg_run;
	void		       *in_msg;
	void		       *out_msg;
	u8			status;
	u16			inlen;
	u16			outlen;
};

struct cache_ent {
	/* protect block chain allocations */
	spinlock_t		lock;
	struct list_head	head;
};

struct cmd_msg_cache {
	struct cache_ent	large;
	struct cache_ent	med;
};

struct mlx5_cmd_stats {
	u64		sum;
	u64		n;
	struct dentry  *root;
	struct dentry  *avg;
	struct dentry  *count;
	/* protect command average calculations */
	spinlock_t	lock;
};

struct mlx5_cmd {
	void	       *cmd_buf;
	dma_addr_t	dma;
	u16		cmdif_rev;
	u8		log_sz;
	u8		log_stride;
	int		max_reg_cmds;
	int		events;
	u32 __iomem    *vector;

	/* protect command queue allocations */
	spinlock_t	alloc_lock;

	/* protect token allocations */
	spinlock_t	token_lock;
	u8		token;
	unsigned long	bitmask;
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	int	mode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct pci_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache;
	int checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};

struct mlx5_port_caps {
	int	gid_table_len;
	int	pkey_table_len;
};

struct mlx5_caps {
	u8	log_max_eq;
	u8	log_max_cq;
	u8	log_max_qp;
	u8	log_max_mkey;
	u8	log_max_pd;
	u8	log_max_srq;
	u32	max_cqes;
	int	max_wqes;
	int	max_sq_desc_sz;
	int	max_rq_desc_sz;
	u64	flags;
	u16	stat_rate_support;
	int	log_max_msg;
	int	num_ports;
	int	max_ra_res_qp;
	int	max_ra_req_qp;
	int	max_srq_wqes;
	int	bf_reg_size;
	int	bf_regs_per_page;
	struct mlx5_port_caps	port[MLX5_MAX_PORTS];
	u8			ext_port_cap[MLX5_MAX_PORTS];
	int	max_vf;
	u32	reserved_lkey;
	u8	local_ca_ack_delay;
	u8	log_max_mcg;
	u16	max_qp_mcg;
	int	min_page_sz;
};

struct mlx5_cmd_mailbox {
	void	       *buf;
	dma_addr_t	dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx5_buf {
	struct mlx5_buf_list	direct;
	struct mlx5_buf_list   *page_list;
	int			nbufs;
	int			npages;
	int			page_shift;
	int			size;
};

struct mlx5_eq {
	struct mlx5_core_dev   *dev;
	__be32 __iomem	       *doorbell;
	u32			cons_index;
	struct mlx5_buf		buf;
	int			size;
	u8			irqn;
	u8			eqn;
	int			nent;
	u64			mask;
	char			name[MLX5_MAX_EQ_NAME];
	struct list_head	list;
	int			index;
	struct mlx5_rsc_debug	*dbg;
};

struct mlx5_core_mr {
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
	u32			access;
};

struct mlx5_core_srq {
	u32		srqn;
	int		max;
	int		max_gs;
	int		max_avail_gather;
	int		wqe_shift;
	void (*event)	(struct mlx5_core_srq *, enum mlx5_event);

	atomic_t		refcount;
	struct completion	free;
};

struct mlx5_eq_table {
	void __iomem	       *update_ci;
	void __iomem	       *update_arm_ci;
	struct list_head       *comp_eq_head;
	struct mlx5_eq		pages_eq;
	struct mlx5_eq		async_eq;
	struct mlx5_eq		cmd_eq;
	struct msix_entry	*msix_arr;
	int			num_comp_vectors;
	/* protect EQs list */
	spinlock_t		lock;
};

struct mlx5_uar {
	u32			index;
	struct list_head	bf_list;
	unsigned		free_bf_bmap;
	void __iomem	       *wc_map;
	void __iomem	       *map;
};

struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	struct list_head		list;
	u32				prev;
	int				miss_counter;
};

struct mlx5_cq_table {
	/* protect radix tree */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_qp_table {
	/* protect radix tree */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_srq_table {
	/* protect radix tree */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_priv {
	char			name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table	eq_table;
	struct mlx5_uuar_info	uuari;
	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);

	/* pages stuff */
	struct workqueue_struct *pg_wq;
	struct rb_root		page_root;
	int			fw_pages;
	int			reg_pages;

	struct mlx5_core_health health;

	struct mlx5_srq_table	srq_table;

	/* start: qp stuff */
	struct mlx5_qp_table	qp_table;
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;
	/* end: qp stuff */

	/* start: cq stuff */
	struct mlx5_cq_table	cq_table;
	/* end: cq stuff */

	/* start: alloc stuff */
	struct mutex		pgdir_mutex;
	struct list_head	pgdir_list;
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	/* protect mkey key part */
	spinlock_t		mkey_lock;
	u8			mkey_key;
};

struct mlx5_core_dev {
	struct pci_dev	       *pdev;
	u8			rev_id;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct mlx5_caps	caps;
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	void			(*event) (struct mlx5_core_dev *dev,
					  enum mlx5_dev_event event,
					  void *data);
	struct mlx5_priv	priv;
	struct mlx5_profile	*profile;
	atomic_t		num_qps;
};

struct mlx5_db {
	__be32			*db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
};

enum {
	MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

struct mlx5_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
	__be32		       *db_page;
	dma_addr_t		db_dma;
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

struct mlx5_cmd_work_ent {
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	mlx5_cmd_cbk_t		callback;
	void		       *context;
	int			idx;
	struct completion	done;
	struct mlx5_cmd        *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
	int			ret;
	int			page_queue;
	u8			status;
	u8			token;
	struct timespec		ts1;
	struct timespec		ts2;
};

struct mlx5_pas {
	u64	pa;
	u8	log_sz;
};

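/*
 * Return the kernel virtual address @offset bytes into @buf. A buffer
 * is either physically contiguous (the direct mapping is used) or
 * fragmented into a page list, in which case the page holding the
 * offset is looked up and the in-page remainder is added.
 */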
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
	if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
		return buf->direct.buf + offset;
	else
		return buf->page_list[offset >> PAGE_SHIFT].buf +
			(offset & (PAGE_SIZE - 1));
}

extern struct workqueue_struct *mlx5_core_wq;

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field

struct ib_field {
	size_t struct_offset_bytes;
	size_t struct_size_bytes;
	int    offset_bits;
	int    size_bits;
};
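
/*
 * STRUCT_FIELD() fills the first two members of struct ib_field from an
 * ib_unpacked_* structure; offset_bits and size_bits give the field's
 * position on the wire. A minimal sketch, assuming struct
 * ib_unpacked_lrh from <rdma/ib_pack.h> (the LRH virtual lane is 4 bits
 * at offset 0):
 *
 *	static const struct ib_field lrh_table[] = {
 *		{ STRUCT_FIELD(lrh, virtual_lane),
 *		  .offset_bits = 0,
 *		  .size_bits   = 4 },
 *	};
 */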

static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
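
/*
 * Illustrative use of the revision readers above, e.g. from a probe
 * path:
 *
 *	dev_info(&dev->pdev->dev, "firmware %d.%d.%d, cmdif rev %d\n",
 *		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev),
 *		 cmdif_rev(dev));
 */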

static inline void *mlx5_vzalloc(unsigned long size)
{
	void *rtn;

	rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!rtn)
		rtn = vzalloc(size);
	return rtn;
}

static inline void mlx5_vfree(const void *addr)
{
	if (addr && is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
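
/*
 * mlx5_vzalloc() falls back to vzalloc() when the contiguous kzalloc()
 * fails, so allocations must be released with mlx5_vfree(), which picks
 * vfree() or kfree() based on the address. A typical (illustrative)
 * pairing, with in/inlen standing in for a caller's command buffer:
 *
 *	in = mlx5_vzalloc(inlen);
 *	if (!in)
 *		return -ENOMEM;
 *	...
 *	mlx5_vfree(in);
 */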
646 
647 int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev);
648 void mlx5_dev_cleanup(struct mlx5_core_dev *dev);
649 int mlx5_cmd_init(struct mlx5_core_dev *dev);
650 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
651 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
652 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
653 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
654 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
655 		  int out_size);
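
/*
 * Commands are issued by filling an inbox and passing it to
 * mlx5_cmd_exec() together with an outbox for the result. A minimal
 * sketch, assuming the mlx5_enable_hca_mbox_{in,out} layouts from
 * <linux/mlx5/device.h>:
 *
 *	struct mlx5_enable_hca_mbox_in in;
 *	struct mlx5_enable_hca_mbox_out out;
 *	int err;
 *
 *	memset(&in, 0, sizeof(in));
 *	memset(&out, 0, sizeof(out));
 *	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 *	if (!err)
 *		err = mlx5_cmd_status_to_err(&out.hdr);
 */
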
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
void mlx5_health_cleanup(void);
void __init mlx5_health_init(void);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
		   struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
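
/*
 * mlx5_buf_alloc() backs EQ/CQ/WQ rings with DMA memory, contiguous up
 * to max_direct bytes and page-fragmented beyond that (see struct
 * mlx5_buf above); release with mlx5_buf_free(). An illustrative
 * pairing (the 2 * PAGE_SIZE hint is just an example value):
 *
 *	struct mlx5_buf buf;
 *	int err;
 *
 *	err = mlx5_buf_alloc(dev, size, 2 * PAGE_SIZE, &buf);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_buf_free(dev, &buf);
 */
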
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_create_srq_mbox_in *in, int inlen);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_query_srq_mbox_out *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
			  struct mlx5_create_mkey_mbox_in *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
			 struct mlx5_query_mkey_mbox_out *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
			     u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
		      u16 opmod, int port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s16 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);
int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       struct mlx5_query_eq_mbox_out *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
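
/*
 * Doorbell records are carved out of shared pages tracked by struct
 * mlx5_db_pgdir above. An illustrative alloc/free pairing:
 *
 *	struct mlx5_db db;
 *	int err;
 *
 *	err = mlx5_db_alloc(dev, &db);
 *	if (err)
 *		return err;
 *	db.db[0] = 0;
 *	...
 *	mlx5_db_free(dev, &db);
 */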

typedef void (*health_handler_t)(struct pci_dev *pdev,
				 struct health_buffer __iomem *buf, int size);
int mlx5_register_health_report_handler(health_handler_t handler);
void mlx5_unregister_health_report_handler(void);
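
/*
 * A minimal sketch of a health report handler (my_handler is
 * hypothetical); handlers presumably run when the periodic health poll
 * (mlx5_start_health_poll() above) detects a problem:
 *
 *	static void my_handler(struct pci_dev *pdev,
 *			       struct health_buffer __iomem *buf, int size)
 *	{
 *		dev_err(&pdev->dev, "health report received, size %d\n", size);
 *	}
 *
 *	err = mlx5_register_health_report_handler(my_handler);
 */
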
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}
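
/*
 * An mkey carries a 24-bit index in its upper bits and an 8-bit
 * variable "key" part in its low byte (see mkey_key in struct
 * mlx5_priv). The helpers above convert between the two by shifting the
 * key byte out and back in, e.g.:
 *
 *	u32 idx = mlx5_mkey_to_idx(mkey);	(key byte dropped)
 *	u32 base = mlx5_idx_to_mkey(idx);	(key byte cleared)
 */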

enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_CMDIF_CSUM	= (u64)1 << 1,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 2,
};

enum {
	MAX_MR_CACHE_ENTRIES	= 16,
};

struct mlx5_profile {
	u64	mask;
	u32	log_max_qp;
	int	cmdif_csum;
	struct {
		int	size;
		int	limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

#endif /* MLX5_DRIVER_H */