/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/auxiliary_bus.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
#include <net/devlink.h>

#define MLX5_ADEV_NAME "mlx5_core"

#define MLX5_IRQ_EQ_CTRL (U8_MAX)

enum {
	MLX5_BOARD_ID_LEN = 64,
};

enum {
	/* one minute for the sake of bringup. Generally, commands must always
	 * complete and we may need to increase this timeout value
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,
};

enum {
	CMD_OWNER_SW		= 0x0,
	CMD_OWNER_HW		= 0x1,
	CMD_STATUS_SUCCESS	= 0,
};

enum mlx5_sqp_t {
	MLX5_SQP_SMI		= 0,
	MLX5_SQP_GSI		= 1,
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SNIFFER	= 3,
	MLX5_SQP_SYNC_UMR	= 4,
};

enum {
	MLX5_MAX_PORTS	= 2,
};

enum {
	MLX5_ATOMIC_MODE_OFFSET = 16,
	MLX5_ATOMIC_MODE_IB_COMP = 1,
	MLX5_ATOMIC_MODE_CX = 2,
	MLX5_ATOMIC_MODE_8B = 3,
	MLX5_ATOMIC_MODE_16B = 4,
	MLX5_ATOMIC_MODE_32B = 5,
	MLX5_ATOMIC_MODE_64B = 6,
	MLX5_ATOMIC_MODE_128B = 7,
	MLX5_ATOMIC_MODE_256B = 8,
};

enum {
	MLX5_REG_QPTS            = 0x4002,
	MLX5_REG_QETCR		 = 0x4005,
	MLX5_REG_QTCT		 = 0x400a,
	MLX5_REG_QPDPM           = 0x4013,
	MLX5_REG_QCAM            = 0x4019,
	MLX5_REG_DCBX_PARAM      = 0x4020,
	MLX5_REG_DCBX_APP        = 0x4021,
	MLX5_REG_FPGA_CAP	 = 0x4022,
	MLX5_REG_FPGA_CTRL	 = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
	MLX5_REG_CORE_DUMP	 = 0x402e,
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PFCC            = 0x5007,
	MLX5_REG_PPCNT		 = 0x5008,
	MLX5_REG_PPTB            = 0x500b,
	MLX5_REG_PBMC            = 0x500c,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PVLC		 = 0x500f,
	MLX5_REG_PCMR		 = 0x5041,
	MLX5_REG_PDDR		 = 0x5031,
	MLX5_REG_PMLP		 = 0x5002,
	MLX5_REG_PPLM		 = 0x5023,
	MLX5_REG_PCAM		 = 0x507f,
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA		 = 0x9014,
	MLX5_REG_MFRL		 = 0x9028,
	MLX5_REG_MLCR		 = 0x902b,
	MLX5_REG_MTRC_CAP	 = 0x9040,
	MLX5_REG_MTRC_CONF	 = 0x9041,
	MLX5_REG_MTRC_STDB	 = 0x9042,
	MLX5_REG_MTRC_CTRL	 = 0x9043,
	MLX5_REG_MPEIN		 = 0x9050,
	MLX5_REG_MPCNT		 = 0x9051,
	MLX5_REG_MTPPS		 = 0x9053,
	MLX5_REG_MTPPSE		 = 0x9054,
	MLX5_REG_MTUTC		 = 0x9055,
	MLX5_REG_MPEGC		 = 0x9056,
	MLX5_REG_MCQS		 = 0x9060,
	MLX5_REG_MCQI		 = 0x9061,
	MLX5_REG_MCC		 = 0x9062,
	MLX5_REG_MCDA		 = 0x9063,
	MLX5_REG_MCAM		 = 0x907f,
	MLX5_REG_MIRC		 = 0x9162,
	MLX5_REG_SBCAM		 = 0xB01F,
	MLX5_REG_RESOURCE_DUMP   = 0xC000,
};

enum mlx5_qpts_trust_state {
	MLX5_QPTS_TRUST_PCP  = 1,
	MLX5_QPTS_TRUST_DSCP = 2,
};

enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST  = 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO  = 0x3,
};

enum {
	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,
	MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
	MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
};

enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};

enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};

enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_UP		= 1,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};

enum mlx5_coredev_type {
	MLX5_COREDEV_PF,
	MLX5_COREDEV_VF,
	MLX5_COREDEV_SF,
};

struct mlx5_field_desc {
	int			i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	void		       *object;
	enum dbg_rsc_type	type;
	struct dentry	       *root;
	struct mlx5_field_desc	fields[];
};

enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
	MLX5_DEV_EVENT_PORT_AFFINITY = 129,
};

enum mlx5_port_status {
	MLX5_PORT_UP        = 1,
	MLX5_PORT_DOWN      = 2,
};

enum mlx5_cmdif_state {
	MLX5_CMDIF_STATE_UNINITIALIZED,
	MLX5_CMDIF_STATE_UP,
	MLX5_CMDIF_STATE_DOWN,
};

struct mlx5_cmd_first {
	__be32		data[4];
};

struct mlx5_cmd_msg {
	struct list_head		list;
	struct cmd_msg_cache	       *parent;
	u32				len;
	struct mlx5_cmd_first		first;
	struct mlx5_cmd_mailbox	       *next;
};

struct mlx5_cmd_debug {
	struct dentry	       *dbg_root;
	void		       *in_msg;
	void		       *out_msg;
	u8			status;
	u16			inlen;
	u16			outlen;
};

struct cmd_msg_cache {
	/* protect block chain allocations
	 */
	spinlock_t		lock;
	struct list_head	head;
	unsigned int		max_inbox_size;
	unsigned int		num_ent;
};

enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};

struct mlx5_cmd_stats {
	u64		sum;
	u64		n;
	struct dentry  *root;
	/* protect command average calculations */
	spinlock_t	lock;
};

struct mlx5_cmd {
	struct mlx5_nb    nb;

	enum mlx5_cmdif_state	state;
	void	       *cmd_alloc_buf;
	dma_addr_t	alloc_dma;
	int		alloc_size;
	void	       *cmd_buf;
	dma_addr_t	dma;
	u16		cmdif_rev;
	u8		log_sz;
	u8		log_stride;
	int		max_reg_cmds;
	int		events;
	u32 __iomem    *vector;

	/* protect command queue allocations
	 */
	spinlock_t	alloc_lock;

	/* protect token allocations
	 */
	spinlock_t	token_lock;
	u8		token;
	unsigned long	bitmask;
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	int	mode;
	u16     allowed_opcode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int checksum_disabled;
	struct mlx5_cmd_stats *stats;
};

struct mlx5_cmd_mailbox {
	void	       *buf;
	dma_addr_t	dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx5_frag_buf {
	struct mlx5_buf_list	*frags;
	int			npages;
	int			size;
	u8			page_shift;
};

struct mlx5_frag_buf_ctrl {
	struct mlx5_buf_list   *frags;
	u32			sz_m1;
	u16			frag_sz_m1;
	u16			strides_offset;
	u8			log_sz;
	u8			log_stride;
	u8			log_frag_strides;
};

struct mlx5_core_psv {
	u32	psv_idx;
	struct psv_layout {
		u32	pd;
		u16	syndrome;
		u16	reserved;
		u16	bg;
		u16	app_tag;
		u32	ref_tag;
	} psv;
};

struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;
	struct mlx5_core_psv	psv_wire;
	struct ib_sig_err       err_item;
	bool			sig_status_checked;
	bool			sig_err_exists;
	u32			sigerr_count;
};

enum {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
	MLX5_MKEY_INDIRECT_DEVX,
};

struct mlx5_core_mkey {
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
	u32			type;
	struct wait_queue_head wait;
	refcount_t usecount;
};

#define MLX5_24BIT_MASK		((1 << 24) - 1)

enum mlx5_res_type {
	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ	= 3,
	MLX5_RES_XSRQ	= 4,
	MLX5_RES_XRQ	= 5,
	MLX5_RES_DCT	= MLX5_EVENT_QUEUE_TYPE_DCT,
};

struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	refcount_t		refcount;
	struct completion	free;
};

struct mlx5_uars_page {
	void __iomem	       *map;
	bool			wc;
	u32			index;
	struct list_head	list;
	unsigned int		bfregs;
	unsigned long	       *reg_bitmap; /* for non fast path bf regs */
	unsigned long	       *fp_bitmap;
	unsigned int		reg_avail;
	unsigned int		fp_avail;
	struct kref		ref_count;
	struct mlx5_core_dev   *mdev;
};

struct mlx5_bfreg_head {
	/* protect blue flame registers allocations */
	struct mutex		lock;
	struct list_head	list;
};

struct mlx5_bfreg_data {
	struct mlx5_bfreg_head	reg_head;
	struct mlx5_bfreg_head	wc_head;
};

struct mlx5_sq_bfreg {
	void __iomem	       *map;
	struct mlx5_uars_page  *up;
	bool			wc;
	u32			index;
	unsigned int		offset;
};

struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	u32				prev;
	int				miss_counter;
	u8				synd;
	u32				fatal_error;
	u32				crdump_size;
	/* wq spinlock to synchronize draining */
	spinlock_t			wq_lock;
	struct workqueue_struct	       *wq;
	unsigned long			flags;
	struct work_struct		fatal_report_work;
	struct work_struct		report_work;
	struct devlink_health_reporter *fw_reporter;
	struct devlink_health_reporter *fw_fatal_reporter;
};

struct mlx5_qp_table {
	struct notifier_block   nb;

	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_vf_context {
	int	enabled;
	u64	port_guid;
	u64	node_guid;
	/* Valid bits are used to validate administrative guid only.
	 * Enabled after ndo_set_vf_guid
	 */
	u8	port_guid_valid:1;
	u8	node_guid_valid:1;
	enum port_state_policy	policy;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context	*vfs_ctx;
	int			num_vfs;
	u16			max_vfs;
};

struct mlx5_fc_pool {
	struct mlx5_core_dev *dev;
	struct mutex pool_lock; /* protects pool lists */
	struct list_head fully_used;
	struct list_head partially_used;
	struct list_head unused;
	int available_fcs;
	int used_fcs;
	int threshold;
};

struct mlx5_fc_stats {
	spinlock_t counters_idr_lock; /* protects counters_idr */
	struct idr counters_idr;
	struct list_head counters;
	struct llist_head addlist;
	struct llist_head dellist;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
	u32 *bulk_query_out;
	struct mlx5_fc_pool fc_pool;
};

struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_devcom;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
struct mlx5_vhca_state_notifier;
struct mlx5_sf_dev_table;
struct mlx5_sf_hw_table;
struct mlx5_sf_table;

struct mlx5_rate_limit {
	u32			rate;
	u32			max_burst_sz;
	u16			typical_pkt_sz;
};

struct mlx5_rl_entry {
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
	u64 refcount;
	u16 index;
	u16 uid;
	u8 dedicated : 1;
};

struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex            rl_lock;
	u16                     max_size;
	u32                     max_rate;
	u32                     min_rate;
	struct mlx5_rl_entry   *rl_entry;
	u64 refcount;
};

struct mlx5_core_roce {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_handle *allow_rule;
};

enum {
	MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
	MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
	/* Set during device detach to block any further devices
	 * creation/deletion on drivers rescan. Unset during device attach.
	 */
	MLX5_PRIV_FLAGS_DETACH = 1 << 2,
};

struct mlx5_adev {
	struct auxiliary_device adev;
	struct mlx5_core_dev *mdev;
	int idx;
};

struct mlx5_ft_pool;
struct mlx5_priv {
	/* IRQ table is valid only for real PCI devices (PF or VF) */
	struct mlx5_irq_table   *irq_table;
	struct mlx5_eq_table	*eq_table;

	/* pages stuff */
	struct mlx5_nb          pg_nb;
	struct workqueue_struct *pg_wq;
	struct xarray           page_root_xa;
	int			fw_pages;
	atomic_t		reg_pages;
	struct list_head	free_list;
	int			vfs_pages;
	int			host_pf_pages;

	struct mlx5_core_health health;
	struct list_head	traps;

	/* start: qp stuff */
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;
	/* end: qp stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex            alloc_mutex;
	int                     numa_node;

	struct mutex            pgdir_mutex;
	struct list_head        pgdir_list;
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	struct list_head        ctx_list;
	spinlock_t              ctx_lock;
	struct mlx5_adev       **adev;
	int			adev_idx;
	struct mlx5_events      *events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs        *mpfs;
	struct mlx5_eswitch     *eswitch;
	struct mlx5_core_sriov	sriov;
	struct mlx5_lag		*lag;
	u32			flags;
	struct mlx5_devcom	*devcom;
	struct mlx5_fw_reset	*fw_reset;
	struct mlx5_core_roce	roce;
	struct mlx5_fc_stats		fc_stats;
	struct mlx5_rl_table            rl_table;
	struct mlx5_ft_pool		*ft_pool;

	struct mlx5_bfreg_data		bfregs;
	struct mlx5_uars_page	       *uar;
#ifdef CONFIG_MLX5_SF
	struct mlx5_vhca_state_notifier *vhca_state_notifier;
	struct mlx5_sf_dev_table *sf_dev_table;
	struct mlx5_core_dev *parent_mdev;
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
	struct mlx5_sf_hw_table *sf_hw_table;
	struct mlx5_sf_table *sf_table;
#endif
};

enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP = 1,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_UP = BIT(0),
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};

struct mlx5_td {
	/* protects changes to the tirs list during tirs refresh */
	struct mutex     list_lock;
	struct list_head tirs_list;
	u32              tdn;
};

struct mlx5e_resources {
	struct mlx5e_hw_objs {
		u32                        pdn;
		struct mlx5_td             td;
		struct mlx5_core_mkey      mkey;
		struct mlx5_sq_bfreg       bfreg;
	} hw_objs;
	struct devlink_port dl_port;
	struct net_device *uplink_netdev;
};

enum mlx5_sw_icm_type {
	MLX5_SW_ICM_TYPE_STEERING,
	MLX5_SW_ICM_TYPE_HEADER_MODIFY,
};

#define MLX5_MAX_RESERVED_GIDS 8

struct mlx5_rsvd_gids {
	unsigned int start;
	unsigned int count;
	struct ida ida;
};

#define MAX_PIN_NUM	8
struct mlx5_pps {
	u8                         pin_caps[MAX_PIN_NUM];
	struct work_struct         out_work;
	u64                        start[MAX_PIN_NUM];
	u8                         enabled;
};

struct mlx5_timer {
	struct cyclecounter        cycles;
	struct timecounter         tc;
	u32                        nominal_c_mult;
	unsigned long              overflow_period;
	struct delayed_work        overflow_work;
};

struct mlx5_clock {
	struct mlx5_nb             pps_nb;
	seqlock_t                  lock;
	struct hwtstamp_config     hwtstamp_config;
	struct ptp_clock          *ptp;
	struct ptp_clock_info      ptp_info;
	struct mlx5_pps            pps_info;
	struct mlx5_timer          timer;
};

struct mlx5_dm;
struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;

#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))

enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};

enum {
	MR_CACHE_LAST_STD_ENTRY = 20,
	MLX5_IMR_MTT_CACHE_ENTRY,
	MLX5_IMR_KSM_CACHE_ENTRY,
	MAX_MR_CACHE_ENTRIES
};

struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;
	struct {
		int	size;
		int	limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

struct mlx5_hca_cap {
	u32 cur[MLX5_UN_SZ_DW(hca_cap_union)];
	u32 max[MLX5_UN_SZ_DW(hca_cap_union)];
};

struct mlx5_core_dev {
	struct device *device;
	enum mlx5_coredev_type coredev_type;
	struct pci_dev	       *pdev;
	/* sync pci state */
	struct mutex		pci_status_mutex;
	enum mlx5_pci_status	pci_status;
	u8			rev_id;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct {
		struct mlx5_hca_cap *hca[MLX5_CAP_NUM];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
		u8  embedded_cpu;
	} caps;
	u64			sys_image_guid;
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	phys_addr_t             bar_addr;
	enum mlx5_device_state	state;
	/* sync interface state */
	struct mutex		intf_state_mutex;
	unsigned long		intf_state;
	struct mlx5_priv	priv;
	struct mlx5_profile	profile;
	u32			issi;
	struct mlx5e_resources  mlx5e_res;
	struct mlx5_dm          *dm;
	struct mlx5_vxlan       *vxlan;
	struct mlx5_geneve      *geneve;
	struct {
		struct mlx5_rsvd_gids	reserved_gids;
		u32			roce_en;
	} roce;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
#ifdef CONFIG_MLX5_ACCEL
	const struct mlx5_accel_ipsec_ops *ipsec_ops;
#endif
	struct mlx5_clock        clock;
	struct mlx5_ib_clock_info  *clock_info;
	struct mlx5_fw_tracer   *tracer;
	struct mlx5_rsc_dump    *rsc_dump;
	u32                      vsc_addr;
	struct mlx5_hv_vhca	*hv_vhca;
};

struct mlx5_db {
	__be32			*db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

enum {
	MLX5_CMD_ENT_STATE_PENDING_COMP,
};

struct mlx5_cmd_work_ent {
	unsigned long		state;
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	void		       *uout;
	int			uout_size;
	mlx5_cmd_cbk_t		callback;
	struct delayed_work	cb_timeout_work;
	void		       *context;
	int			idx;
	struct completion	handling;
	struct completion	done;
	struct mlx5_cmd        *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
	int			ret;
	int			page_queue;
	u8			status;
	u8			token;
	u64			ts1;
	u64			ts2;
	u16			op;
	bool			polling;
	/* Track the max comp handlers */
	refcount_t              refcnt;
};

struct mlx5_pas {
	u64	pa;
	u8	log_sz;
};

enum phy_port_state {
	MLX5_AAA_111
};

struct mlx5_hca_vport_context {
	u32			field_select;
	bool			sm_virt_aware;
	bool			has_smi;
	bool			has_raw;
	enum port_state_policy	policy;
	enum phy_port_state	phys_state;
	enum ib_port_state	vport_state;
	u8			port_physical_state;
	u64			sys_image_guid;
	u64			port_guid;
	u64			node_guid;
	u32			cap_mask1;
	u32			cap_mask1_perm;
	u16			cap_mask2;
	u16			cap_mask2_perm;
	u16			lid;
	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	u8			lmc;
	u8			subnet_timeout;
	u16			sm_lid;
	u8			sm_sl;
	u16			qkey_violation_counter;
	u16			pkey_violation_counter;
	bool			grh_required;
};

static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
{
	return buf->frags->buf + offset;
}

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
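/*
 * Usage sketch for STRUCT_FIELD() (illustrative only, not part of this
 * API): describing one field of the IB LRH header for a packing table.
 * The "destination_lid" field name is assumed from <rdma/ib_pack.h>:
 *
 *	struct ib_field lrh_table[] = {
 *		{ STRUCT_FIELD(lrh, destination_lid),
 *		  .offset_words = 0, .offset_bits = 16, .size_bits = 16 },
 *	};
 */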

static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}
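/*
 * The three helpers above read the running firmware revision from the
 * init segment; a typical (illustrative) use is logging:
 *
 *	dev_info(dev->device, "firmware version: %d.%d.%d\n",
 *		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
 */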

static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}

static inline u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{
	return ((u32)1 << log_sz) << log_stride;
}
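/*
 * Worked example for wq_get_byte_sz(): a work queue with log_sz = 10
 * (1024 entries) and log_stride = 6 (64-byte strides) occupies
 * ((u32)1 << 10) << 6 = 65536 bytes.
 */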

static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
					u8 log_stride, u8 log_sz,
					u16 strides_offset,
					struct mlx5_frag_buf_ctrl *fbc)
{
	fbc->frags      = frags;
	fbc->log_stride = log_stride;
	fbc->log_sz     = log_sz;
	fbc->sz_m1	= (1 << fbc->log_sz) - 1;
	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
	fbc->frag_sz_m1	= (1 << fbc->log_frag_strides) - 1;
	fbc->strides_offset = strides_offset;
}

static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
				 u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
{
	mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
}

static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
					  u32 ix)
{
	unsigned int frag;

	ix  += fbc->strides_offset;
	frag = ix >> fbc->log_frag_strides;

	return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}

static inline u32
mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
{
	u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;

	return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}
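/*
 * Fragmented-buffer sketch (illustrative only): lay a ring of 256
 * strides of 64 bytes (log_sz = 8, log_stride = 6) over a buffer
 * obtained from mlx5_frag_buf_alloc_node(), then resolve stride "ix"
 * to a pointer; error handling is elided:
 *
 *	struct mlx5_frag_buf buf;
 *	struct mlx5_frag_buf_ctrl fbc;
 *	void *wqe;
 *
 *	err = mlx5_frag_buf_alloc_node(dev, 256 * 64, &buf, node);
 *	mlx5_init_fbc(buf.frags, 6, 8, &fbc);
 *	wqe = mlx5_frag_buf_get_wqe(&fbc, ix & fbc.sz_m1);
 */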

enum {
	CMD_ALLOWED_OPCODE_ALL,
};

void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);

struct mlx5_async_ctx {
	struct mlx5_core_dev *dev;
	atomic_t num_inflight;
	struct wait_queue_head wait;
};

struct mlx5_async_work;

typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);

struct mlx5_async_work {
	struct mlx5_async_ctx *ctx;
	mlx5_async_cbk_t user_callback;
};

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx);
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work);
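/*
 * Async command sketch (illustrative only): "struct my_cmd_work" and
 * my_cmd_done() are hypothetical caller-side names.  Callers typically
 * embed mlx5_async_work in their own work struct and recover it with
 * container_of(); mlx5_cmd_cleanup_async_ctx() waits for all in-flight
 * commands on the context.
 *
 *	struct my_cmd_work {
 *		struct mlx5_async_work cb;
 *	};
 *
 *	static void my_cmd_done(int status, struct mlx5_async_work *context)
 *	{
 *		struct my_cmd_work *w =
 *			container_of(context, struct my_cmd_work, cb);
 *		...
 *	}
 *
 *	mlx5_cmd_init_async_ctx(dev, &ctx);
 *	err = mlx5_cmd_exec_cb(&ctx, in, sizeof(in), out, sizeof(out),
 *			       my_cmd_done, &w->cb);
 *	...
 *	mlx5_cmd_cleanup_async_ctx(&ctx);
 */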

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);

#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                             \
	({                                                                     \
		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out,    \
			      MLX5_ST_SZ_BYTES(ifc_cmd##_out));                \
	})

#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                                     \
	({                                                                     \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};                   \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);                   \
	})
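/*
 * Command sketch (illustrative only): issuing ENABLE_HCA through the
 * size-inferring wrappers above.  The mailbox sizes are derived from
 * the mlx5_ifc.h layout name, so only the opcode needs to be set by
 * hand; error handling is elided.
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec_in(dev, enable_hca, in);
 */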

int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
		   int size, struct mlx5_frag_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);

void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);
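/*
 * Access-register sketch (illustrative only): reading the PMTU register
 * (see the MLX5_REG_* values above) for local port 1; write = 0 selects
 * a read, and the layout name comes from mlx5_ifc.h.
 *
 *	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {};
 *	u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {};
 *
 *	MLX5_SET(pmtu_reg, in, local_port, 1);
 *	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
 *				   MLX5_REG_PMTU, 0, 0);
 */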

int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
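/*
 * Doorbell record sketch (illustrative only): on success db.db points
 * at a CPU-coherent __be32 doorbell record and db.dma holds the bus
 * address to program into the queue context.
 *
 *	struct mlx5_db db;
 *
 *	err = mlx5_db_alloc(dev, &db);
 *	...
 *	mlx5_db_free(dev, &db);
 */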

const char *mlx5_command_str(int command);
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);

int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
		     struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
			 bool dedicated_entry, u16 *index);
void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
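/*
 * Mkey layout note: a 32-bit mkey carries an 8-bit variant in its low
 * byte and a 24-bit index above it, so for any idx and variant:
 *
 *	u32 mkey = mlx5_idx_to_mkey(idx) | variant;
 *
 * mlx5_mkey_to_idx(mkey) recovers idx, mlx5_mkey_variant(mkey) recovers
 * variant, and mlx5_base_mkey(mkey) masks the variant off.
 */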

/* Async-atomic event notifier used by mlx5 core to forward FW
 * events received from the event queue to mlx5 consumers.
 * Optimizes event queue dispatching.
 */
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);

/* Async-atomic event notifier used for forwarding
 * events from the event queue to the mlx5 events dispatcher,
 * eswitch, clock and others.
 */
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
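/*
 * EQ notifier sketch (illustrative only; MLX5_NB_INIT() is provided by
 * <linux/mlx5/eq.h>): subscribing to PORT_CHANGE firmware events.
 *
 *	static int port_event(struct notifier_block *nb,
 *			      unsigned long type, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct mlx5_nb port_nb;
 *
 *	MLX5_NB_INIT(&port_nb, port_event, PORT_CHANGE);
 *	mlx5_eq_notifier_register(dev, &port_nb);
 */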

/* Blocking event notifier used to forward SW events; used for the slow path */
int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
				      void *data);

int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
			   struct net_device *slave);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets);
struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			 u64 length, u32 log_alignment, u16 uid,
			 phys_addr_t *addr, u32 *obj_id);
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			   u64 length, u16 uid, phys_addr_t addr, u32 obj_id);

#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
#endif /* CONFIG_MLX5_CORE_IPOIB */
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params);

enum {
	MLX5_PCI_DEV_IS_VF		= 1 << 0,
};

static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_PF;
}

static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_VF;
}

static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu;
}

static inline bool
mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
}

static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
}

static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
{
	return dev->priv.sriov.max_vfs;
}

static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}

static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}

static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
	       MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
}

static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
}

static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_mp_slave(dev) ||
	       mlx5_core_is_mp_master(dev);
}

static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_mp_enabled(dev))
		return 1;

	return MLX5_CAP_GEN(dev, native_port_num);
}

enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;

	devlink_param_driverinit_value_get(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
					   &val);
	return val.vbool;
}

#endif /* MLX5_DRIVER_H */