xref: /linux-6.15/include/linux/mlx5/driver.h (revision a93dcaad)
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/auxiliary_bus.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
#include <net/devlink.h>

#define MLX5_ADEV_NAME "mlx5_core"

enum {
	MLX5_BOARD_ID_LEN = 64,
};

enum {
	/* one minute for the sake of bringup. Generally, commands must always
	 * complete and we may need to increase this timeout value
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,
};

enum {
	CMD_OWNER_SW		= 0x0,
	CMD_OWNER_HW		= 0x1,
	CMD_STATUS_SUCCESS	= 0,
};

enum mlx5_sqp_t {
	MLX5_SQP_SMI		= 0,
	MLX5_SQP_GSI		= 1,
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SNIFFER	= 3,
	MLX5_SQP_SYNC_UMR	= 4,
};

enum {
	MLX5_MAX_PORTS	= 2,
};

enum {
	MLX5_ATOMIC_MODE_OFFSET = 16,
	MLX5_ATOMIC_MODE_IB_COMP = 1,
	MLX5_ATOMIC_MODE_CX = 2,
	MLX5_ATOMIC_MODE_8B = 3,
	MLX5_ATOMIC_MODE_16B = 4,
	MLX5_ATOMIC_MODE_32B = 5,
	MLX5_ATOMIC_MODE_64B = 6,
	MLX5_ATOMIC_MODE_128B = 7,
	MLX5_ATOMIC_MODE_256B = 8,
};

enum {
	MLX5_REG_QPTS            = 0x4002,
	MLX5_REG_QETCR		 = 0x4005,
	MLX5_REG_QTCT		 = 0x400a,
	MLX5_REG_QPDPM           = 0x4013,
	MLX5_REG_QCAM            = 0x4019,
	MLX5_REG_DCBX_PARAM      = 0x4020,
	MLX5_REG_DCBX_APP        = 0x4021,
	MLX5_REG_FPGA_CAP	 = 0x4022,
	MLX5_REG_FPGA_CTRL	 = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
	MLX5_REG_CORE_DUMP	 = 0x402e,
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PFCC            = 0x5007,
	MLX5_REG_PPCNT		 = 0x5008,
	MLX5_REG_PPTB            = 0x500b,
	MLX5_REG_PBMC            = 0x500c,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PVLC		 = 0x500f,
	MLX5_REG_PCMR		 = 0x5041,
	MLX5_REG_PMLP		 = 0x5002,
	MLX5_REG_PPLM		 = 0x5023,
	MLX5_REG_PCAM		 = 0x507f,
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA		 = 0x9014,
	MLX5_REG_MFRL		 = 0x9028,
	MLX5_REG_MLCR		 = 0x902b,
	MLX5_REG_MTRC_CAP	 = 0x9040,
	MLX5_REG_MTRC_CONF	 = 0x9041,
	MLX5_REG_MTRC_STDB	 = 0x9042,
	MLX5_REG_MTRC_CTRL	 = 0x9043,
	MLX5_REG_MPEIN		 = 0x9050,
	MLX5_REG_MPCNT		 = 0x9051,
	MLX5_REG_MTPPS		 = 0x9053,
	MLX5_REG_MTPPSE		 = 0x9054,
	MLX5_REG_MTUTC		 = 0x9055,
	MLX5_REG_MPEGC		 = 0x9056,
	MLX5_REG_MCQS		 = 0x9060,
	MLX5_REG_MCQI		 = 0x9061,
	MLX5_REG_MCC		 = 0x9062,
	MLX5_REG_MCDA		 = 0x9063,
	MLX5_REG_MCAM		 = 0x907f,
	MLX5_REG_MIRC		 = 0x9162,
	MLX5_REG_SBCAM		 = 0xB01F,
	MLX5_REG_RESOURCE_DUMP   = 0xC000,
};

enum mlx5_qpts_trust_state {
	MLX5_QPTS_TRUST_PCP  = 1,
	MLX5_QPTS_TRUST_DSCP = 2,
};

enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST  = 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO  = 0x3,
};

enum {
	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,
	MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
	MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
};

enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};

enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};

enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_UP		= 1,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};

enum mlx5_coredev_type {
	MLX5_COREDEV_PF,
	MLX5_COREDEV_VF,
	MLX5_COREDEV_SF,
};

struct mlx5_field_desc {
	int			i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	void		       *object;
	enum dbg_rsc_type	type;
	struct dentry	       *root;
	struct mlx5_field_desc	fields[];
};

enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
	MLX5_DEV_EVENT_PORT_AFFINITY = 129,
};

enum mlx5_port_status {
	MLX5_PORT_UP        = 1,
	MLX5_PORT_DOWN      = 2,
};

enum mlx5_cmdif_state {
	MLX5_CMDIF_STATE_UNINITIALIZED,
	MLX5_CMDIF_STATE_UP,
	MLX5_CMDIF_STATE_DOWN,
};

struct mlx5_cmd_first {
	__be32		data[4];
};

struct mlx5_cmd_msg {
	struct list_head		list;
	struct cmd_msg_cache	       *parent;
	u32				len;
	struct mlx5_cmd_first		first;
	struct mlx5_cmd_mailbox	       *next;
};

struct mlx5_cmd_debug {
	struct dentry	       *dbg_root;
	void		       *in_msg;
	void		       *out_msg;
	u8			status;
	u16			inlen;
	u16			outlen;
};

struct cmd_msg_cache {
	/* protect block chain allocations
	 */
	spinlock_t		lock;
	struct list_head	head;
	unsigned int		max_inbox_size;
	unsigned int		num_ent;
};

enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};

struct mlx5_cmd_stats {
	u64		sum;
	u64		n;
	struct dentry  *root;
	/* protect command average calculations */
	spinlock_t	lock;
};

struct mlx5_cmd {
	struct mlx5_nb    nb;

	enum mlx5_cmdif_state	state;
	void	       *cmd_alloc_buf;
	dma_addr_t	alloc_dma;
	int		alloc_size;
	void	       *cmd_buf;
	dma_addr_t	dma;
	u16		cmdif_rev;
	u8		log_sz;
	u8		log_stride;
	int		max_reg_cmds;
	int		events;
	u32 __iomem    *vector;

	/* protect command queue allocations
	 */
	spinlock_t	alloc_lock;

	/* protect token allocations
	 */
	spinlock_t	token_lock;
	u8		token;
	unsigned long	bitmask;
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	int	mode;
	u16     allowed_opcode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int checksum_disabled;
	struct mlx5_cmd_stats *stats;
};

struct mlx5_port_caps {
	int	gid_table_len;
	int	pkey_table_len;
	u8	ext_port_cap;
	bool	has_smi;
};

struct mlx5_cmd_mailbox {
	void	       *buf;
	dma_addr_t	dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx5_frag_buf {
	struct mlx5_buf_list	*frags;
	int			npages;
	int			size;
	u8			page_shift;
};

struct mlx5_frag_buf_ctrl {
	struct mlx5_buf_list   *frags;
	u32			sz_m1;
	u16			frag_sz_m1;
	u16			strides_offset;
	u8			log_sz;
	u8			log_stride;
	u8			log_frag_strides;
};

struct mlx5_core_psv {
	u32	psv_idx;
	struct psv_layout {
		u32	pd;
		u16	syndrome;
		u16	reserved;
		u16	bg;
		u16	app_tag;
		u32	ref_tag;
	} psv;
};

struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;
	struct mlx5_core_psv	psv_wire;
	struct ib_sig_err       err_item;
	bool			sig_status_checked;
	bool			sig_err_exists;
	u32			sigerr_count;
};

enum {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
	MLX5_MKEY_INDIRECT_DEVX,
};

struct mlx5_core_mkey {
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
	u32			type;
};

#define MLX5_24BIT_MASK		((1 << 24) - 1)

enum mlx5_res_type {
	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ	= 3,
	MLX5_RES_XSRQ	= 4,
	MLX5_RES_XRQ	= 5,
	MLX5_RES_DCT	= MLX5_EVENT_QUEUE_TYPE_DCT,
};

struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	refcount_t		refcount;
	struct completion	free;
};

struct mlx5_uars_page {
	void __iomem	       *map;
	bool			wc;
	u32			index;
	struct list_head	list;
	unsigned int		bfregs;
	unsigned long	       *reg_bitmap; /* for non fast path bf regs */
	unsigned long	       *fp_bitmap;
	unsigned int		reg_avail;
	unsigned int		fp_avail;
	struct kref		ref_count;
	struct mlx5_core_dev   *mdev;
};

struct mlx5_bfreg_head {
	/* protect blue flame registers allocations */
	struct mutex		lock;
	struct list_head	list;
};

struct mlx5_bfreg_data {
	struct mlx5_bfreg_head	reg_head;
	struct mlx5_bfreg_head	wc_head;
};

struct mlx5_sq_bfreg {
	void __iomem	       *map;
	struct mlx5_uars_page  *up;
	bool			wc;
	u32			index;
	unsigned int		offset;
};

struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	u32				prev;
	int				miss_counter;
	u8				synd;
	u32				fatal_error;
	u32				crdump_size;
	/* wq spinlock to synchronize draining */
	spinlock_t			wq_lock;
	struct workqueue_struct	       *wq;
	unsigned long			flags;
	struct work_struct		fatal_report_work;
	struct work_struct		report_work;
	struct delayed_work		recover_work;
	struct devlink_health_reporter *fw_reporter;
	struct devlink_health_reporter *fw_fatal_reporter;
};

struct mlx5_qp_table {
	struct notifier_block   nb;

	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_vf_context {
	int	enabled;
	u64	port_guid;
	u64	node_guid;
	/* Valid bits are used to validate administrative guid only.
	 * Enabled after ndo_set_vf_guid
	 */
	u8	port_guid_valid:1;
	u8	node_guid_valid:1;
	enum port_state_policy	policy;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context	*vfs_ctx;
	int			num_vfs;
	u16			max_vfs;
};

struct mlx5_fc_pool {
	struct mlx5_core_dev *dev;
	struct mutex pool_lock; /* protects pool lists */
	struct list_head fully_used;
	struct list_head partially_used;
	struct list_head unused;
	int available_fcs;
	int used_fcs;
	int threshold;
};

struct mlx5_fc_stats {
	spinlock_t counters_idr_lock; /* protects counters_idr */
	struct idr counters_idr;
	struct list_head counters;
	struct llist_head addlist;
	struct llist_head dellist;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
	u32 *bulk_query_out;
	struct mlx5_fc_pool fc_pool;
};

struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_devcom;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
struct mlx5_vhca_state_notifier;
struct mlx5_sf_dev_table;
struct mlx5_sf_hw_table;
struct mlx5_sf_table;

struct mlx5_rate_limit {
	u32			rate;
	u32			max_burst_sz;
	u16			typical_pkt_sz;
};

struct mlx5_rl_entry {
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
	u16 index;
	u64 refcount;
	u16 uid;
	u8 dedicated : 1;
};

struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex            rl_lock;
	u16                     max_size;
	u32                     max_rate;
	u32                     min_rate;
	struct mlx5_rl_entry   *rl_entry;
};

struct mlx5_core_roce {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_handle *allow_rule;
};

enum {
	MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
	MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
};

struct mlx5_adev {
	struct auxiliary_device adev;
	struct mlx5_core_dev *mdev;
	int idx;
};

struct mlx5_priv {
	/* IRQ table is valid only for real PCI devices (PF or VF) */
	struct mlx5_irq_table   *irq_table;
	struct mlx5_eq_table	*eq_table;

	/* pages stuff */
	struct mlx5_nb          pg_nb;
	struct workqueue_struct *pg_wq;
	struct xarray           page_root_xa;
	int			fw_pages;
	atomic_t		reg_pages;
	struct list_head	free_list;
	int			vfs_pages;
	int			host_pf_pages;

	struct mlx5_core_health health;
	struct list_head	traps;

	/* start: qp stuff */
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;
	/* end: qp stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex            alloc_mutex;
	int                     numa_node;

	struct mutex            pgdir_mutex;
	struct list_head        pgdir_list;
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	struct list_head        ctx_list;
	spinlock_t              ctx_lock;
	struct mlx5_adev       **adev;
	int			adev_idx;
	struct mlx5_events      *events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs        *mpfs;
	struct mlx5_eswitch     *eswitch;
	struct mlx5_core_sriov	sriov;
	struct mlx5_lag		*lag;
	u32			flags;
	struct mlx5_devcom	*devcom;
	struct mlx5_fw_reset	*fw_reset;
	struct mlx5_core_roce	roce;
	struct mlx5_fc_stats		fc_stats;
	struct mlx5_rl_table            rl_table;

	struct mlx5_bfreg_data		bfregs;
	struct mlx5_uars_page	       *uar;
#ifdef CONFIG_MLX5_SF
	struct mlx5_vhca_state_notifier *vhca_state_notifier;
	struct mlx5_sf_dev_table *sf_dev_table;
	struct mlx5_core_dev *parent_mdev;
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
	struct mlx5_sf_hw_table *sf_hw_table;
	struct mlx5_sf_table *sf_table;
#endif
};

enum mlx5_device_state {
	MLX5_DEVICE_STATE_UNINITIALIZED,
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_UP = BIT(0),
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};

struct mlx5_td {
	/* protects tirs_list changes during tirs refresh */
	struct mutex     list_lock;
	struct list_head tirs_list;
	u32              tdn;
};

struct mlx5e_resources {
	u32                        pdn;
	struct mlx5_td             td;
	struct mlx5_core_mkey      mkey;
	struct mlx5_sq_bfreg       bfreg;
};

enum mlx5_sw_icm_type {
	MLX5_SW_ICM_TYPE_STEERING,
	MLX5_SW_ICM_TYPE_HEADER_MODIFY,
};

#define MLX5_MAX_RESERVED_GIDS 8

struct mlx5_rsvd_gids {
	unsigned int start;
	unsigned int count;
	struct ida ida;
};

#define MAX_PIN_NUM	8
struct mlx5_pps {
	u8                         pin_caps[MAX_PIN_NUM];
	struct work_struct         out_work;
	u64                        start[MAX_PIN_NUM];
	u8                         enabled;
};

struct mlx5_timer {
	struct cyclecounter        cycles;
	struct timecounter         tc;
	u32                        nominal_c_mult;
	unsigned long              overflow_period;
	struct delayed_work        overflow_work;
};

struct mlx5_clock {
	struct mlx5_nb             pps_nb;
	seqlock_t                  lock;
	struct hwtstamp_config     hwtstamp_config;
	struct ptp_clock          *ptp;
	struct ptp_clock_info      ptp_info;
	struct mlx5_pps            pps_info;
	struct mlx5_timer          timer;
};

struct mlx5_dm;
struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;

#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))

struct mlx5_core_dev {
	struct device *device;
	enum mlx5_coredev_type coredev_type;
	struct pci_dev	       *pdev;
	/* sync pci state */
	struct mutex		pci_status_mutex;
	enum mlx5_pci_status	pci_status;
	u8			rev_id;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
	struct {
		u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
		u8  embedded_cpu;
	} caps;
	u64			sys_image_guid;
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	phys_addr_t             bar_addr;
	enum mlx5_device_state	state;
	/* sync interface state */
	struct mutex		intf_state_mutex;
	unsigned long		intf_state;
	struct mlx5_priv	priv;
	struct mlx5_profile	*profile;
	u32			issi;
	struct mlx5e_resources  mlx5e_res;
	struct mlx5_dm          *dm;
	struct mlx5_vxlan       *vxlan;
	struct mlx5_geneve      *geneve;
	struct {
		struct mlx5_rsvd_gids	reserved_gids;
		u32			roce_en;
	} roce;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
#ifdef CONFIG_MLX5_ACCEL
	const struct mlx5_accel_ipsec_ops *ipsec_ops;
#endif
	struct mlx5_clock        clock;
	struct mlx5_ib_clock_info  *clock_info;
	struct mlx5_fw_tracer   *tracer;
	struct mlx5_rsc_dump    *rsc_dump;
	u32                      vsc_addr;
	struct mlx5_hv_vhca	*hv_vhca;
};

struct mlx5_db {
	__be32			*db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

enum {
	MLX5_CMD_ENT_STATE_PENDING_COMP,
};

struct mlx5_cmd_work_ent {
	unsigned long		state;
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	void		       *uout;
	int			uout_size;
	mlx5_cmd_cbk_t		callback;
	struct delayed_work	cb_timeout_work;
	void		       *context;
	int			idx;
	struct completion	handling;
	struct completion	done;
	struct mlx5_cmd        *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
	int			ret;
	int			page_queue;
	u8			status;
	u8			token;
	u64			ts1;
	u64			ts2;
	u16			op;
	bool			polling;
	/* Track the max comp handlers */
	refcount_t              refcnt;
};

struct mlx5_pas {
	u64	pa;
	u8	log_sz;
};

enum phy_port_state {
	MLX5_AAA_111
};

struct mlx5_hca_vport_context {
	u32			field_select;
	bool			sm_virt_aware;
	bool			has_smi;
	bool			has_raw;
	enum port_state_policy	policy;
	enum phy_port_state	phys_state;
	enum ib_port_state	vport_state;
	u8			port_physical_state;
	u64			sys_image_guid;
	u64			port_guid;
	u64			node_guid;
	u32			cap_mask1;
	u32			cap_mask1_perm;
	u16			cap_mask2;
	u16			cap_mask2_perm;
	u16			lid;
	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	u8			lmc;
	u8			subnet_timeout;
	u16			sm_lid;
	u8			sm_sl;
	u16			qkey_violation_counter;
	u16			pkey_violation_counter;
	bool			grh_required;
};

static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
{
	return buf->frags->buf + offset;
}

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field

static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}
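
/* Illustrative usage sketch (not from the driver sources): the fw_rev_*()
 * readers above decode the firmware revision from the initialization
 * segment, e.g. to report it as "major.minor.subminor":
 *
 *	dev_info(dev->device, "firmware version: %d.%d.%d\n",
 *		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
 */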

static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}

static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
					u8 log_stride, u8 log_sz,
					u16 strides_offset,
					struct mlx5_frag_buf_ctrl *fbc)
{
	fbc->frags      = frags;
	fbc->log_stride = log_stride;
	fbc->log_sz     = log_sz;
	fbc->sz_m1	= (1 << fbc->log_sz) - 1;
	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
	fbc->frag_sz_m1	= (1 << fbc->log_frag_strides) - 1;
	fbc->strides_offset = strides_offset;
}

static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
				 u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
{
	mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
}

static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
					  u32 ix)
{
	unsigned int frag;

	ix  += fbc->strides_offset;
	frag = ix >> fbc->log_frag_strides;

	return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}

static inline u32
mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
{
	u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;

	return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}
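
/* Illustrative usage sketch (not from the driver sources): a fragment buffer
 * control is initialized once from the queue geometry; afterwards
 * mlx5_frag_buf_get_wqe() maps a stride index to its fragment and offset
 * with a shift and a mask, since strides never cross a fragment boundary:
 *
 *	struct mlx5_frag_buf_ctrl fbc;
 *	void *wqe;
 *
 *	mlx5_init_fbc(buf.frags, log_stride, log_sz, &fbc);
 *	wqe = mlx5_frag_buf_get_wqe(&fbc, ci & fbc.sz_m1);
 */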

enum {
	CMD_ALLOWED_OPCODE_ALL,
};

void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);

struct mlx5_async_ctx {
	struct mlx5_core_dev *dev;
	atomic_t num_inflight;
	struct wait_queue_head wait;
};

struct mlx5_async_work;

typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);

struct mlx5_async_work {
	struct mlx5_async_ctx *ctx;
	mlx5_async_cbk_t user_callback;
};

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx);
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work);
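
/* Illustrative usage sketch (not from the driver sources): a caller
 * typically embeds struct mlx5_async_work in its own request and recovers
 * it in the callback; mlx5_cmd_cleanup_async_ctx() waits for all in-flight
 * callbacks to finish:
 *
 *	static void create_cb(int status, struct mlx5_async_work *work)
 *	{
 *		struct my_req *req = container_of(work, struct my_req, work);
 *		// consume status and the FW output written to req->out
 *	}
 *
 *	mlx5_cmd_init_async_ctx(dev, &ctx);
 *	err = mlx5_cmd_exec_cb(&ctx, req->in, sizeof(req->in), req->out,
 *			       sizeof(req->out), create_cb, &req->work);
 *	...
 *	mlx5_cmd_cleanup_async_ctx(&ctx);
 */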

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);

#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                             \
	({                                                                     \
		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out,    \
			      MLX5_ST_SZ_BYTES(ifc_cmd##_out));                \
	})

#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                                     \
	({                                                                     \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};                   \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);                   \
	})
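
/* Illustrative usage sketch (not from the driver sources): the _in/_inout
 * wrappers derive both mailbox sizes from the mlx5_ifc structure name, so a
 * typical command needs no explicit lengths:
 *
 *	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
 *	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
 *
 *	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
 *	err = mlx5_cmd_exec_inout(dev, alloc_pd, in, out);
 */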

int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
		   int size, struct mlx5_frag_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);

void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);

int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);

int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
		     struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
			 bool dedicated_entry, u16 *index);
void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
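
/* Illustrative usage sketch (not from the driver sources): rate limit
 * entries are reference counted by rate description; a caller gets back the
 * HW index to program into its SQ context and releases the entry with the
 * same description:
 *
 *	struct mlx5_rate_limit rl = { .rate = rate };
 *	u16 index;
 *
 *	err = mlx5_rl_add_rate(dev, &index, &rl);
 *	...
 *	mlx5_rl_remove_rate(dev, &rl);
 */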

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
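
/* Illustrative sketch (not from the driver sources): an mkey combines the
 * 24-bit index assigned by firmware with an 8-bit variant in the low byte,
 * so the helpers above invert each other:
 *
 *	u32 mkey = mlx5_idx_to_mkey(idx) | variant;
 *
 *	WARN_ON(mlx5_mkey_to_idx(mkey) != idx);
 *	WARN_ON(mlx5_mkey_variant(mkey) != variant);
 */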

enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};

enum {
	MR_CACHE_LAST_STD_ENTRY = 20,
	MLX5_IMR_MTT_CACHE_ENTRY,
	MLX5_IMR_KSM_CACHE_ENTRY,
	MAX_MR_CACHE_ENTRIES
};

/* Async-atomic event notifier used by mlx5 core to forward FW
 * events received from the event queue to mlx5 consumers.
 * Optimizes event queue dispatching.
 */
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);

/* Async-atomic event notifier used for forwarding
 * events from the event queue to the mlx5 events dispatcher,
 * eswitch, clock and others.
 */
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
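
/* Illustrative usage sketch (not from the driver sources): an EQ notifier
 * is an mlx5_nb initialized with MLX5_NB_INIT() (linux/mlx5/eq.h) and keyed
 * to a single FW event type:
 *
 *	static int port_event(struct notifier_block *nb, unsigned long type,
 *			      void *data)
 *	{
 *		struct mlx5_eqe *eqe = data;
 *		...
 *		return NOTIFY_OK;
 *	}
 *
 *	MLX5_NB_INIT(&priv->port_nb, port_event, PORT_CHANGE);
 *	mlx5_eq_notifier_register(dev, &priv->port_nb);
 */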

/* Blocking event notifier used to forward SW events, used for slow path */
int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
				      void *data);

int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
			   struct net_device *slave);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			 u64 length, u32 log_alignment, u16 uid,
			 phys_addr_t *addr, u32 *obj_id);
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			   u64 length, u16 uid, phys_addr_t addr, u32 obj_id);

#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
#endif /* CONFIG_MLX5_CORE_IPOIB */
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params);

struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;
	struct {
		int	size;
		int	limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

enum {
	MLX5_PCI_DEV_IS_VF		= 1 << 0,
};

static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_PF;
}

static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_VF;
}

static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu;
}

static inline bool
mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
}

static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
}

static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
{
	return dev->priv.sriov.max_vfs;
}

static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}

static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}

static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
	       MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
}

static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
}

static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_mp_slave(dev) ||
	       mlx5_core_is_mp_master(dev);
}

static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_mp_enabled(dev))
		return 1;

	return MLX5_CAP_GEN(dev, native_port_num);
}

enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;

	devlink_param_driverinit_value_get(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
					   &val);
	return val.vbool;
}

#endif /* MLX5_DRIVER_H */