/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define LINUXKPI_PARAM_PREFIX mlx4_

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/cache.h>
#include <linux/random.h>

#include <dev/mlx4/device.h>
#include <dev/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"
#include <dev/mlx4/stats.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");

int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
			mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. Range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");

static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

static bool enable_4k_uar;
module_param(enable_4k_uar, bool, 0444);
MODULE_PARM_DESC(enable_4k_uar,
		 "Enable using 4K UAR. Should not be enabled if there are VFs which do not support 4K UARs (default: false)");

#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
					 MLX4_FUNC_CAP_DMFS_A0_STATIC)

#define RESET_PERSIST_MASK_FLAGS	(MLX4_FLAG_SRIOV)

static char mlx4_description[] = "Mellanox driver"
				 " (" DRV_VERSION ")";

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static struct mlx4_profile low_mem_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 6,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 8,
	.num_mcg	= 1 << 8,
	.num_mpt	= 1 << 9,
	.num_mtt	= 1 << 7,
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);

static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
					      struct mlx4_dev_cap *dev_cap)
{
	/* reserved_uars is counted in units of the system page size.
	 * Therefore, an adjustment is applied when the UAR page size is
	 * smaller than the system page size.
	 */
	dev->caps.reserved_uars =
		max_t(int,
		      mlx4_get_num_reserved_uar(dev),
		      dev_cap->reserved_uars /
			(1 << (PAGE_SHIFT - dev->uar_page_shift)));
}
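/*
 * Worked example (illustrative values): on a system with 64K pages
 * (PAGE_SHIFT = 16) running with 4K UARs (uar_page_shift = 12), the
 * divisor above is 1 << (16 - 12) = 16, so 128 firmware-reserved UAR
 * pages shrink to 128 / 16 = 8 system pages.  When the two page sizes
 * match, the divisor is 1 and the firmware value is used as-is,
 * subject to the mlx4_get_num_reserved_uar() floor.
 */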

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 0; i < dev->caps.num_ports - 1; i++) {
			if (port_type[i] != port_type[i + 1]) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

enum {
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};

static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err = 0;
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
			return err;
		}
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
	}
	return err;
}

static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride.
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs.
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride, cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		if (cache_line_size() != 32 && cache_line_size() != 64)
			mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}
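/*
 * Note (an assumption about the hardware rationale, not stated in this
 * file): with stride enabled the HCA spaces CQEs/EQEs further apart
 * while only 32 bytes of each entry carry data, so on CPUs with 128-
 * or 256-byte cache lines each completion presumably lands in its own
 * cache line and polling does not bounce lines between entries.
 */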

static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
			  struct mlx4_port_cap *port_cap)
{
	dev->caps.vl_cap[port] = port_cap->max_vl;
	dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
	dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
	/* set gid and pkey table operating lengths by default
	 * to non-sriov values
	 */
	dev->caps.gid_table_len[port] = port_cap->max_gids;
	dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
	dev->caps.port_width_cap[port] = port_cap->max_port_width;
	dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
	dev->caps.max_tc_eth = port_cap->max_tc_eth;
	dev->caps.def_mac[port] = port_cap->def_mac;
	dev->caps.supported_type[port] = port_cap->supported_port_types;
	dev->caps.suggested_type[port] = port_cap->suggested_type;
	dev->caps.default_sense[port] = port_cap->default_sense;
	dev->caps.trans_type[port] = port_cap->trans_type;
	dev->caps.vendor_oui[port] = port_cap->vendor_oui;
	dev->caps.wavelength[port] = port_cap->wavelength;
	dev->caps.trans_code[port] = port_cap->trans_code;

	return 0;
}

static int mlx4_dev_port(struct mlx4_dev *dev, int port,
			 struct mlx4_port_cap *port_cap)
{
	int err = 0;

	err = mlx4_QUERY_PORT(dev, port, port_cap);

	if (err)
		mlx4_err(dev, "QUERY_PORT command failed.\n");

	return err;
}

static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
{
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
		return;

	if (mlx4_is_mfunc(dev)) {
		mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
		mlx4_dbg(dev,
			 "Keep FCS is not supported - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}
}

#define MLX4_A0_STEERING_TABLE_SIZE	256
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}
	mlx4_dev_cap_dump(dev, dev_cap);

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, (long)PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
					dev->caps.num_sys_eqs :
					MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
			return err;
		}
	}

	dev->caps.uar_page_size = PAGE_SIZE;
	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;

	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.flags2 = dev_cap->flags2;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;

	/* Save uar page shift */
	if (!mlx4_is_slave(dev)) {
		/* A virtual PCI function needs to determine the UAR page size
		 * from firmware. Only the master PCI function can set the UAR
		 * page size.
		 */
		if (enable_4k_uar)
			dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
		else
			dev->uar_page_shift = PAGE_SHIFT;

		mlx4_set_num_reserved_uars(dev, dev_cap);
	}

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
		struct mlx4_init_hca_param hca_param;

		memset(&hca_param, 0, sizeof(hca_param));
		err = mlx4_QUERY_HCA(dev, &hca_param);
		/* Turn off the PHV_EN flag in case phv_check_en is set.
		 * phv_check_en is a HW check that parses the packet and
		 * verifies that the phv bit was reported correctly in the
		 * wqe. To allow QinQ, the PHV_EN flag should be set and
		 * phv_check_en must be cleared, otherwise QinQ packets will
		 * be dropped by the HW.
		 */
		if (err || hca_param.phv_check_en)
			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
	}

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs = log_num_mac;
		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port.
		 * 3. FW declared that it supports link sensing.
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If the "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform the sense_port FW command to try and set the correct
		 * port type from the beginning.
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
			mlx4_warn(dev, "Requested number of MACs is too high for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
			mlx4_warn(dev, "Requested number of VLANs is too high for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
	    (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
	    (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
		mlx4_warn(dev,
			  "Granular QoS per VF not supported with IB/Eth configuration\n");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
	}

	dev->caps.max_counters = dev_cap->max_counters;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
	else
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
	} else {
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
	}

	dev->caps.rl_caps = dev_cap->rl_caps;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
		dev->caps.dmfs_high_rate_qpn_range;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev)) {
		mlx4_enable_cqe_eqe_stride(dev);
		dev->caps.alloc_res_qp_mask =
			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
			MLX4_RESERVE_A0_QP;

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			mlx4_warn(dev, "Old device ETS support detected\n");
			mlx4_warn(dev, "Consider upgrading device FW.\n");
			dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
		}

	} else {
		dev->caps.alloc_res_qp_mask = 0;
	}

	mlx4_enable_ignore_fcs(dev);

	return 0;
}

static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
					  &lnkcap1);
	err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
					  &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
			err2 ? err2 : -EINVAL;
	}
	return 0;
}
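/*
 * Worked example (register value assumed): the Maximum Link Width
 * field occupies bits 9:4 of PCI_EXP_LNKCAP.  For a device reporting
 * lnkcap1 = 0x83 (Supported Link Speeds = 3, MLW = 8), the extraction
 * above computes (0x83 & PCI_EXP_LNKCAP_MLW) >> 4 = 0x80 >> 4 = 8,
 * i.e. an x8-capable link.
 */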

static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	int err;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
	if (err) {
		mlx4_warn(dev,
			  "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
	if (err || speed == PCI_SPEED_UNKNOWN ||
	    width == PCIE_LNK_WIDTH_UNKNOWN) {
		mlx4_warn(dev,
			  "Unable to determine PCI device chain minimum BW\n");
		return;
	}

	if (width != width_cap || speed != speed_cap)
		mlx4_warn(dev,
			  "PCIe BW is different than device's capability\n");

	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
		  width, width_cap);
	return;
}

/* The function checks if there are live VFs and returns how many there are. */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}

int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
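/*
 * Illustrative example (QPN bases assumed): with base_proxy_sqpn =
 * 0x100 and base_tunnel_sqpn = 0x200, proxy QP 0x105 maps to
 * MLX4_RESERVED_QKEY_BASE + 5 and tunnel QP 0x205 likewise maps to
 * MLX4_RESERVED_QKEY_BASE + 5, so a proxy/tunnel pair with the same
 * index shares one qkey within the reserved range.
 */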

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	u8 i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		return err;
	}

	/* fail if the hca has an unknown global capability;
	 * at this time, global_caps should always be zeroed
	 */
	if (hca_param.global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
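	/* ~page_size_cap + 1 is two's-complement negation, which isolates
	 * the lowest set bit of the capability mask.  page_size_cap was
	 * built as ~(u32)(min_page_sz - 1), so e.g. a 4K minimum gives
	 * page_size_cap = 0xfffff000 and ~0xfffff000 + 1 = 0x1000,
	 * recovering the HCA's minimum page size.
	 */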
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, (long)PAGE_SIZE);
		return -ENODEV;
	}

	/* Set uar_page_shift for VF */
	dev->uar_page_shift = hca_param.uar_page_sz + 12;

	/* Make sure the master uar page size is valid */
	if (dev->uar_page_shift > PAGE_SHIFT) {
		mlx4_err(dev,
			 "Invalid configuration: uar page size is larger than system page size\n");
		return -ENODEV;
	}

	/* Set reserved_uars based on the uar_page_shift */
	mlx4_set_num_reserved_uars(dev, &dev_cap);

	/* Although the uar page size in FW differs from the system page size,
	 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
	 * still work with the assumption that uar page size == system page size
	 */
	dev->caps.uar_page_size = PAGE_SIZE;

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
			 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
		return -ENOSYS;
	}

	dev->caps.num_ports = func_cap.num_ports;
	dev->quotas.qp = func_cap.qp_quota;
	dev->quotas.srq = func_cap.srq_quota;
	dev->quotas.cq = func_cap.cq_quota;
	dev->quotas.mpt = func_cap.mpt_quota;
	dev->quotas.mtt = func_cap.mtt_quota;
	dev->caps.num_qps = 1 << hca_param.log_num_qps;
	dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs = func_cap.max_eq;
	dev->caps.reserved_eqs = func_cap.reserved_eq;
	dev->caps.reserved_lkey = func_cap.reserved_lkey;
	dev->caps.num_pds = MLX4_NUM_PDS;
	dev->caps.num_mgms = 0;
	dev->caps.num_amgms = 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	mlx4_replace_zero_macs(dev);

	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
	    !dev->caps.qp0_qkey) {
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						      &dev->caps.gid_table_len[i],
						      &dev->caps.pkey_table_len[i]);
		if (err)
			goto err_mem;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
	    pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		err = -ENOMEM;
		goto err_mem;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size = 32;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
		dev->caps.eqe_size = hca_param.eqe_size;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
		dev->caps.cqe_size = hca_param.cqe_size;
		/* The user still needs to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
	mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
		 hca_param.rss_ip_frags ? "on" : "off");

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
	    dev->caps.bf_reg_size)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;

	return 0;

err_mem:
	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_qkey = NULL;
	dev->caps.qp0_tunnel = NULL;
	dev->caps.qp0_proxy = NULL;
	dev->caps.qp1_tunnel = NULL;
	dev->caps.qp1_proxy = NULL;

	return err;
}

static void mlx4_request_modules(struct mlx4_dev *dev)
{
	int port;
	int has_ib_port = false;
	int has_eth_port = false;
#define EN_DRV_NAME	"mlx4_en"
#define IB_DRV_NAME	"mlx4_ib"

	for (port = 1; port <= dev->caps.num_ports; port++) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			has_ib_port = true;
		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			has_eth_port = true;
	}

	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
		request_module_nowait(IB_DRV_NAME);
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
		mlx4_request_modules(dev);
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}
static int __set_port_type(struct mlx4_port_info *info,
			   enum mlx4_port_type port_type)
{
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
		mlx4_err(mdev,
			 "Requested port type for port %d is not supported on this HCA\n",
			 info->port);
		err = -EINVAL;
		goto err_sup;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	info->tmp_type = port_type;

	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
err_sup:
	return err;
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	enum mlx4_port_type port_type;
	static DEFINE_MUTEX(set_port_type_mutex);
	int err;

	mutex_lock(&set_port_type_mutex);

	if (!strcmp(buf, "ib\n")) {
		port_type = MLX4_PORT_TYPE_IB;
	} else if (!strcmp(buf, "eth\n")) {
		port_type = MLX4_PORT_TYPE_ETH;
	} else if (!strcmp(buf, "auto\n")) {
		port_type = MLX4_PORT_TYPE_AUTO;
	} else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		err = -EINVAL;
		goto err_out;
	}

	err = __set_port_type(info, port_type);

err_out:
	mutex_unlock(&set_port_type_mutex);

	return err ? err : count;
}

enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is an invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, aborting\n",
				 port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}

/* bond for multi-function device */
#define MAX_MF_BOND_ALLOWED_SLAVES 63
static int mlx4_mf_bond(struct mlx4_dev *dev)
{
	int err = 0;
	int nvfs;
	struct mlx4_slaves_pport slaves_port1;
	struct mlx4_slaves_pport slaves_port2;
	DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);

	slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
	slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
	bitmap_and(slaves_port_1_2,
		   slaves_port1.slaves, slaves_port2.slaves,
		   dev->persist->num_vfs + 1);

	/* only single port vfs are allowed */
	if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) {
		mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
		return -EINVAL;
	}

	/* number of virtual functions is number of total functions minus one
	 * physical function for each port.
	 */
	nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
		bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;

	/* limit on maximum allowed VFs */
	if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
		mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
			  nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
		return -EINVAL;
	}

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
		return -EINVAL;
	}

	err = mlx4_bond_mac_table(dev);
	if (err)
		return err;
	err = mlx4_bond_vlan_table(dev);
	if (err)
		goto err1;
	err = mlx4_bond_fs_rules(dev);
	if (err)
		goto err2;

	return 0;
err2:
	(void)mlx4_unbond_vlan_table(dev);
err1:
	(void)mlx4_unbond_mac_table(dev);
	return err;
}

static int mlx4_mf_unbond(struct mlx4_dev *dev)
{
	int ret, ret1;

	ret = mlx4_unbond_fs_rules(dev);
	if (ret)
		mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
	ret1 = mlx4_unbond_mac_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
		ret = ret1;
	}
	ret1 = mlx4_unbond_vlan_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
		ret = ret1;
	}
	return ret;
}

int mlx4_bond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (!mlx4_is_bonded(dev)) {
		ret = mlx4_do_bond(dev, true);
		if (ret)
			mlx4_err(dev, "Failed to bond device: %d\n", ret);
		if (!ret && mlx4_is_master(dev)) {
			ret = mlx4_mf_bond(dev);
			if (ret) {
				mlx4_err(dev, "bond for multifunction failed\n");
				mlx4_do_bond(dev, false);
			}
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is bonded\n");

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_bond);

int mlx4_unbond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (mlx4_is_bonded(dev)) {
		int ret2 = 0;

		ret = mlx4_do_bond(dev, false);
		if (ret)
			mlx4_err(dev, "Failed to unbond device: %d\n", ret);
		if (mlx4_is_master(dev))
			ret2 = mlx4_mf_unbond(dev);
		if (ret2) {
			mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
			ret = ret2;
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is unbonded\n");

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_unbond);

int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
{
	u8 port1 = v2p->port1;
	u8 port2 = v2p->port2;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -ENOTSUPP;

	mutex_lock(&priv->bond_mutex);

	/* zero means keep current mapping for this port */
	if (port1 == 0)
		port1 = priv->v2p.port1;
	if (port2 == 0)
		port2 = priv->v2p.port2;

	if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
	    (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
	    (port1 == 2 && port2 == 1)) {
		/* besides the boundary checks, cross-mapping makes
		 * no sense and therefore is not allowed */
		err = -EINVAL;
	} else if ((port1 == priv->v2p.port1) &&
		   (port2 == priv->v2p.port2)) {
		err = 0;
	} else {
		err = mlx4_virt2phy_port_map(dev, port1, port2);
		if (!err) {
			mlx4_dbg(dev, "port map changed: [%d][%d]\n",
				 port1, port2);
			priv->v2p.port1 = port1;
			priv->v2p.port2 = port2;
		} else {
			mlx4_err(dev, "Failed to change port map: %d\n", err);
		}
	}

	mutex_unlock(&priv->bond_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_port_map_set);
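/*
 * Usage sketch (illustrative, not a call site in this driver): a
 * caller steering both virtual ports onto physical port 2 would pass
 *
 *	struct mlx4_port_map v2p = { .port1 = 2, .port2 = 2 };
 *	err = mlx4_port_map_set(dev, &v2p);
 *
 * Per the checks above, the accepted mappings are the identity (1, 2)
 * and the single-port cases (1, 1) and (2, 2); the crossed mapping
 * (2, 1) is rejected, and a zero in either field keeps that port's
 * current mapping.
 */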

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}
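/*
 * Layout sketch (entry size assumed for illustration): each cMPT type
 * gets its own window inside the cMPT area, at an offset of
 * (type * cmpt_entry_sz) << MLX4_CMPT_SHIFT.  Assuming consecutive
 * type values for QP, SRQ, CQ and EQ and cmpt_entry_sz = 64, the
 * windows start at cmpt_base + 0, + (64 << MLX4_CMPT_SHIFT),
 * + (128 << MLX4_CMPT_SHIFT) and + (192 << MLX4_CMPT_SHIFT).
 */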

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}


	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
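	/* Worked example (values assumed): with 10 reserved segments, a
	 * segment size of 8 bytes and a 64-byte cache alignment,
	 * ALIGN(10 * 8, 64) = 128, so reserved_mtts is rounded up to
	 * 128 / 8 = 16 and the firmware-owned region ends on a cacheline
	 * boundary.
	 */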
1619
1620 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
1621 init_hca->mtt_base,
1622 dev->caps.mtt_entry_sz,
1623 dev->caps.num_mtts,
1624 dev->caps.reserved_mtts, 1, 0);
1625 if (err) {
1626 mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
1627 goto err_unmap_eq;
1628 }
1629
1630 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
1631 init_hca->dmpt_base,
1632 dev_cap->dmpt_entry_sz,
1633 dev->caps.num_mpts,
1634 dev->caps.reserved_mrws, 1, 1);
1635 if (err) {
1636 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
1637 goto err_unmap_mtt;
1638 }
1639
1640 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
1641 init_hca->qpc_base,
1642 dev_cap->qpc_entry_sz,
1643 dev->caps.num_qps,
1644 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1645 0, 0);
1646 if (err) {
1647 mlx4_err(dev, "Failed to map QP context memory, aborting\n");
1648 goto err_unmap_dmpt;
1649 }
1650
1651 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
1652 init_hca->auxc_base,
1653 dev_cap->aux_entry_sz,
1654 dev->caps.num_qps,
1655 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1656 0, 0);
1657 if (err) {
1658 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
1659 goto err_unmap_qp;
1660 }
1661
1662 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
1663 init_hca->altc_base,
1664 dev_cap->altc_entry_sz,
1665 dev->caps.num_qps,
1666 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1667 0, 0);
1668 if (err) {
1669 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
1670 goto err_unmap_auxc;
1671 }
1672
1673 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
1674 init_hca->rdmarc_base,
1675 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
1676 dev->caps.num_qps,
1677 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1678 0, 0);
1679 if (err) {
1680 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
1681 goto err_unmap_altc;
1682 }
1683
1684 err = mlx4_init_icm_table(dev, &priv->cq_table.table,
1685 init_hca->cqc_base,
1686 dev_cap->cqc_entry_sz,
1687 dev->caps.num_cqs,
1688 dev->caps.reserved_cqs, 0, 0);
1689 if (err) {
1690 mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
1691 goto err_unmap_rdmarc;
1692 }
1693
1694 err = mlx4_init_icm_table(dev, &priv->srq_table.table,
1695 init_hca->srqc_base,
1696 dev_cap->srq_entry_sz,
1697 dev->caps.num_srqs,
1698 dev->caps.reserved_srqs, 0, 0);
1699 if (err) {
1700 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
1701 goto err_unmap_cq;
1702 }
1703
1704 /*
1705 * For flow steering device managed mode it is required to use
1706 * mlx4_init_icm_table. For B0 steering mode it's not strictly
1707 * required, but for simplicity just map the whole multicast
1708 * group table now. The table isn't very big and it's a lot
1709 * easier than trying to track ref counts.
1710 */
1711 err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
1712 init_hca->mc_base,
1713 mlx4_get_mgm_entry_size(dev),
1714 dev->caps.num_mgms + dev->caps.num_amgms,
1715 dev->caps.num_mgms + dev->caps.num_amgms,
1716 0, 0);
1717 if (err) {
1718 mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
1719 goto err_unmap_srq;
1720 }
1721
1722 return 0;
1723
1724 err_unmap_srq:
1725 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1726
1727 err_unmap_cq:
1728 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1729
1730 err_unmap_rdmarc:
1731 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1732
1733 err_unmap_altc:
1734 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1735
1736 err_unmap_auxc:
1737 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1738
1739 err_unmap_qp:
1740 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1741
1742 err_unmap_dmpt:
1743 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1744
1745 err_unmap_mtt:
1746 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1747
1748 err_unmap_eq:
1749 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1750
1751 err_unmap_cmpt:
1752 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1753 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1754 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1755 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1756
1757 err_unmap_aux:
1758 mlx4_UNMAP_ICM_AUX(dev);
1759
1760 err_free_aux:
1761 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1762
1763 return err;
1764 }
1765
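/*
 * Tear down all ICM tables in the reverse order of their creation in
 * mlx4_init_icm() above, then unmap and free the auxiliary ICM area
 * itself.
 */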
static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
			  MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function\n");
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
}

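/*
 * The BlueFlame send-doorbell region lives in BAR 2 immediately after
 * the UAR pages, so its start and length are derived by skipping
 * num_uars pages from the beginning of the BAR. It is mapped
 * write-combining so that send descriptors can be written directly to
 * the device.
 */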
static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->persist->pdev, 2) +
		   (dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->persist->pdev, 2) -
		 (dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}

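/*
 * Read the 64-bit free-running hardware cycle counter. The high and
 * low words live in separate 32-bit registers, so the high word is
 * sampled before and after the low word and the read is retried (up to
 * 10 times) until both high-word samples match, guarding against a
 * carry between the two reads.
 */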
s64 mlx4_read_clock(struct mlx4_dev *dev)
{
	u32 clockhi, clocklo, clockhi1;
	s64 cycles;
	int i;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!priv->clock_mapping)
		return -ENOTSUPP;

	for (i = 0; i < 10; i++) {
		clockhi = swab32(readl(priv->clock_mapping));
		clocklo = swab32(readl(priv->clock_mapping + 4));
		clockhi1 = swab32(readl(priv->clock_mapping));
		if (clockhi == clockhi1)
			break;
	}

	cycles = (u64) clockhi << 32 | (u64) clocklo;

	return cycles & CORE_CLOCK_MASK;
}
EXPORT_SYMBOL_GPL(mlx4_read_clock);

static int map_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clock_mapping =
		ioremap(pci_resource_start(dev->persist->pdev,
					   priv->fw.clock_bar) +
			priv->fw.clock_offset, MLX4_CLOCK_SIZE);

	if (!priv->clock_mapping)
		return -ENOMEM;

	return 0;
}

int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
				   struct mlx4_clock_params *params)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (mlx4_is_slave(dev))
		return -ENOTSUPP;

	if (!params)
		return -EINVAL;

	params->bar = priv->fw.clock_bar;
	params->offset = priv->fw.clock_offset;
	params->size = MLX4_CLOCK_SIZE;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params);

static void unmap_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->clock_mapping)
		iounmap(priv->clock_mapping);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	sysctl_ctx_free(&dev->hw_ctx);
	unmap_internal_clock(dev);
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
	}
}

static void mlx4_close_fw(struct mlx4_dev *dev)
{
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}

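/*
 * Poll the communication-channel flags until the PF clears the offline
 * bit or MLX4_COMM_OFFLINE_TIME_OUT expires. Sleeping 100 msec between
 * polls matches the time a PF typically needs to come up during
 * AER/reset flows.
 */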
static int mlx4_comm_check_offline(struct mlx4_dev *dev)
{
#define COMM_CHAN_OFFLINE_OFFSET 0x09

	u32 comm_flags;
	u32 offline_bit;
	unsigned long end;
	struct mlx4_priv *priv = mlx4_priv(dev);

	end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
	while (time_before(jiffies, end)) {
		comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
					  MLX4_COMM_CHAN_FLAGS));
		offline_bit = (comm_flags &
			       (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
		if (!offline_bit)
			return 0;
		/* There are cases as part of AER/Reset flow that PF needs
		 * around 100 msec to load. We therefore sleep for 100 msec
		 * to allow other tasks to make use of that CPU during this
		 * time interval.
		 */
		msleep(100);
	}
	mlx4_err(dev, "Communication channel is offline.\n");
	return -EIO;
}

static void mlx4_reset_vf_support(struct mlx4_dev *dev)
{
#define COMM_CHAN_RST_OFFSET 0x1e

	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 comm_rst;
	u32 comm_caps;

	comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm +
				 MLX4_COMM_CHAN_CAPS));
	comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));

	if (comm_rst)
		dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
}

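/*
 * Bring up a slave (VF) function over the communication channel: reset
 * the channel, verify that the slave's command-interface revision
 * matches the master's, and then hand the master the DMA address of the
 * virtual HCR in four 16-bit chunks (VHCR0 carries bits 63:48, VHCR1
 * bits 47:32, VHCR2 bits 31:16, and VHCR_EN the low 16 bits, which also
 * enables the channel).
 */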
static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	if (atomic_read(&pf_loading)) {
		mlx4_warn(dev, "PF is not ready - Deferring probe\n");
		return -EAGAIN;
	}

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	priv->cmd.max_cmds = 1;
	if (mlx4_comm_check_offline(dev)) {
		mlx4_err(dev, "PF is not responsive, skipping initialization\n");
		goto err_offline;
	}

	mlx4_reset_vf_support(dev);
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
	/* If we are in the middle of FLR, the slave will try
	 * NUM_OF_RESET_RETRIES times before giving up. */
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			return -EAGAIN;
		} else
			goto err;
	}

	/* Check the driver version - the slave I/F revision
	 * must match the master's. */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
	    MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported by the master\n");
		goto err;
	}

	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
		goto err;

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
err_offline:
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return -EIO;
}

static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			dev->caps.gid_table_len[i] =
				mlx4_get_slave_num_gids(dev, 0, i);
		else
			dev->caps.gid_table_len[i] = 1;
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;
	}
}

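/*
 * Pick the smallest log2 MGM entry size whose entry can hold
 * qp_per_entry QPs. Each 16-byte line of a 2^i-byte entry packs four
 * 32-bit QPNs, and two lines appear to be reserved for the entry
 * header, hence the 4 * ((1 << i) / 16 - 2) capacity check. Returns -1
 * if even the largest supported entry size is too small.
 */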
static int choose_log_fs_mgm_entry_size(int qp_per_entry)
{
	int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
	     i++) {
		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
			break;
	}

	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
}

static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
{
	switch (dmfs_high_steer_mode) {
	case MLX4_STEERING_DMFS_A0_DEFAULT:
		return "default performance";

	case MLX4_STEERING_DMFS_A0_DYNAMIC:
		return "dynamic hybrid mode";

	case MLX4_STEERING_DMFS_A0_STATIC:
		return "performance optimized for limited rule configuration (static)";

	case MLX4_STEERING_DMFS_A0_DISABLE:
		return "disabled performance optimized steering";

	case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
		return "performance optimized steering not supported";

	default:
		return "Unrecognized mode";
	}
}

#define MLX4_DMFS_A0_STEERING	(1UL << 2)

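/*
 * Select the steering mode in priority order: device-managed flow
 * steering (DMFS) when the firmware supports it and each MGM entry has
 * room for at least one QP per function, otherwise B0 steering when
 * both the unicast and multicast VEP steering flags are set, and A0
 * steering as the last resort. A non-positive log_num_mgm_entry_size
 * module parameter is treated as a bit mask of overrides (e.g.
 * MLX4_DMFS_A0_STEERING).
 */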
static void choose_steering_mode(struct mlx4_dev *dev,
				 struct mlx4_dev_cap *dev_cap)
{
	if (mlx4_log_num_mgm_entry_size <= 0) {
		if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
			if (dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
				mlx4_err(dev, "DMFS high rate mode not supported\n");
			else
				dev->caps.dmfs_high_steer_mode =
					MLX4_STEERING_DMFS_A0_STATIC;
		}
	}

	if (mlx4_log_num_mgm_entry_size <= 0 &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
	    (!mlx4_is_mfunc(dev) ||
	     (dev_cap->fs_max_num_qp_per_entry >=
	     (dev->persist->num_vfs + 1))) &&
	    choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
		MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
		dev->oper_log_mgm_entry_size =
			choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
			dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
		else {
			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
		}
		dev->oper_log_mgm_entry_size =
			mlx4_log_num_mgm_entry_size > 0 ?
			mlx4_log_num_mgm_entry_size :
			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	}
	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode),
		 dev->oper_log_mgm_entry_size,
		 mlx4_log_num_mgm_entry_size);
}

static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap)
{
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
	else
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;

	mlx4_dbg(dev, "Tunneling offload mode is: %s\n",
		 (dev->caps.tunnel_offload_mode ==
		  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
}

static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
{
	int i;
	struct mlx4_port_cap port_cap;

	if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
		return -EINVAL;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (mlx4_dev_port(dev, i, &port_cap)) {
			mlx4_err(dev,
				 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
		} else if ((dev->caps.dmfs_high_steer_mode !=
			    MLX4_STEERING_DMFS_A0_DEFAULT) &&
			   (port_cap.dmfs_optimized_state ==
			    !!(dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_DISABLE))) {
			mlx4_err(dev,
				 "DMFS high rate steer mode differs: driver requested %s but %s in FW.\n",
				 dmfs_high_rate_steering_mode_str(
					dev->caps.dmfs_high_steer_mode),
				 (port_cap.dmfs_optimized_state ?
					"enabled" : "disabled"));
		}
	}

	return 0;
}

static int mlx4_init_fw(struct mlx4_dev *dev)
{
	struct mlx4_mod_stat_cfg mlx4_cfg;
	int err = 0;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting\n");
			return err;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting\n");
			return err;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
	}

	return err;
}

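/*
 * One-time HCA bring-up. For a PF this queries device capabilities,
 * chooses steering and tunnel-offload modes, sizes the ICM from the
 * selected profile, and issues INIT_HCA; for a VF it performs the slave
 * handshake and inherits its capabilities from the master. Common tail
 * work maps the BlueFlame area and retrieves adapter and CONFIG_DEV
 * parameters.
 */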
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap = {};
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	struct mlx4_config_dev_params params;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
			return err;
		}

		choose_steering_mode(dev, &dev_cap);
		choose_tunnel_offload_mode(dev, &dev_cap);

		if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
		    mlx4_is_master(dev))
			dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;

		err = mlx4_get_phys_port_id(dev);
		if (err)
			mlx4_err(dev, "Failed to get physical port id\n");

		if (mlx4_is_master(dev))
			mlx4_parav_master_pf_caps(dev);

		if (mlx4_low_memory_profile()) {
			mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
			profile = low_mem_profile;
		} else {
			profile = default_profile;
		}
		if (dev->caps.steering_mode ==
		    MLX4_STEERING_MODE_DEVICE_MANAGED)
			profile.num_mcg = MLX4_FS_NUM_MCG;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			return err;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		if (enable_4k_uar) {
			init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
					      PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
			init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
		} else {
			init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
			init_hca.uar_page_sz = PAGE_SHIFT - 12;
		}

		init_hca.mw_enabled = 0;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
			init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			return err;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
			goto err_free_icm;
		}

		if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
			err = mlx4_query_func(dev, &dev_cap);
			if (err < 0) {
				mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
				goto err_close;
			} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
				dev->caps.num_eqs = dev_cap.max_eqs;
				dev->caps.reserved_eqs = dev_cap.reserved_eqs;
				dev->caps.reserved_uars = dev_cap.reserved_uars;
			}
		}

		/*
		 * If TS is supported by FW,
		 * read the HCA frequency with the QUERY_HCA command.
		 */
		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
			memset(&init_hca, 0, sizeof(init_hca));
			err = mlx4_QUERY_HCA(dev, &init_hca);
			if (err) {
				mlx4_err(dev, "QUERY_HCA command failed, disabling timestamping\n");
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
			} else {
				dev->caps.hca_core_clock =
					init_hca.hca_core_clock;
			}

			/* In case we got HCA frequency 0 - disable timestamping
			 * to avoid dividing by zero.
			 */
			if (!dev->caps.hca_core_clock) {
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev,
					 "HCA frequency is 0 - timestamping is not supported\n");
			} else if (map_internal_clock(dev)) {
				/*
				 * Map internal clock;
				 * in case of failure disable timestamping.
				 */
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
			}
		}

		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
			if (mlx4_validate_optimized_steering(dev))
				mlx4_warn(dev, "Optimized steering validation failed\n");

			if (dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_DISABLE) {
				dev->caps.dmfs_high_rate_qpn_base =
					dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
				dev->caps.dmfs_high_rate_qpn_range =
					MLX4_A0_STEERING_TABLE_SIZE;
			}

			mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n",
				 dmfs_high_rate_steering_mode_str(
					dev->caps.dmfs_high_steer_mode));
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			if (err != -EAGAIN)
				mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/* Only the master sets the ports; all the rest get them from it. */
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
		goto unmap_bf;
	}

	/* Query CONFIG_DEV parameters */
	err = mlx4_config_dev_retrieval(dev, &params);
	if (err && err != -ENOTSUPP) {
		mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
	} else if (!err) {
		dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
		dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
	}
	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

unmap_bf:
	unmap_internal_clock(dev);
	unmap_bf_area(dev);

	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else
		mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

	return err;
}

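/*
 * The counter bitmap is sized up to a power of two; indexes beyond
 * max_counters are masked off as reserved, and the last valid index is
 * kept back as the "sink" counter that absorbs traffic for functions
 * that could not allocate a real counter.
 */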
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent_pow2;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	if (!dev->caps.max_counters)
		return -ENOSPC;

	nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
	/* reserve last counter index for sink counter */
	return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
				nent_pow2 - 1, 0,
				nent_pow2 - dev->caps.max_counters + 1);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return;

	if (!dev->caps.max_counters)
		return;

	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	for (port = 0; port < dev->caps.num_ports; port++)
		if (priv->def_counter[port] != -1)
			mlx4_counter_free(dev, priv->def_counter[port]);
}

static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port, err = 0;
	u32 idx;

	for (port = 0; port < dev->caps.num_ports; port++)
		priv->def_counter[port] = -1;

	for (port = 0; port < dev->caps.num_ports; port++) {
		err = mlx4_counter_alloc(dev, &idx);

		if (!err || err == -ENOSPC) {
			priv->def_counter[port] = idx;
		} else if (err == -ENOENT) {
			err = 0;
			continue;
		} else if (mlx4_is_slave(dev) && err == -EINVAL) {
			priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
			mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
				  MLX4_SINK_COUNTER_INDEX(dev));
			err = 0;
		} else {
			mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
				 __func__, port + 1, err);
			mlx4_cleanup_default_counters(dev);
			return err;
		}

		mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
			 __func__, priv->def_counter[port], port + 1);
	}

	return err;
}

int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1) {
		*idx = MLX4_SINK_COUNTER_INDEX(dev);
		return -ENOSPC;
	}

	return 0;
}

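/*
 * Standard mlx4 single/multi-function split: on a multi-function
 * device the allocation is forwarded to the resource wrapper via a
 * firmware command (ALLOC_RES/RES_COUNTER), while on a single-function
 * device the __mlx4_counter_alloc() helper above is called directly.
 */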
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*idx = get_param_l(&out_param);

		return err;
	}
	return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
				u8 counter_index)
{
	struct mlx4_cmd_mailbox *if_stat_mailbox;
	int err;
	u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;

	if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(if_stat_mailbox))
		return PTR_ERR(if_stat_mailbox);

	err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
	return err;
}

void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return;

	if (idx == MLX4_SINK_COUNTER_INDEX(dev))
		return;

	__mlx4_clear_if_stat(dev, idx);

	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
	return;
}

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, idx);
		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);

int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return priv->def_counter[port - 1];
}
EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);

void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
}
EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);

__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return priv->mfunc.master.vf_admin[entry].vport[port].guid;
}
EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);

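/*
 * Generate a random administrative GUID for a VF (entry 0 is the PF
 * and keeps the hardware GUID). In the big-endian value, bit 56 is the
 * group (multicast) bit of the EUI-64's first octet and bit 57 is its
 * universal/local bit, so clearing the former and setting the latter
 * marks the GUID as a locally administered unicast identifier.
 */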
void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 guid;

	/* hw GUID */
	if (entry == 0)
		return;

	get_random_bytes((char *)&guid, sizeof(guid));
	guid &= ~(cpu_to_be64(1ULL << 56));
	guid |= cpu_to_be64(1ULL << 57);
	priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
}

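/*
 * Set up all software resource tables in dependency order (UAR, PD,
 * XRCD, MR, MCG, EQ, CQ, SRQ, QP, counters), switch the command
 * interface to event-driven mode once EQs exist, and verify interrupt
 * delivery with a NOP command before configuring the ports. The error
 * path unwinds in exact reverse order.
 */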
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
		goto err_xrcd_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
			goto err_mr_table_free;
		}
		err = mlx4_config_mad_demux(dev);
		if (err) {
			mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
			goto err_mcg_table_free;
		}
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
		goto err_mcg_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n",
				  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
			mlx4_warn(dev, "Trying again without MSI-X\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
				 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
		goto err_srq_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_counters_table(dev);
		if (err && err != -ENOENT) {
			mlx4_err(dev, "Failed to initialize counters table, aborting\n");
			goto err_qp_table_free;
		}
	}

	err = mlx4_allocate_default_counters(dev);
	if (err) {
		mlx4_err(dev, "Failed to allocate default counters, aborting\n");
		goto err_counters_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
					  port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			/* initialize per-slave default ib port capabilities */
			if (mlx4_is_master(dev)) {
				int i;
				for (i = 0; i < dev->num_slaves; i++) {
					if (i == mlx4_master_func_num(dev))
						continue;
					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
						ib_port_default_caps;
				}
			}

			if (mlx4_is_mfunc(dev))
				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
			else
				dev->caps.port_ib_mtu[port] = IB_MTU_4096;

			err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
					    dev->caps.pkey_table_len[port] : -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_default_counters_free;
			}
		}
	}

	return 0;

err_default_counters_free:
	mlx4_cleanup_default_counters(dev);

err_counters_table_free:
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_counters_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mcg_table_free:
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_mcg_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}

static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
{
	int requested_cpu = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eq *eq;
	int off = 0;
	int i;

	if (eqn > dev->caps.num_comp_vectors)
		return -EINVAL;

	for (i = 1; i < port; i++)
		off += mlx4_get_eqs_per_port(dev, i);

	requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);

	/* Meaning EQs are shared, and this call comes from the second port */
	if (requested_cpu < 0)
		return 0;

	eq = &priv->eq_table.eq[eqn];

	eq->affinity_cpu_id = requested_cpu % num_online_cpus();

	return 0;
}

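/*
 * Request one MSI-X vector per online CPU per port plus one for the
 * asynchronous EQ, clamped by the number of available EQs and MAX_MSIX.
 * Completion vectors are then distributed evenly between the ports; if
 * MSI-X cannot be enabled, the driver falls back to a single shared
 * legacy interrupt.
 */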
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int i;
	int port = 0;

	if (msi_x) {
		int nreq = dev->caps.num_ports * num_online_cpus() + 1;

		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
			     nreq);
		if (nreq > MAX_MSIX)
			nreq = MAX_MSIX;

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

		nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
					     nreq);

		if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
			kfree(entries);
			goto no_msi;
		}
		/* 1 is reserved for events (asynchronous EQ) */
		dev->caps.num_comp_vectors = nreq - 1;

		priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
		bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
			    dev->caps.num_ports);

		for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
			if (i == MLX4_EQ_ASYNC)
				continue;

			priv->eq_table.eq[i].irq =
				entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;

			if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
				bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
					    dev->caps.num_ports);
				/* We don't set affinity hint when there
				 * aren't enough EQs.
				 */
			} else {
				set_bit(port,
					priv->eq_table.eq[i].actv_ports.ports);
				if (mlx4_init_affinity_hint(dev, port + 1, i))
					mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
						  i);
			}
			/* We divide the EQs evenly between the two ports.
			 * (dev->caps.num_comp_vectors / dev->caps.num_ports)
			 * refers to the number of EQs per port
			 * (i.e., eqs_per_port). Theoretically, we would like to
			 * write something like (i + 1) % eqs_per_port == 0.
			 * However, since there's an asynchronous EQ, we have
			 * to skip over it by comparing this condition to
			 * !!((i + 1) > MLX4_EQ_ASYNC).
			 */
			if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
			    ((i + 1) %
			     (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
			    !!((i + 1) > MLX4_EQ_ASYNC))
				/* If dev->caps.num_comp_vectors < dev->caps.num_ports,
				 * everything is shared anyway.
				 */
				port++;
		}

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;

	BUG_ON(MLX4_EQ_ASYNC >= 2);
	for (i = 0; i < 2; ++i) {
		priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
		if (i != MLX4_EQ_ASYNC) {
			bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
				    dev->caps.num_ports);
		}
	}
}

static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		mlx4_init_roce_gid_table(dev, &info->gid_table);
		info->base_qpn = mlx4_get_base_qpn(dev, port);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev))
		info->port_attr.attr.mode = S_IRUGO;
	else {
		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_attr.store = set_port_type;
	}
	info->port_attr.show = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
	info->port_mtu_attr.attr.name = info->dev_mtu_name;
	if (mlx4_is_mfunc(dev))
		info->port_mtu_attr.attr.mode = S_IRUGO;
	else {
		info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_mtu_attr.store = set_port_ib_mtu;
	}
	info->port_mtu_attr.show = show_port_ib_mtu;
	sysfs_attr_init(&info->port_mtu_attr.attr);

	err = device_create_file(&dev->persist->pdev->dev,
				 &info->port_mtu_attr);
	if (err) {
		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
		device_remove_file(&info->dev->persist->pdev->dev,
				   &info->port_attr);
		info->port = -1;
	}

	return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
	device_remove_file(&info->dev->persist->pdev->dev,
			   &info->port_mtu_attr);
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(info->rmap);
	info->rmap = NULL;
#endif
}

static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
	return 0;
}

static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}

static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}

#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

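/*
 * Multiple PFs are not supported, so ownership of the device is
 * arbitrated through a semaphore register at fixed offset
 * MLX4_OWNER_BASE in BAR 0: a zero readback means this driver instance
 * now owns the device, nonzero means another entity already does.
 */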
static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	if (pci_channel_offline(dev->persist->pdev))
		return -EIO;

	owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
			MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}

static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	if (pci_channel_offline(dev->persist->pdev))
		return;

	owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
			MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}
	writel(0, owner);
	msleep(1000);
	iounmap(owner);
}

#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
				  !!((flags) & MLX4_FLAG_MASTER))

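/*
 * Enable SR-IOV and return the updated device flags. On the reset flow
 * only the per-VF bookkeeping array is reallocated, since the PCI
 * SR-IOV state survives the reset; otherwise pci_enable_sriov() is
 * called (unless SR-IOV is already enabled) and the function is
 * promoted to master mode. On failure the MASTER flag is stripped from
 * the returned flags.
 */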
static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
			     u8 total_vfs, int existing_vfs, int reset_flow)
{
	u64 dev_flags = dev->flags;
	int err = 0;

	if (reset_flow) {
		dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
				       GFP_KERNEL);
		if (!dev->dev_vfs)
			goto free_mem;
		return dev_flags;
	}

	atomic_inc(&pf_loading);
	if (dev->flags & MLX4_FLAG_SRIOV) {
		if (existing_vfs != total_vfs) {
			mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
				 existing_vfs, total_vfs);
			total_vfs = existing_vfs;
		}
	}

	dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
	if (NULL == dev->dev_vfs) {
		mlx4_err(dev, "Failed to allocate memory for VFs\n");
		goto disable_sriov;
	}

	if (!(dev->flags & MLX4_FLAG_SRIOV)) {
		mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
		err = pci_enable_sriov(pdev, total_vfs);
	}
	if (err) {
		mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
			 err);
		goto disable_sriov;
	} else {
		mlx4_warn(dev, "Running in master mode\n");
		dev_flags |= MLX4_FLAG_SRIOV |
			     MLX4_FLAG_MASTER;
		dev_flags &= ~MLX4_FLAG_SLAVE;
		dev->persist->num_vfs = total_vfs;
	}
	return dev_flags;

disable_sriov:
	atomic_dec(&pf_loading);
free_mem:
	dev->persist->num_vfs = 0;
	kfree(dev->dev_vfs);
	dev->dev_vfs = NULL;
	return dev_flags & ~MLX4_FLAG_MASTER;
}

enum {
	MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
};

static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			      int *nvfs)
{
	int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
	/* Checking for 64 VFs as a limitation of CX2 */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
	    requested_vfs >= 64) {
		mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
			 requested_vfs);
		return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
	}
	return 0;
}

static int mlx4_pci_enable_device(struct mlx4_dev *dev)
{
	struct pci_dev *pdev = dev->persist->pdev;
	int err = 0;

	mutex_lock(&dev->persist->pci_status_mutex);
	if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->persist->pci_status_mutex);

	return err;
}

static void mlx4_pci_disable_device(struct mlx4_dev *dev)
{
	struct pci_dev *pdev = dev->persist->pdev;

	mutex_lock(&dev->persist->pci_status_mutex);
	if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->persist->pci_status_mutex);
}

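/*
 * Core probe path shared by first-time init and recovery flows: claim
 * ownership and reset the HCA (PF only), start the command interface,
 * boot the firmware, bring up the HCA and its resource tables, enable
 * MSI-X and SR-IOV as requested, and finally register the ports and the
 * device itself. A PF that discovers it is not the primary function
 * restarts the sequence in slave mode via the slave_start label.
 */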
mlx4_load_one(struct pci_dev * pdev,int pci_dev_data,int total_vfs,int * nvfs,struct mlx4_priv * priv,int reset_flow)3200 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
3201 int total_vfs, int *nvfs, struct mlx4_priv *priv,
3202 int reset_flow)
3203 {
3204 struct mlx4_dev *dev;
3205 unsigned sum = 0;
3206 int err;
3207 int port;
3208 int i;
3209 struct mlx4_dev_cap *dev_cap = NULL;
3210 int existing_vfs = 0;
3211
3212 dev = &priv->dev;
3213
3214 INIT_LIST_HEAD(&priv->ctx_list);
3215 spin_lock_init(&priv->ctx_lock);
3216
3217 mutex_init(&priv->port_mutex);
3218 mutex_init(&priv->bond_mutex);
3219
3220 INIT_LIST_HEAD(&priv->pgdir_list);
3221 mutex_init(&priv->pgdir_mutex);
3222 spin_lock_init(&priv->cmd.context_lock);
3223
3224 INIT_LIST_HEAD(&priv->bf_list);
3225 mutex_init(&priv->bf_mutex);
3226
3227 dev->rev_id = pdev->revision;
3228 dev->numa_node = dev_to_node(&pdev->dev);
3229
3230 /* Detect if this device is a virtual function */
3231 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
3232 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
3233 dev->flags |= MLX4_FLAG_SLAVE;
3234 } else {
3235 /* We reset the device and enable SRIOV only for physical
3236 * devices. Try to claim ownership on the device;
3237 * if already taken, skip -- do not allow multiple PFs */
3238 err = mlx4_get_ownership(dev);
3239 if (err) {
3240 if (err < 0)
3241 return err;
3242 else {
3243 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
3244 return -EINVAL;
3245 }
3246 }
3247
3248 atomic_set(&priv->opreq_count, 0);
3249 INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
3250
3251 /*
3252 * Now reset the HCA before we touch the PCI capabilities or
3253 * attempt a firmware command, since a boot ROM may have left
3254 * the HCA in an undefined state.
3255 */
3256 err = mlx4_reset(dev);
3257 if (err) {
3258 mlx4_err(dev, "Failed to reset HCA, aborting\n");
3259 goto err_sriov;
3260 }
3261
3262 if (total_vfs) {
3263 dev->flags = MLX4_FLAG_MASTER;
3264 existing_vfs = pci_num_vf(pdev);
3265 if (existing_vfs)
3266 dev->flags |= MLX4_FLAG_SRIOV;
3267 dev->persist->num_vfs = total_vfs;
3268 }
3269 }
3270
3271 /* on load remove any previous indication of internal error,
3272 * device is up.
3273 */
3274 dev->persist->state = MLX4_DEVICE_STATE_UP;
3275
3276 slave_start:
3277 err = mlx4_cmd_init(dev);
3278 if (err) {
3279 mlx4_err(dev, "Failed to init command interface, aborting\n");
3280 goto err_sriov;
3281 }
3282
3283 /* In slave functions, the communication channel must be initialized
3284 * before posting commands. Also, init num_slaves before calling
3285 * mlx4_init_hca */
3286 if (mlx4_is_mfunc(dev)) {
3287 if (mlx4_is_master(dev)) {
3288 dev->num_slaves = MLX4_MAX_NUM_SLAVES;
3289
3290 } else {
3291 dev->num_slaves = 0;
3292 err = mlx4_multi_func_init(dev);
3293 if (err) {
3294 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
3295 goto err_cmd;
3296 }
3297 }
3298 }
3299
3300 err = mlx4_init_fw(dev);
3301 if (err) {
3302 mlx4_err(dev, "Failed to init fw, aborting.\n");
3303 goto err_mfunc;
3304 }
3305
3306 if (mlx4_is_master(dev)) {
3307 /* when we hit the goto slave_start below, dev_cap already initialized */
3308 if (!dev_cap) {
3309 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
3310
3311 if (!dev_cap) {
3312 err = -ENOMEM;
3313 goto err_fw;
3314 }
3315
3316 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3317 if (err) {
3318 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3319 goto err_fw;
3320 }
3321
3322 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3323 goto err_fw;
3324
3325 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
3326 u64 dev_flags = mlx4_enable_sriov(dev, pdev,
3327 total_vfs,
3328 existing_vfs,
3329 reset_flow);
3330
3331 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3332 dev->flags = dev_flags;
3333 if (!SRIOV_VALID_STATE(dev->flags)) {
3334 mlx4_err(dev, "Invalid SRIOV state\n");
3335 goto err_sriov;
3336 }
3337 err = mlx4_reset(dev);
3338 if (err) {
3339 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
3340 goto err_sriov;
3341 }
3342 goto slave_start;
3343 }
3344 } else {
3345 /* Legacy mode FW requires SRIOV to be enabled before
3346 * doing QUERY_DEV_CAP, since max_eq's value is different if
3347 * SRIOV is enabled.
3348 */
3349 memset(dev_cap, 0, sizeof(*dev_cap));
3350 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3351 if (err) {
3352 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3353 goto err_fw;
3354 }
3355
3356 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3357 goto err_fw;
3358 }
3359 }
3360
3361 err = mlx4_init_hca(dev);
3362 if (err) {
3363 if (err == -EACCES) {
3364 /* Not primary Physical function
3365 * Running in slave mode */
3366 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3367 /* We're not a PF */
3368 if (dev->flags & MLX4_FLAG_SRIOV) {
3369 if (!existing_vfs)
3370 pci_disable_sriov(pdev);
3371 if (mlx4_is_master(dev) && !reset_flow)
3372 atomic_dec(&pf_loading);
3373 dev->flags &= ~MLX4_FLAG_SRIOV;
3374 }
3375 if (!mlx4_is_slave(dev))
3376 mlx4_free_ownership(dev);
3377 dev->flags |= MLX4_FLAG_SLAVE;
3378 dev->flags &= ~MLX4_FLAG_MASTER;
3379 goto slave_start;
3380 } else
3381 goto err_fw;
3382 }
3383
3384 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
3385 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
3386 existing_vfs, reset_flow);
3387
3388 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
3389 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
3390 dev->flags = dev_flags;
3391 err = mlx4_cmd_init(dev);
3392 if (err) {
3393 /* Only VHCR is cleaned up, so could still
3394 * send FW commands
3395 */
3396 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
3397 goto err_close;
3398 }
3399 } else {
3400 dev->flags = dev_flags;
3401 }
3402
3403 if (!SRIOV_VALID_STATE(dev->flags)) {
3404 mlx4_err(dev, "Invalid SRIOV state\n");
3405 goto err_close;
3406 }
3407 }
3408
3409 /* check if the device is functioning at its maximum possible speed.
3410 * No return code for this call, just warn the user in case of PCI
3411 * express device capabilities are under-satisfied by the bus.
3412 */
3413 if (!mlx4_is_slave(dev))
3414 mlx4_check_pcie_caps(dev);
3415
3416 /* In master functions, the communication channel must be initialized
3417 * after obtaining its address from fw */
3418 if (mlx4_is_master(dev)) {
3419 if (dev->caps.num_ports < 2 &&
3420 num_vfs_argc > 1) {
3421 err = -EINVAL;
3422 mlx4_err(dev,
3423 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
3424 dev->caps.num_ports);
3425 goto err_close;
3426 }
3427 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
3428
3429 for (i = 0;
3430 i < sizeof(dev->persist->nvfs)/
3431 sizeof(dev->persist->nvfs[0]); i++) {
3432 unsigned j;
3433
3434 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
3435 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
3436 dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
3437 dev->caps.num_ports;
3438 }
3439 }
3440
3441 /* In master functions, the communication channel
3442 * must be initialized after obtaining its address from fw
3443 */
3444 err = mlx4_multi_func_init(dev);
3445 if (err) {
3446 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
3447 goto err_close;
3448 }
3449 }
3450
3451 err = mlx4_alloc_eq_table(dev);
3452 if (err)
3453 goto err_master_mfunc;
3454
3455 bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
3456 mutex_init(&priv->msix_ctl.pool_lock);
3457
3458 mlx4_enable_msi_x(dev);
3459 if ((mlx4_is_mfunc(dev)) &&
3460 !(dev->flags & MLX4_FLAG_MSI_X)) {
3461 err = -ENOSYS;
3462 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
3463 goto err_free_eq;
3464 }
3465
3466 if (!mlx4_is_slave(dev)) {
3467 err = mlx4_init_steering(dev);
3468 if (err)
3469 goto err_disable_msix;
3470 }
3471
3472 mlx4_init_quotas(dev);
3473
3474 err = mlx4_setup_hca(dev);
3475 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
3476 !mlx4_is_mfunc(dev)) {
3477 dev->flags &= ~MLX4_FLAG_MSI_X;
3478 dev->caps.num_comp_vectors = 1;
3479 pci_disable_msix(pdev);
3480 err = mlx4_setup_hca(dev);
3481 }
3482
3483 if (err)
3484 goto err_steer;
3485
3486 /* When PF resources are ready arm its comm channel to enable
3487 * getting commands
3488 */
3489 if (mlx4_is_master(dev)) {
3490 err = mlx4_ARM_COMM_CHANNEL(dev);
3491 if (err) {
3492 mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
3493 err);
3494 goto err_steer;
3495 }
3496 }
3497
3498 for (port = 1; port <= dev->caps.num_ports; port++) {
3499 err = mlx4_init_port_info(dev, port);
3500 if (err)
3501 goto err_port;
3502 }
3503
3504 priv->v2p.port1 = 1;
3505 priv->v2p.port2 = 2;
3506
3507 err = mlx4_register_device(dev);
3508 if (err)
3509 goto err_port;
3510
3511 mlx4_request_modules(dev);
3512
3513 mlx4_sense_init(dev);
3514 mlx4_start_sense(dev);
3515
3516 priv->removed = 0;
3517
3518 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3519 atomic_dec(&pf_loading);
3520
3521 kfree(dev_cap);
3522 return 0;
3523
3524 err_port:
3525 for (--port; port >= 1; --port)
3526 mlx4_cleanup_port_info(&priv->port[port]);
3527
3528 mlx4_cleanup_counters_table(dev);
3529 mlx4_cleanup_qp_table(dev);
3530 mlx4_cleanup_srq_table(dev);
3531 mlx4_cleanup_cq_table(dev);
3532 mlx4_cmd_use_polling(dev);
3533 mlx4_cleanup_eq_table(dev);
3534 mlx4_cleanup_mcg_table(dev);
3535 mlx4_cleanup_mr_table(dev);
3536 mlx4_cleanup_xrcd_table(dev);
3537 mlx4_cleanup_pd_table(dev);
3538 mlx4_cleanup_uar_table(dev);
3539
3540 err_steer:
3541 if (!mlx4_is_slave(dev))
3542 mlx4_clear_steering(dev);
3543
3544 err_disable_msix:
3545 if (dev->flags & MLX4_FLAG_MSI_X)
3546 pci_disable_msix(pdev);
3547
3548 err_free_eq:
3549 mlx4_free_eq_table(dev);
3550
3551 err_master_mfunc:
3552 if (mlx4_is_master(dev)) {
3553 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
3554 mlx4_multi_func_cleanup(dev);
3555 }
3556
3557 if (mlx4_is_slave(dev)) {
3558 kfree(dev->caps.qp0_qkey);
3559 kfree(dev->caps.qp0_tunnel);
3560 kfree(dev->caps.qp0_proxy);
3561 kfree(dev->caps.qp1_tunnel);
3562 kfree(dev->caps.qp1_proxy);
3563 }
3564
3565 err_close:
3566 mlx4_close_hca(dev);
3567
3568 err_fw:
3569 mlx4_close_fw(dev);
3570
3571 err_mfunc:
3572 if (mlx4_is_slave(dev))
3573 mlx4_multi_func_cleanup(dev);
3574
3575 err_cmd:
3576 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3577
3578 err_sriov:
3579 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
3580 pci_disable_sriov(pdev);
3581 dev->flags &= ~MLX4_FLAG_SRIOV;
3582 }
3583
3584 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3585 atomic_dec(&pf_loading);
3586
3587 kfree(priv->dev.dev_vfs);
3588
3589 if (!mlx4_is_slave(dev))
3590 mlx4_free_ownership(dev);
3591
3592 kfree(dev_cap);
3593 return err;
3594 }
3595
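/*
 * First-stage PCI initialization: enable the device, validate the
 * num_vfs/probe_vf module parameters, check that the DCS and UAR BARs
 * are present, claim the PCI regions and set the DMA masks, then hand
 * off to mlx4_load_one() for the HCA-level bring-up.
 */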
static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
                           struct mlx4_priv *priv)
{
        int err;
        int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
        int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
        const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
                {2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
        unsigned total_vfs = 0;
        unsigned int i;

        pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

        err = mlx4_pci_enable_device(&priv->dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                return err;
        }

        /* Due to the requirement that all VFs and the PF are *guaranteed*
         * two MACs per port, we must limit the number of VFs to 63 (since
         * there are 128 MACs).
         */
        for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
             total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
                nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
                if (nvfs[i] < 0) {
                        dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
                        err = -EINVAL;
                        goto err_disable_pdev;
                }
        }
        for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
             i++) {
                prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
                if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
                        dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
                        err = -EINVAL;
                        goto err_disable_pdev;
                }
        }
        if (total_vfs > MLX4_MAX_NUM_VF) {
                dev_err(&pdev->dev,
                        "Requested more VFs (%d) than allowed by hw (%d)\n",
                        total_vfs, MLX4_MAX_NUM_VF);
                err = -EINVAL;
                goto err_disable_pdev;
        }

        for (i = 0; i < MLX4_MAX_PORTS; i++) {
                if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
                        dev_err(&pdev->dev,
                                "Requested more VFs (%d) for port (%d) than allowed by driver (%d)\n",
                                nvfs[i] + nvfs[2], i + 1,
                                MLX4_MAX_NUM_VF_P_PORT);
                        err = -EINVAL;
                        goto err_disable_pdev;
                }
        }

        /* Check for BARs. */
        if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
            !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
                        pci_dev_data, (long)pci_resource_flags(pdev, 0));
                err = -ENODEV;
                goto err_disable_pdev;
        }
        if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Missing UAR, aborting\n");
                err = -ENODEV;
                goto err_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
                goto err_disable_pdev;
        }

        pci_set_master(pdev);

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
                        goto err_release_regions;
                }
        }
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
                        goto err_release_regions;
                }
        }

        /* Allow large DMA segments, up to the firmware limit of 1 GB */
        dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
        /* Detect if this device is a virtual function */
        if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
                /* When acting as PF, we normally skip VFs unless explicitly
                 * requested to probe them.
                 */
                if (total_vfs) {
                        unsigned vfs_offset = 0;

                        for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
                             vfs_offset + nvfs[i] < extended_func_num(pdev);
                             vfs_offset += nvfs[i], i++)
                                ;
                        if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
                                err = -ENODEV;
                                goto err_release_regions;
                        }
                        if ((extended_func_num(pdev) - vfs_offset)
                            > prb_vf[i]) {
                                dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
                                         extended_func_num(pdev));
                                err = -ENODEV;
                                goto err_release_regions;
                        }
                }
        }

        err = mlx4_catas_init(&priv->dev);
        if (err)
                goto err_release_regions;

        err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
        if (err)
                goto err_catas;

        return 0;

err_catas:
        mlx4_catas_end(&priv->dev);

err_release_regions:
        pci_release_regions(pdev);

err_disable_pdev:
        mlx4_pci_disable_device(&priv->dev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

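/*
 * PCI probe entry point: allocate the driver-private and persistent
 * state, run the first-stage initialization, and publish the firmware
 * version and board id under a "hw" sysctl node.
 */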
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *node;
        struct sysctl_oid_list *node_list;
        struct mlx4_priv *priv;
        struct mlx4_dev *dev;
        int ret;

        printk_once(KERN_INFO "%s", mlx4_version);

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        dev = &priv->dev;
        dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
        if (!dev->persist) {
                kfree(priv);
                return -ENOMEM;
        }
        dev->persist->pdev = pdev;
        dev->persist->dev = dev;
        pci_set_drvdata(pdev, dev->persist);
        priv->pci_dev_data = id->driver_data;
        mutex_init(&dev->persist->device_state_mutex);
        mutex_init(&dev->persist->interface_state_mutex);
        mutex_init(&dev->persist->pci_status_mutex);

        ret = __mlx4_init_one(pdev, id->driver_data, priv);
        if (ret) {
                kfree(dev->persist);
                kfree(priv);
                return ret;
        } else {
                device_set_desc(pdev->dev.bsddev, mlx4_description);
                pci_save_state(pdev->dev.bsddev);
        }

        snprintf(dev->fw_str, sizeof(dev->fw_str), "%d.%d.%d",
                 (int) (dev->caps.fw_ver >> 32),
                 (int) (dev->caps.fw_ver >> 16) & 0xffff,
                 (int) (dev->caps.fw_ver & 0xffff));

        ctx = &dev->hw_ctx;
        sysctl_ctx_init(ctx);
        node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(pdev->dev.kobj.oidp),
            OID_AUTO, "hw", CTLFLAG_RD, 0, "mlx4 dev hw information");
        if (node != NULL) {
                node_list = SYSCTL_CHILDREN(node);
                SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO,
                    "fw_version", CTLFLAG_RD, dev->fw_str, 0,
                    "Device firmware version");
                SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO,
                    "board_id", CTLFLAG_RD, dev->board_id, 0,
                    "Device board identifier");
        }

        return ret;
}

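/*
 * Reset the driver-private state while preserving the persistent
 * structure and the flags that must survive a reset.
 */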
static void mlx4_clean_dev(struct mlx4_dev *dev)
{
        struct mlx4_dev_persistent *persist = dev->persist;
        struct mlx4_priv *priv = mlx4_priv(dev);
        unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);

        memset(priv, 0, sizeof(*priv));
        priv->dev.persist = persist;
        priv->dev.flags = flags;
}

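/*
 * Tear down one device instance in roughly the reverse order of
 * mlx4_load_one(). The current port types are saved first so that a
 * later restart can restore them.
 */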
static void mlx4_unload_one(struct pci_dev *pdev)
{
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
        struct mlx4_dev *dev = persist->dev;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pci_dev_data;
        int p, i;

        if (priv->removed)
                return;

        /* Save the current port types for a later restore. */
        for (i = 0; i < dev->caps.num_ports; i++) {
                dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
                dev->persist->curr_port_poss_type[i] =
                        dev->caps.possible_type[i + 1];
        }

        pci_dev_data = priv->pci_dev_data;

        mlx4_stop_sense(dev);
        mlx4_unregister_device(dev);

        for (p = 1; p <= dev->caps.num_ports; p++) {
                mlx4_cleanup_port_info(&priv->port[p]);
                mlx4_CLOSE_PORT(dev, p);
        }

        if (mlx4_is_master(dev))
                mlx4_free_resource_tracker(dev,
                                           RES_TR_FREE_SLAVES_ONLY);

        mlx4_cleanup_default_counters(dev);
        if (!mlx4_is_slave(dev))
                mlx4_cleanup_counters_table(dev);
        mlx4_cleanup_qp_table(dev);
        mlx4_cleanup_srq_table(dev);
        mlx4_cleanup_cq_table(dev);
        mlx4_cmd_use_polling(dev);
        mlx4_cleanup_eq_table(dev);
        mlx4_cleanup_mcg_table(dev);
        mlx4_cleanup_mr_table(dev);
        mlx4_cleanup_xrcd_table(dev);
        mlx4_cleanup_pd_table(dev);

        if (mlx4_is_master(dev))
                mlx4_free_resource_tracker(dev,
                                           RES_TR_FREE_STRUCTS_ONLY);

        iounmap(priv->kar);
        mlx4_uar_free(dev, &priv->driver_uar);
        mlx4_cleanup_uar_table(dev);
        if (!mlx4_is_slave(dev))
                mlx4_clear_steering(dev);
        mlx4_free_eq_table(dev);
        if (mlx4_is_master(dev))
                mlx4_multi_func_cleanup(dev);
        mlx4_close_hca(dev);
        mlx4_close_fw(dev);
        if (mlx4_is_slave(dev))
                mlx4_multi_func_cleanup(dev);
        mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

        if (dev->flags & MLX4_FLAG_MSI_X)
                pci_disable_msix(pdev);

        if (!mlx4_is_slave(dev))
                mlx4_free_ownership(dev);

        kfree(dev->caps.qp0_qkey);
        kfree(dev->caps.qp0_tunnel);
        kfree(dev->caps.qp0_proxy);
        kfree(dev->caps.qp1_tunnel);
        kfree(dev->caps.qp1_proxy);
        kfree(dev->dev_vfs);

        mlx4_clean_dev(dev);
        priv->pci_dev_data = pci_dev_data;
        priv->removed = 1;
}

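/*
 * PCI remove entry point: mark the interface for deletion, unload the
 * device if it is still up, and disable SR-IOV unless VFs are still
 * active.
 */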
static void mlx4_remove_one(struct pci_dev *pdev)
{
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
        struct mlx4_dev *dev = persist->dev;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int active_vfs = 0;

        mutex_lock(&persist->interface_state_mutex);
        persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
        mutex_unlock(&persist->interface_state_mutex);

        /*
         * Clear the device description to avoid use after free,
         * because the bsddev is not destroyed when this module is
         * unloaded:
         */
        device_set_desc(pdev->dev.bsddev, NULL);

        /* Disabling SR-IOV is not allowed while there are active VFs */
        if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
                active_vfs = mlx4_how_many_lives_vf(dev);
                if (active_vfs) {
                        pr_warn("Removing PF when there are active VFs!\n");
                        pr_warn("Will not disable SR-IOV.\n");
                }
        }

        /* The device is marked for deletion; proceed now without the lock,
         * letting other tasks be terminated.
         */
        if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
                mlx4_unload_one(pdev);
        else
                mlx4_info(dev, "%s: interface is down\n", __func__);
        mlx4_catas_end(dev);
        if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
                mlx4_warn(dev, "Disabling SR-IOV\n");
                pci_disable_sriov(pdev);
        }

        pci_release_regions(pdev);
        pci_disable_device(pdev);
        kfree(dev->persist);
        kfree(priv);
        pci_set_drvdata(pdev, NULL);
}

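/*
 * Re-apply the port types saved by mlx4_unload_one(), pausing port
 * sensing while the change is made.
 */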
static int restore_current_port_types(struct mlx4_dev *dev,
                                      enum mlx4_port_type *types,
                                      enum mlx4_port_type *poss_types)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err, i;

        mlx4_stop_sense(dev);

        mutex_lock(&priv->port_mutex);
        for (i = 0; i < dev->caps.num_ports; i++)
                dev->caps.possible_type[i + 1] = poss_types[i];
        err = mlx4_change_port_types(dev, types);
        mutex_unlock(&priv->port_mutex);

        mlx4_start_sense(dev);

        return err;
}

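/*
 * Full restart of one device: unload and reload it with the same PCI
 * device data and VF configuration, then restore the saved port types.
 */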
int mlx4_restart_one(struct pci_dev *pdev)
{
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
        struct mlx4_dev *dev = persist->dev;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
        int pci_dev_data, err, total_vfs;

        pci_dev_data = priv->pci_dev_data;
        total_vfs = dev->persist->num_vfs;
        memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

        mlx4_unload_one(pdev);
        err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
        if (err) {
                mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
                         __func__, pci_name(pdev), err);
                return err;
        }

        err = restore_current_port_types(dev, dev->persist->curr_port_type,
                                         dev->persist->curr_port_poss_type);
        if (err)
                mlx4_err(dev, "could not restore original port types (%d)\n",
                         err);

        return err;
}

static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
        /* MT25408 "Hermon" SDR */
        { PCI_VDEVICE(MELLANOX, 0x6340),
          .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25408 "Hermon" DDR */
        { PCI_VDEVICE(MELLANOX, 0x634a),
          .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25408 "Hermon" QDR */
        { PCI_VDEVICE(MELLANOX, 0x6354),
          .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25408 "Hermon" DDR PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x6732),
          .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25408 "Hermon" QDR PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x673c),
          .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25408 "Hermon" EN 10GigE */
        { PCI_VDEVICE(MELLANOX, 0x6368),
          .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x6750),
          .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25458 ConnectX EN 10GBASE-T 10GigE */
        { PCI_VDEVICE(MELLANOX, 0x6372),
          .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
        { PCI_VDEVICE(MELLANOX, 0x675a),
          .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT26468 ConnectX EN 10GigE PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x6764),
          .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
        { PCI_VDEVICE(MELLANOX, 0x6746),
          .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT26478 ConnectX2 40GigE PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x676e),
          .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25400 Family [ConnectX-2 Virtual Function] */
        { PCI_VDEVICE(MELLANOX, 0x1002),
          .driver_data = MLX4_PCI_DEV_IS_VF },
        /* MT27500 Family [ConnectX-3] */
        { PCI_VDEVICE(MELLANOX, 0x1003) },
        /* MT27500 Family [ConnectX-3 Virtual Function] */
        { PCI_VDEVICE(MELLANOX, 0x1004),
          .driver_data = MLX4_PCI_DEV_IS_VF },
        { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
        { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
        { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
        { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
        { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
        { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
        { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
        { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
        { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
        { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
        { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
        { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
        { 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

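/*
 * AER callback: on a detected PCI channel error, move the device into
 * its error state and unload it; a permanent failure disconnects,
 * anything else requests a slot reset.
 */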
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);

        mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
        mlx4_enter_error_state(persist);

        mutex_lock(&persist->interface_state_mutex);
        if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
                mlx4_unload_one(pdev);

        mutex_unlock(&persist->interface_state_mutex);
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        mlx4_pci_disable_device(persist->dev);
        return PCI_ERS_RESULT_NEED_RESET;
}

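/*
 * Slot reset handler: re-enable the device and restore bus mastering;
 * the actual reload happens in the resume callback.
 */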
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
        struct mlx4_dev *dev = persist->dev;
        int err;

        mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
        err = mlx4_pci_enable_device(dev);
        if (err) {
                mlx4_err(dev, "Cannot re-enable device, err=%d\n", err);
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}

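/*
 * Resume handler: unless the interface is already up, reload the device
 * with its previous VF configuration and restore the saved port types.
 */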
static void mlx4_pci_resume(struct pci_dev *pdev)
{
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
        struct mlx4_dev *dev = persist->dev;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
        int total_vfs;
        int err;

        mlx4_err(dev, "%s was called\n", __func__);
        total_vfs = dev->persist->num_vfs;
        memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

        mutex_lock(&persist->interface_state_mutex);
        if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
                err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
                                    priv, 1);
                if (err) {
                        mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
                                 __func__, err);
                        goto end;
                }

                err = restore_current_port_types(dev,
                                dev->persist->curr_port_type,
                                dev->persist->curr_port_poss_type);
                if (err)
                        mlx4_err(dev, "could not restore original port types (%d)\n", err);
        }
end:
        mutex_unlock(&persist->interface_state_mutex);
}

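/* Shutdown hook: unload the device if its interface is still up. */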
static void mlx4_shutdown(struct pci_dev *pdev)
{
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);

        mlx4_info(persist->dev, "mlx4_shutdown was called\n");
        mutex_lock(&persist->interface_state_mutex);
        if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
                mlx4_unload_one(pdev);
        mutex_unlock(&persist->interface_state_mutex);
}

static const struct pci_error_handlers mlx4_err_handler = {
        .error_detected = mlx4_pci_err_detected,
        .slot_reset     = mlx4_pci_slot_reset,
        .resume         = mlx4_pci_resume,
};

static struct pci_driver mlx4_driver = {
        .name           = DRV_NAME,
        .id_table       = mlx4_pci_table,
        .probe          = mlx4_init_one,
        .shutdown       = mlx4_shutdown,
        .remove         = mlx4_remove_one,
        .err_handler    = &mlx4_err_handler,
};

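/*
 * Validate the module parameters before registering the PCI driver;
 * returns nonzero on an illegal value or combination.
 */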
static int __init mlx4_verify_params(void)
{
        if ((log_num_mac < 0) || (log_num_mac > 7)) {
                pr_warn("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
                return -1;
        }

        if (log_num_vlan != 0)
                pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
                        MLX4_LOG_NUM_VLANS);

        if (use_prio != 0)
                pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");

        if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
                pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
                        log_mtts_per_seg);
                return -1;
        }

        /* Check that the port type module parameters form a legal combination */
        if (port_type_array[0] == false && port_type_array[1] == true) {
                pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
                port_type_array[0] = true;
        }

        if (mlx4_log_num_mgm_entry_size < -7 ||
            (mlx4_log_num_mgm_entry_size > 0 &&
             (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
              mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
                pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
                        mlx4_log_num_mgm_entry_size,
                        MLX4_MIN_MGM_LOG_ENTRY_SIZE,
                        MLX4_MAX_MGM_LOG_ENTRY_SIZE);
                return -1;
        }

        return 0;
}

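/* Driver entry point: create the shared workqueue and register with PCI. */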
static int __init mlx4_init(void)
{
        int ret;

        if (mlx4_verify_params())
                return -EINVAL;

        mlx4_wq = create_singlethread_workqueue("mlx4");
        if (!mlx4_wq)
                return -ENOMEM;

        ret = pci_register_driver(&mlx4_driver);
        if (ret < 0)
                destroy_workqueue(mlx4_wq);
        return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
        pci_unregister_driver(&mlx4_driver);
        destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);

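/*
 * FreeBSD module glue: a no-op event handler plus the module
 * declaration and its dependency on linuxkpi.
 */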
static int
mlx4_evhand(module_t mod, int event, void *arg)
{
        return (0);
}

static moduledata_t mlx4_mod = {
        .name = "mlx4",
        .evhand = mlx4_evhand,
};
MODULE_VERSION(mlx4, 1);
DECLARE_MODULE(mlx4, mlx4_mod, SI_SUB_OFED_PREINIT, SI_ORDER_ANY);
MODULE_DEPEND(mlx4, linuxkpi, 1, 1, 1);