/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");
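
/*
 * Illustrative usage only (not taken from the original source): the num_vfs
 * and probe_vf triplets are read here as "VFs on port 1, VFs on port 2,
 * dual-port VFs", following the port1,port2,port1+2 wording above.  Under
 * that assumption, a load such as
 *
 *	modprobe mlx4_core num_vfs=2,2,0 probe_vf=1,1,0
 *
 * would request two single-port VFs per port and let the PF driver probe
 * one VF on each port.
 */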

int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");
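
/*
 * Worked example (informational comment only): the QP-per-MCG figure quoted
 * above follows from the formula used later in slave_adjust_steering_mode(),
 * num_qp_per_mgm = 4 * ((1 << log_mc_entry_sz) / 16 - 2).  With
 * log_num_mgm_entry_size = 10 that is 4 * (1024 / 16 - 2) = 4 * 62 = 248.
 */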

static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
					 MLX4_FUNC_CAP_DMFS_A0_STATIC)

#define RESET_PERSIST_MASK_FLAGS	(MLX4_FLAG_SRIOV)

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static struct mlx4_profile low_mem_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 6,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 8,
	.num_mcg	= 1 << 8,
	.num_mpt	= 1 << 9,
	.num_mtt	= 1 << 7,
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
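
/*
 * Informational comment, not from the original source: each MTT entry maps
 * one HCA page, and a segment groups 2^log_mtts_per_seg consecutive entries.
 * Assuming MLX4_MTT_ENTRY_PER_SEG is 8 (as defined in mlx4.h), the default is
 * log_mtts_per_seg = 3, i.e. 8 MTT entries (8 pages, 32 KB with 4 KB pages)
 * per segment; the accepted range per the description above is 1-7.
 */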

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
				  "1 for IB, 2 for Ethernet");
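
/*
 * Illustrative usage only (values taken from the description above, where
 * 0 = HW default, 1 = IB, 2 = Ethernet):
 *
 *	modprobe mlx4_core port_type_array=1,2
 *
 * would request port 1 as InfiniBand and port 2 as Ethernet, subject to the
 * per-port checks done in mlx4_check_port_params() and mlx4_dev_cap() below.
 */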

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 0; i < dev->caps.num_ports - 1; i++) {
			if (port_type[i] != port_type[i + 1]) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

enum {
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};

static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err = 0;
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
			return err;
		}
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
	}
	return err;
}

static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		if (cache_line_size() != 32 && cache_line_size() != 64)
			mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}

static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
			  struct mlx4_port_cap *port_cap)
{
	dev->caps.vl_cap[port] = port_cap->max_vl;
	dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
	dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
	/* set gid and pkey table operating lengths by default
	 * to non-sriov values
	 */
	dev->caps.gid_table_len[port] = port_cap->max_gids;
	dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
	dev->caps.port_width_cap[port] = port_cap->max_port_width;
	dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
	dev->caps.def_mac[port] = port_cap->def_mac;
	dev->caps.supported_type[port] = port_cap->supported_port_types;
	dev->caps.suggested_type[port] = port_cap->suggested_type;
	dev->caps.default_sense[port] = port_cap->default_sense;
	dev->caps.trans_type[port] = port_cap->trans_type;
	dev->caps.vendor_oui[port] = port_cap->vendor_oui;
	dev->caps.wavelength[port] = port_cap->wavelength;
	dev->caps.trans_code[port] = port_cap->trans_code;

	return 0;
}

static int mlx4_dev_port(struct mlx4_dev *dev, int port,
			 struct mlx4_port_cap *port_cap)
{
	int err = 0;

	err = mlx4_QUERY_PORT(dev, port, port_cap);

	if (err)
		mlx4_err(dev, "QUERY_PORT command failed.\n");

	return err;
}

static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
{
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
		return;

	if (mlx4_is_mfunc(dev)) {
		mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
		mlx4_dbg(dev,
			 "Keep FCS is not supported - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}
}

#define MLX4_A0_STEERING_TABLE_SIZE	256
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}
	mlx4_dev_cap_dump(dev, dev_cap);

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
				      dev->caps.num_sys_eqs :
				      MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
			return err;
		}
	}

	dev->caps.uar_page_size = PAGE_SIZE;
	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
				   dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
			      dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.flags2 = dev_cap->flags2;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
		struct mlx4_init_hca_param hca_param;

		memset(&hca_param, 0, sizeof(hca_param));
		err = mlx4_QUERY_HCA(dev, &hca_param);
		/* Turn off PHV_EN flag in case phv_check_en is set.
		 * phv_check_en is a HW check that parses the packet and
		 * verifies that the phv bit was reported correctly in the
		 * wqe. To allow QinQ, the PHV_EN flag should be set and
		 * phv_check_en must be cleared, otherwise QinQ packets
		 * will be dropped by the HW.
		 */
		if (err || hca_param.phv_check_en)
			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
	}

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs = log_num_mac;
		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port
		 * 3. FW declared that it supports link sensing
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
	    (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
	    (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
		mlx4_warn(dev,
			  "Granular QoS per VF not supported with IB/Eth configuration\n");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
	}

	dev->caps.max_counters = dev_cap->max_counters;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
	else
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
	} else {
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
	}

	dev->caps.rl_caps = dev_cap->rl_caps;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
		dev->caps.dmfs_high_rate_qpn_range;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev)) {
		mlx4_enable_cqe_eqe_stride(dev);
		dev->caps.alloc_res_qp_mask =
			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
			MLX4_RESERVE_A0_QP;

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			mlx4_warn(dev, "Old device ETS support detected\n");
			mlx4_warn(dev, "Consider upgrading device FW.\n");
			dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
		}

	} else {
		dev->caps.alloc_res_qp_mask = 0;
	}

	mlx4_enable_ignore_fcs(dev);

	return 0;
}

static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
					  &lnkcap1);
	err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
					  &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
			err2 ? err2 : -EINVAL;
	}
	return 0;
}

static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	int err;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
	if (err) {
		mlx4_warn(dev,
			  "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
	if (err || speed == PCI_SPEED_UNKNOWN ||
	    width == PCIE_LNK_WIDTH_UNKNOWN) {
		mlx4_warn(dev,
			  "Unable to determine PCI device chain minimum BW\n");
		return;
	}

	if (width != width_cap || speed != speed_cap)
		mlx4_warn(dev,
			  "PCIe BW is different than device's capability\n");

	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
		  width, width_cap);
	return;
}

/* The function checks if there are live VFs and returns how many of them exist. */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1 /* the PPF is 0 */; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}

int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	u8 i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		return err;
	}

	/* Fail if the HCA has an unknown global capability;
	 * at this time global_caps should always be zeroed.
	 */
	if (hca_param.global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
			 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
		return -ENOSYS;
	}

	dev->caps.num_ports = func_cap.num_ports;
	dev->quotas.qp = func_cap.qp_quota;
	dev->quotas.srq = func_cap.srq_quota;
	dev->quotas.cq = func_cap.cq_quota;
	dev->quotas.mpt = func_cap.mpt_quota;
	dev->quotas.mtt = func_cap.mtt_quota;
	dev->caps.num_qps = 1 << hca_param.log_num_qps;
	dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs = func_cap.max_eq;
	dev->caps.reserved_eqs = func_cap.reserved_eq;
	dev->caps.reserved_lkey = func_cap.reserved_lkey;
	dev->caps.num_pds = MLX4_NUM_PDS;
	dev->caps.num_mgms = 0;
	dev->caps.num_amgms = 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	mlx4_replace_zero_macs(dev);

	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
	    !dev->caps.qp0_qkey) {
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						    &dev->caps.gid_table_len[i],
						    &dev->caps.pkey_table_len[i]))
			goto err_mem;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
	    pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		goto err_mem;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size = 32;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
		dev->caps.eqe_size = hca_param.eqe_size;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
		dev->caps.cqe_size = hca_param.cqe_size;
		/* User still needs to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
	mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
		 hca_param.rss_ip_frags ? "on" : "off");

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
	    dev->caps.bf_reg_size)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;

	return 0;

err_mem:
	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_qkey = NULL;
	dev->caps.qp0_tunnel = NULL;
	dev->caps.qp0_proxy = NULL;
	dev->caps.qp1_tunnel = NULL;
	dev->caps.qp1_proxy = NULL;

	return err;
}

static void mlx4_request_modules(struct mlx4_dev *dev)
{
	int port;
	int has_ib_port = false;
	int has_eth_port = false;
#define EN_DRV_NAME	"mlx4_en"
#define IB_DRV_NAME	"mlx4_ib"

	for (port = 1; port <= dev->caps.num_ports; port++) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			has_ib_port = true;
		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			has_eth_port = true;
	}

	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
		request_module_nowait(IB_DRV_NAME);
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
		mlx4_request_modules(dev);
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}
1052static ssize_t set_port_type(struct device *dev,
1053 struct device_attribute *attr,
1054 const char *buf, size_t count)
1055{
1056 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1057 port_attr);
1058 struct mlx4_dev *mdev = info->dev;
1059 struct mlx4_priv *priv = mlx4_priv(mdev);
1060 enum mlx4_port_type types[MLX4_MAX_PORTS];
Yevgeny Petrilin27bf91d2009-03-18 19:45:11 -07001061 enum mlx4_port_type new_types[MLX4_MAX_PORTS];
Amir Vadai0a984552014-11-02 16:26:14 +02001062 static DEFINE_MUTEX(set_port_type_mutex);
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07001063 int i;
1064 int err = 0;
1065
Amir Vadai0a984552014-11-02 16:26:14 +02001066 mutex_lock(&set_port_type_mutex);
1067
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07001068 if (!strcmp(buf, "ib\n"))
1069 info->tmp_type = MLX4_PORT_TYPE_IB;
1070 else if (!strcmp(buf, "eth\n"))
1071 info->tmp_type = MLX4_PORT_TYPE_ETH;
Yevgeny Petrilin27bf91d2009-03-18 19:45:11 -07001072 else if (!strcmp(buf, "auto\n"))
1073 info->tmp_type = MLX4_PORT_TYPE_AUTO;
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07001074 else {
1075 mlx4_err(mdev, "%s is not supported port type\n", buf);
Amir Vadai0a984552014-11-02 16:26:14 +02001076 err = -EINVAL;
1077 goto err_out;
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07001078 }
1079
Yevgeny Petrilin27bf91d2009-03-18 19:45:11 -07001080 mlx4_stop_sense(mdev);
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07001081 mutex_lock(&priv->port_mutex);
Yevgeny Petrilin27bf91d2009-03-18 19:45:11 -07001082 /* Possible type is always the one that was delivered */
1083 mdev->caps.possible_type[info->port] = info->tmp_type;
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07001084
Yevgeny Petrilin27bf91d2009-03-18 19:45:11 -07001085 for (i = 0; i < mdev->caps.num_ports; i++) {
1086 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
1087 mdev->caps.possible_type[i+1];
1088 if (types[i] == MLX4_PORT_TYPE_AUTO)
1089 types[i] = mdev->caps.port_type[i+1];
1090 }
1091
Yevgeny Petrilin58a60162011-12-19 04:00:26 +00001092 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
1093 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
Yevgeny Petrilin27bf91d2009-03-18 19:45:11 -07001094 for (i = 1; i <= mdev->caps.num_ports; i++) {
1095 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
1096 mdev->caps.possible_type[i] = mdev->caps.port_type[i];
1097 err = -EINVAL;
1098 }
1099 }
1100 }
1101 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001102 mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
Yevgeny Petrilin27bf91d2009-03-18 19:45:11 -07001103 goto out;
1104 }
1105
1106 mlx4_do_sense_ports(mdev, new_types, types);
1107
1108 err = mlx4_check_port_params(mdev, new_types);
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07001109 if (err)
1110 goto out;
1111
Yevgeny Petrilin27bf91d2009-03-18 19:45:11 -07001112 /* We are about to apply the changes after the configuration
1113 * was verified, no need to remember the temporary types
1114 * any more */
1115 for (i = 0; i < mdev->caps.num_ports; i++)
1116 priv->port[i + 1].tmp_type = 0;
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07001117
Yevgeny Petrilin27bf91d2009-03-18 19:45:11 -07001118 err = mlx4_change_port_types(mdev, new_types);
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07001119
1120out:
Yevgeny Petrilin27bf91d2009-03-18 19:45:11 -07001121 mlx4_start_sense(mdev);
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07001122 mutex_unlock(&priv->port_mutex);
Amir Vadai0a984552014-11-02 16:26:14 +02001123err_out:
1124 mutex_unlock(&set_port_type_mutex);
1125
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07001126 return err ? err : count;
1127}

enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}
1161static ssize_t show_port_ib_mtu(struct device *dev,
1162 struct device_attribute *attr,
1163 char *buf)
1164{
1165 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1166 port_mtu_attr);
1167 struct mlx4_dev *mdev = info->dev;
1168
1169 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
1170 mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
1171
1172 sprintf(buf, "%d\n",
1173 ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
1174 return strlen(buf);
1175}
1176
1177static ssize_t set_port_ib_mtu(struct device *dev,
1178 struct device_attribute *attr,
1179 const char *buf, size_t count)
1180{
1181 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1182 port_mtu_attr);
1183 struct mlx4_dev *mdev = info->dev;
1184 struct mlx4_priv *priv = mlx4_priv(mdev);
1185 int err, port, mtu, ibta_mtu = -1;
1186
1187 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
1188 mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
1189 return -EINVAL;
1190 }
1191
Dotan Barak618fad92013-06-25 12:09:36 +03001192 err = kstrtoint(buf, 0, &mtu);
1193 if (!err)
Or Gerlitz096335b2012-01-11 19:02:17 +02001194 ibta_mtu = int_to_ibta_mtu(mtu);
1195
Dotan Barak618fad92013-06-25 12:09:36 +03001196 if (err || ibta_mtu < 0) {
Or Gerlitz096335b2012-01-11 19:02:17 +02001197 mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
1198 return -EINVAL;
1199 }
1200
1201 mdev->caps.port_ib_mtu[info->port] = ibta_mtu;
1202
1203 mlx4_stop_sense(mdev);
1204 mutex_lock(&priv->port_mutex);
1205 mlx4_unregister_device(mdev);
1206 for (port = 1; port <= mdev->caps.num_ports; port++) {
1207 mlx4_CLOSE_PORT(mdev, port);
Jack Morgenstein66349612012-06-19 11:21:44 +03001208 err = mlx4_SET_PORT(mdev, port, -1);
Or Gerlitz096335b2012-01-11 19:02:17 +02001209 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001210 mlx4_err(mdev, "Failed to set port %d, aborting\n",
1211 port);
Or Gerlitz096335b2012-01-11 19:02:17 +02001212 goto err_set_port;
1213 }
1214 }
1215 err = mlx4_register_device(mdev);
1216err_set_port:
1217 mutex_unlock(&priv->port_mutex);
1218 mlx4_start_sense(mdev);
1219 return err ? err : count;
1220}
1221
Moni Shoua53f33ae2015-02-03 16:48:33 +02001222int mlx4_bond(struct mlx4_dev *dev)
1223{
1224 int ret = 0;
1225 struct mlx4_priv *priv = mlx4_priv(dev);
1226
1227 mutex_lock(&priv->bond_mutex);
1228
1229 if (!mlx4_is_bonded(dev))
1230 ret = mlx4_do_bond(dev, true);
1231 else
1232 ret = 0;
1233
1234 mutex_unlock(&priv->bond_mutex);
1235 if (ret)
1236 mlx4_err(dev, "Failed to bond device: %d\n", ret);
1237 else
1238 mlx4_dbg(dev, "Device is bonded\n");
1239 return ret;
1240}
1241EXPORT_SYMBOL_GPL(mlx4_bond);
1242
1243int mlx4_unbond(struct mlx4_dev *dev)
1244{
1245 int ret = 0;
1246 struct mlx4_priv *priv = mlx4_priv(dev);
1247
1248 mutex_lock(&priv->bond_mutex);
1249
1250 if (mlx4_is_bonded(dev))
1251 ret = mlx4_do_bond(dev, false);
1252
1253 mutex_unlock(&priv->bond_mutex);
1254 if (ret)
1255 mlx4_err(dev, "Failed to unbond device: %d\n", ret);
1256 else
1257 mlx4_dbg(dev, "Device is unbonded\n");
1258 return ret;
1259}
1260EXPORT_SYMBOL_GPL(mlx4_unbond);
1262
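/*
 * Update the virtual-to-physical port mapping.  Requires the PORT_REMAP
 * capability.  A zero in either field of @v2p keeps the current mapping
 * for that port; cross mapping (port1 -> 2 while port2 -> 1) is rejected.
 */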
1263int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
1264{
1265 u8 port1 = v2p->port1;
1266 u8 port2 = v2p->port2;
1267 struct mlx4_priv *priv = mlx4_priv(dev);
1268 int err;
1269
1270 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
1271 return -ENOTSUPP;
1272
1273 mutex_lock(&priv->bond_mutex);
1274
1275 /* zero means keep current mapping for this port */
1276 if (port1 == 0)
1277 port1 = priv->v2p.port1;
1278 if (port2 == 0)
1279 port2 = priv->v2p.port2;
1280
1281 if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
1282 (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
1283 (port1 == 2 && port2 == 1)) {
1284 /* beyond the boundary checks, cross mapping makes
1285 * no sense and therefore is not allowed */
1286 err = -EINVAL;
1287 } else if ((port1 == priv->v2p.port1) &&
1288 (port2 == priv->v2p.port2)) {
1289 err = 0;
1290 } else {
1291 err = mlx4_virt2phy_port_map(dev, port1, port2);
1292 if (!err) {
1293 mlx4_dbg(dev, "port map changed: [%d][%d]\n",
1294 port1, port2);
1295 priv->v2p.port1 = port1;
1296 priv->v2p.port2 = port2;
1297 } else {
1298 mlx4_err(dev, "Failed to change port map: %d\n", err);
1299 }
1300 }
1301
1302 mutex_unlock(&priv->bond_mutex);
1303 return err;
1304}
1305EXPORT_SYMBOL_GPL(mlx4_port_map_set);
1306
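/*
 * Allocate ICM pages for the firmware area, hand them to the device
 * with MAP_FA and start the firmware with RUN_FW.
 */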
Roland Dreiere8f9b2e2008-02-04 20:20:41 -08001307static int mlx4_load_fw(struct mlx4_dev *dev)
Roland Dreier225c7b12007-05-08 18:00:38 -07001308{
1309 struct mlx4_priv *priv = mlx4_priv(dev);
1310 int err;
1311
1312 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
Jack Morgenstein5b0bf5e2007-08-01 12:28:20 +03001313 GFP_HIGHUSER | __GFP_NOWARN, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001314 if (!priv->fw.fw_icm) {
Joe Perches1a91de22014-05-07 12:52:57 -07001315 mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001316 return -ENOMEM;
1317 }
1318
1319 err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
1320 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001321 mlx4_err(dev, "MAP_FA command failed, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001322 goto err_free;
1323 }
1324
1325 err = mlx4_RUN_FW(dev);
1326 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001327 mlx4_err(dev, "RUN_FW command failed, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001328 goto err_unmap_fa;
1329 }
1330
1331 return 0;
1332
1333err_unmap_fa:
1334 mlx4_UNMAP_FA(dev);
1335
1336err_free:
Jack Morgenstein5b0bf5e2007-08-01 12:28:20 +03001337 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001338 return err;
1339}
1340
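/*
 * Map the central MPT (cMPT) ICM tables for QPs, SRQs, CQs and EQs.
 * Each table starts at cmpt_base plus a per-type offset of
 * (type * cmpt_entry_sz) << MLX4_CMPT_SHIFT.
 */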
Roland Dreiere8f9b2e2008-02-04 20:20:41 -08001341static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
1342 int cmpt_entry_sz)
Roland Dreier225c7b12007-05-08 18:00:38 -07001343{
1344 struct mlx4_priv *priv = mlx4_priv(dev);
1345 int err;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001346 int num_eqs;
Roland Dreier225c7b12007-05-08 18:00:38 -07001347
1348 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
1349 cmpt_base +
1350 ((u64) (MLX4_CMPT_TYPE_QP *
1351 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1352 cmpt_entry_sz, dev->caps.num_qps,
Yevgeny Petrilin93fc9e12008-10-22 10:25:29 -07001353 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1354 0, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001355 if (err)
1356 goto err;
1357
1358 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
1359 cmpt_base +
1360 ((u64) (MLX4_CMPT_TYPE_SRQ *
1361 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1362 cmpt_entry_sz, dev->caps.num_srqs,
Jack Morgenstein5b0bf5e2007-08-01 12:28:20 +03001363 dev->caps.reserved_srqs, 0, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001364 if (err)
1365 goto err_qp;
1366
1367 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
1368 cmpt_base +
1369 ((u64) (MLX4_CMPT_TYPE_CQ *
1370 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1371 cmpt_entry_sz, dev->caps.num_cqs,
Jack Morgenstein5b0bf5e2007-08-01 12:28:20 +03001372 dev->caps.reserved_cqs, 0, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001373 if (err)
1374 goto err_srq;
1375
Matan Barak7ae0e402014-11-13 14:45:32 +02001376 num_eqs = dev->phys_caps.num_phys_eqs;
Roland Dreier225c7b12007-05-08 18:00:38 -07001377 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
1378 cmpt_base +
1379 ((u64) (MLX4_CMPT_TYPE_EQ *
1380 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001381 cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001382 if (err)
1383 goto err_cq;
1384
1385 return 0;
1386
1387err_cq:
1388 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1389
1390err_srq:
1391 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1392
1393err_qp:
1394 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1395
1396err:
1397 return err;
1398}
1399
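/*
 * Size the ICM auxiliary area with SET_ICM_SIZE, map it with
 * MAP_ICM_AUX, and then map every per-resource context table the
 * device needs: cMPT, EQ, MTT, dMPT, QP, AUXC, ALTC, RDMARC, CQ, SRQ
 * and MCG.
 */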
Roland Dreier3d73c282007-10-10 15:43:54 -07001400static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1401 struct mlx4_init_hca_param *init_hca, u64 icm_size)
Roland Dreier225c7b12007-05-08 18:00:38 -07001402{
1403 struct mlx4_priv *priv = mlx4_priv(dev);
1404 u64 aux_pages;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001405 int num_eqs;
Roland Dreier225c7b12007-05-08 18:00:38 -07001406 int err;
1407
1408 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
1409 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001410 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001411 return err;
1412 }
1413
Joe Perches1a91de22014-05-07 12:52:57 -07001414 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
Roland Dreier225c7b12007-05-08 18:00:38 -07001415 (unsigned long long) icm_size >> 10,
1416 (unsigned long long) aux_pages << 2);
1417
1418 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
Jack Morgenstein5b0bf5e2007-08-01 12:28:20 +03001419 GFP_HIGHUSER | __GFP_NOWARN, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001420 if (!priv->fw.aux_icm) {
Joe Perches1a91de22014-05-07 12:52:57 -07001421 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001422 return -ENOMEM;
1423 }
1424
1425 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
1426 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001427 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001428 goto err_free_aux;
1429 }
1430
1431 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
1432 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001433 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001434 goto err_unmap_aux;
1435 }
1436
Matan Barak7ae0e402014-11-13 14:45:32 +02001438 num_eqs = dev->phys_caps.num_phys_eqs;
Roland Dreierfa0681d2009-09-05 20:24:49 -07001439 err = mlx4_init_icm_table(dev, &priv->eq_table.table,
1440 init_hca->eqc_base, dev_cap->eqc_entry_sz,
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001441 num_eqs, num_eqs, 0, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001442 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001443 mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001444 goto err_unmap_cmpt;
1445 }
1446
Jack Morgensteind7bb58f2007-08-01 12:28:53 +03001447 /*
1448 * Reserved MTT entries must be aligned up to a cacheline
1449 * boundary, since the FW will write to them, while the driver
1450 * writes to all other MTT entries. (The variable
1451 * dev->caps.mtt_entry_sz below is really the MTT segment
1452 * size, not the raw entry size)
1453 */
1454 dev->caps.reserved_mtts =
1455 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
1456 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
1457
Roland Dreier225c7b12007-05-08 18:00:38 -07001458 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
1459 init_hca->mtt_base,
1460 dev->caps.mtt_entry_sz,
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00001461 dev->caps.num_mtts,
Jack Morgenstein5b0bf5e2007-08-01 12:28:20 +03001462 dev->caps.reserved_mtts, 1, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001463 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001464 mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001465 goto err_unmap_eq;
1466 }
1467
1468 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
1469 init_hca->dmpt_base,
1470 dev_cap->dmpt_entry_sz,
1471 dev->caps.num_mpts,
Jack Morgenstein5b0bf5e2007-08-01 12:28:20 +03001472 dev->caps.reserved_mrws, 1, 1);
Roland Dreier225c7b12007-05-08 18:00:38 -07001473 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001474 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001475 goto err_unmap_mtt;
1476 }
1477
1478 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
1479 init_hca->qpc_base,
1480 dev_cap->qpc_entry_sz,
1481 dev->caps.num_qps,
Yevgeny Petrilin93fc9e12008-10-22 10:25:29 -07001482 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1483 0, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001484 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001485 mlx4_err(dev, "Failed to map QP context memory, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001486 goto err_unmap_dmpt;
1487 }
1488
1489 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
1490 init_hca->auxc_base,
1491 dev_cap->aux_entry_sz,
1492 dev->caps.num_qps,
Yevgeny Petrilin93fc9e12008-10-22 10:25:29 -07001493 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1494 0, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001495 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001496 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001497 goto err_unmap_qp;
1498 }
1499
1500 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
1501 init_hca->altc_base,
1502 dev_cap->altc_entry_sz,
1503 dev->caps.num_qps,
Yevgeny Petrilin93fc9e12008-10-22 10:25:29 -07001504 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1505 0, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001506 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001507 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001508 goto err_unmap_auxc;
1509 }
1510
1511 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
1512 init_hca->rdmarc_base,
1513 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
1514 dev->caps.num_qps,
Yevgeny Petrilin93fc9e12008-10-22 10:25:29 -07001515 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1516 0, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001517 if (err) {
1518 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
1519 goto err_unmap_altc;
1520 }
1521
1522 err = mlx4_init_icm_table(dev, &priv->cq_table.table,
1523 init_hca->cqc_base,
1524 dev_cap->cqc_entry_sz,
1525 dev->caps.num_cqs,
Jack Morgenstein5b0bf5e2007-08-01 12:28:20 +03001526 dev->caps.reserved_cqs, 0, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001527 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001528 mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001529 goto err_unmap_rdmarc;
1530 }
1531
1532 err = mlx4_init_icm_table(dev, &priv->srq_table.table,
1533 init_hca->srqc_base,
1534 dev_cap->srq_entry_sz,
1535 dev->caps.num_srqs,
Jack Morgenstein5b0bf5e2007-08-01 12:28:20 +03001536 dev->caps.reserved_srqs, 0, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001537 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001538 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001539 goto err_unmap_cq;
1540 }
1541
1542 /*
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001543 * For flow steering device managed mode it is required to use
1544 * mlx4_init_icm_table. For B0 steering mode it's not strictly
1545 * required, but for simplicity just map the whole multicast
1546 * group table now. The table isn't very big and it's a lot
1547 * easier than trying to track ref counts.
Roland Dreier225c7b12007-05-08 18:00:38 -07001548 */
1549 err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
Eugenia Emantayev0ec2c0f2011-12-13 04:16:02 +00001550 init_hca->mc_base,
1551 mlx4_get_mgm_entry_size(dev),
Roland Dreier225c7b12007-05-08 18:00:38 -07001552 dev->caps.num_mgms + dev->caps.num_amgms,
1553 dev->caps.num_mgms + dev->caps.num_amgms,
Jack Morgenstein5b0bf5e2007-08-01 12:28:20 +03001554 0, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001555 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001556 mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07001557 goto err_unmap_srq;
1558 }
1559
1560 return 0;
1561
1562err_unmap_srq:
1563 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1564
1565err_unmap_cq:
1566 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1567
1568err_unmap_rdmarc:
1569 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1570
1571err_unmap_altc:
1572 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1573
1574err_unmap_auxc:
1575 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1576
1577err_unmap_qp:
1578 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1579
1580err_unmap_dmpt:
1581 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1582
1583err_unmap_mtt:
1584 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1585
1586err_unmap_eq:
Roland Dreierfa0681d2009-09-05 20:24:49 -07001587 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
Roland Dreier225c7b12007-05-08 18:00:38 -07001588
1589err_unmap_cmpt:
1590 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1591 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1592 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1593 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1594
1595err_unmap_aux:
1596 mlx4_UNMAP_ICM_AUX(dev);
1597
1598err_free_aux:
Jack Morgenstein5b0bf5e2007-08-01 12:28:20 +03001599 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001600
1601 return err;
1602}
1603
1604static void mlx4_free_icms(struct mlx4_dev *dev)
1605{
1606 struct mlx4_priv *priv = mlx4_priv(dev);
1607
1608 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
1609 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1610 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1611 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1612 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1613 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1614 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1615 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1616 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
Roland Dreierfa0681d2009-09-05 20:24:49 -07001617 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
Roland Dreier225c7b12007-05-08 18:00:38 -07001618 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1619 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1620 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1621 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
Roland Dreier225c7b12007-05-08 18:00:38 -07001622
1623 mlx4_UNMAP_ICM_AUX(dev);
Jack Morgenstein5b0bf5e2007-08-01 12:28:20 +03001624 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07001625}
1626
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001627static void mlx4_slave_exit(struct mlx4_dev *dev)
1628{
1629 struct mlx4_priv *priv = mlx4_priv(dev);
1630
Roland Dreierf3d4c892012-09-25 21:24:07 -07001631 mutex_lock(&priv->cmd.slave_cmd_mutex);
Yishai Hadas0cd93022015-01-25 16:59:43 +02001632 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
1633 MLX4_COMM_TIME))
Joe Perches1a91de22014-05-07 12:52:57 -07001634 mlx4_warn(dev, "Failed to close slave function\n");
Roland Dreierf3d4c892012-09-25 21:24:07 -07001635 mutex_unlock(&priv->cmd.slave_cmd_mutex);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001636}
1637
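/*
 * Map the BlueFlame area: the part of BAR 2 that follows the UAR pages,
 * mapped as a write-combining io_mapping.
 */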
Eli Cohenc1b43dc2011-03-22 22:38:41 +00001638static int map_bf_area(struct mlx4_dev *dev)
1639{
1640 struct mlx4_priv *priv = mlx4_priv(dev);
1641 resource_size_t bf_start;
1642 resource_size_t bf_len;
1643 int err = 0;
1644
Jack Morgenstein3d747472012-02-19 21:38:52 +00001645 if (!dev->caps.bf_reg_size)
1646 return -ENXIO;
1647
Yishai Hadas872bf2f2015-01-25 16:59:35 +02001648 bf_start = pci_resource_start(dev->persist->pdev, 2) +
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001649 (dev->caps.num_uars << PAGE_SHIFT);
Yishai Hadas872bf2f2015-01-25 16:59:35 +02001650 bf_len = pci_resource_len(dev->persist->pdev, 2) -
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001651 (dev->caps.num_uars << PAGE_SHIFT);
Eli Cohenc1b43dc2011-03-22 22:38:41 +00001652 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
1653 if (!priv->bf_mapping)
1654 err = -ENOMEM;
1655
1656 return err;
1657}
1658
1659static void unmap_bf_area(struct mlx4_dev *dev)
1660{
1661 if (mlx4_priv(dev)->bf_mapping)
1662 io_mapping_free(mlx4_priv(dev)->bf_mapping);
1663}
1664
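/*
 * Read the 64-bit internal HCA clock.  The high word is sampled before
 * and after the low word, and the read is retried (up to 10 times) if
 * the high word changed in between, so a consistent value is returned.
 */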
Amir Vadaiec693d42013-04-23 06:06:49 +00001665cycle_t mlx4_read_clock(struct mlx4_dev *dev)
1666{
1667 u32 clockhi, clocklo, clockhi1;
1668 cycle_t cycles;
1669 int i;
1670 struct mlx4_priv *priv = mlx4_priv(dev);
1671
1672 for (i = 0; i < 10; i++) {
1673 clockhi = swab32(readl(priv->clock_mapping));
1674 clocklo = swab32(readl(priv->clock_mapping + 4));
1675 clockhi1 = swab32(readl(priv->clock_mapping));
1676 if (clockhi == clockhi1)
1677 break;
1678 }
1679
1680 cycles = (u64) clockhi << 32 | (u64) clocklo;
1681
1682 return cycles;
1683}
1684EXPORT_SYMBOL_GPL(mlx4_read_clock);
1685
Eugenia Emantayevddd8a6c2013-04-23 06:06:48 +00001687static int map_internal_clock(struct mlx4_dev *dev)
1688{
1689 struct mlx4_priv *priv = mlx4_priv(dev);
1690
1691 priv->clock_mapping =
Yishai Hadas872bf2f2015-01-25 16:59:35 +02001692 ioremap(pci_resource_start(dev->persist->pdev,
1693 priv->fw.clock_bar) +
Eugenia Emantayevddd8a6c2013-04-23 06:06:48 +00001694 priv->fw.clock_offset, MLX4_CLOCK_SIZE);
1695
1696 if (!priv->clock_mapping)
1697 return -ENOMEM;
1698
1699 return 0;
1700}
1701
Matan Barak52033cf2015-06-11 16:35:26 +03001702int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1703 struct mlx4_clock_params *params)
1704{
1705 struct mlx4_priv *priv = mlx4_priv(dev);
1706
1707 if (mlx4_is_slave(dev))
1708 return -ENOTSUPP;
1709
1710 if (!params)
1711 return -EINVAL;
1712
1713 params->bar = priv->fw.clock_bar;
1714 params->offset = priv->fw.clock_offset;
1715 params->size = MLX4_CLOCK_SIZE;
1716
1717 return 0;
1718}
1719EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params);
1720
Eugenia Emantayevddd8a6c2013-04-23 06:06:48 +00001721static void unmap_internal_clock(struct mlx4_dev *dev)
1722{
1723 struct mlx4_priv *priv = mlx4_priv(dev);
1724
1725 if (priv->clock_mapping)
1726 iounmap(priv->clock_mapping);
1727}
1728
Roland Dreier225c7b12007-05-08 18:00:38 -07001729static void mlx4_close_hca(struct mlx4_dev *dev)
1730{
Eugenia Emantayevddd8a6c2013-04-23 06:06:48 +00001731 unmap_internal_clock(dev);
Eli Cohenc1b43dc2011-03-22 22:38:41 +00001732 unmap_bf_area(dev);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001733 if (mlx4_is_slave(dev))
1734 mlx4_slave_exit(dev);
1735 else {
1736 mlx4_CLOSE_HCA(dev, 0);
1737 mlx4_free_icms(dev);
Matan Baraka0eacca2014-11-13 14:45:30 +02001738 }
1739}
1740
1741static void mlx4_close_fw(struct mlx4_dev *dev)
1742{
1743 if (!mlx4_is_slave(dev)) {
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001744 mlx4_UNMAP_FA(dev);
1745 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
1746 }
1747}
1748
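/*
 * Poll the comm-channel flags until the PF clears the offline bit,
 * sleeping 100 msec between reads, or fail with -EIO after
 * MLX4_COMM_OFFLINE_TIME_OUT.
 */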
Yishai Hadas55ad3592015-01-25 16:59:42 +02001749static int mlx4_comm_check_offline(struct mlx4_dev *dev)
1750{
1751#define COMM_CHAN_OFFLINE_OFFSET 0x09
1752
1753 u32 comm_flags;
1754 u32 offline_bit;
1755 unsigned long end;
1756 struct mlx4_priv *priv = mlx4_priv(dev);
1757
1758 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
1759 while (time_before(jiffies, end)) {
1760 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
1761 MLX4_COMM_CHAN_FLAGS));
1762 offline_bit = (comm_flags &
1763 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
1764 if (!offline_bit)
1765 return 0;
1766 /* There are cases, as part of the AER/Reset flow, where the PF needs
1767 * around 100 msec to load. We therefore sleep for 100 msec
1768 * to allow other tasks to make use of that CPU during this
1769 * time interval.
1770 */
1771 msleep(100);
1772 }
1773 mlx4_err(dev, "Communication channel is offline.\n");
1774 return -EIO;
1775}
1776
1777static void mlx4_reset_vf_support(struct mlx4_dev *dev)
1778{
1779#define COMM_CHAN_RST_OFFSET 0x1e
1780
1781 struct mlx4_priv *priv = mlx4_priv(dev);
1782 u32 comm_rst;
1783 u32 comm_caps;
1784
1785 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm +
1786 MLX4_COMM_CHAN_CAPS));
1787 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));
1788
1789 if (comm_rst)
1790 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
1791}
1792
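/*
 * Slave (VF) initialization over the comm channel: defer probing while
 * the PF is still loading, reset the channel, verify that the command
 * interface revision matches the master's, and pass the VHCR DMA
 * address to the master via the VHCR0..VHCR_EN commands.
 */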
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001793static int mlx4_init_slave(struct mlx4_dev *dev)
1794{
1795 struct mlx4_priv *priv = mlx4_priv(dev);
1796 u64 dma = (u64) priv->mfunc.vhcr_dma;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001797 int ret_from_reset = 0;
1798 u32 slave_read;
1799 u32 cmd_channel_ver;
1800
Amir Vadai97989352014-03-06 18:28:17 +02001801 if (atomic_read(&pf_loading)) {
Joe Perches1a91de22014-05-07 12:52:57 -07001802 mlx4_warn(dev, "PF is not ready - Deferring probe\n");
Amir Vadai97989352014-03-06 18:28:17 +02001803 return -EPROBE_DEFER;
1804 }
1805
Roland Dreierf3d4c892012-09-25 21:24:07 -07001806 mutex_lock(&priv->cmd.slave_cmd_mutex);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001807 priv->cmd.max_cmds = 1;
Yishai Hadas55ad3592015-01-25 16:59:42 +02001808 if (mlx4_comm_check_offline(dev)) {
1809 mlx4_err(dev, "PF is not responsive, skipping initialization\n");
1810 goto err_offline;
1811 }
1812
1813 mlx4_reset_vf_support(dev);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001814 mlx4_warn(dev, "Sending reset\n");
1815 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
Yishai Hadas0cd93022015-01-25 16:59:43 +02001816 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001817 /* if we are in the middle of FLR, the slave will try
1818 * NUM_OF_RESET_RETRIES times before leaving. */
1819 if (ret_from_reset) {
1820 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
Joe Perches1a91de22014-05-07 12:52:57 -07001821 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
Jack Morgenstein5efe5352013-06-04 05:13:27 +00001822 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1823 return -EPROBE_DEFER;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001824 } else
1825 goto err;
1826 }
1827
1828 /* check the driver version - the slave I/F revision
1829 * must match the master's */
1830 slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
1831 cmd_channel_ver = mlx4_comm_get_version();
1832
1833 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
1834 MLX4_COMM_GET_IF_REV(slave_read)) {
Joe Perches1a91de22014-05-07 12:52:57 -07001835 mlx4_err(dev, "slave driver version is not supported by the master\n");
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001836 goto err;
1837 }
1838
1839 mlx4_warn(dev, "Sending vhcr0\n");
1840 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
Yishai Hadas0cd93022015-01-25 16:59:43 +02001841 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001842 goto err;
1843 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
Yishai Hadas0cd93022015-01-25 16:59:43 +02001844 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001845 goto err;
1846 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
Yishai Hadas0cd93022015-01-25 16:59:43 +02001847 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001848 goto err;
Yishai Hadas0cd93022015-01-25 16:59:43 +02001849 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
1850 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001851 goto err;
Roland Dreierf3d4c892012-09-25 21:24:07 -07001852
1853 mutex_unlock(&priv->cmd.slave_cmd_mutex);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001854 return 0;
1855
1856err:
Yishai Hadas0cd93022015-01-25 16:59:43 +02001857 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
Yishai Hadas55ad3592015-01-25 16:59:42 +02001858err_offline:
Roland Dreierf3d4c892012-09-25 21:24:07 -07001859 mutex_unlock(&priv->cmd.slave_cmd_mutex);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00001860 return -EIO;
Roland Dreier225c7b12007-05-08 18:00:38 -07001861}
1862
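/*
 * Adjust the master's port capabilities for paravirtualization:
 * Ethernet ports get the per-slave number of GIDs, IB ports a single
 * GID, and one P_Key entry is reserved from the physical table.
 */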
Jack Morgenstein66349612012-06-19 11:21:44 +03001863static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
1864{
1865 int i;
1866
1867 for (i = 1; i <= dev->caps.num_ports; i++) {
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02001868 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
1869 dev->caps.gid_table_len[i] =
Matan Barak449fc482014-03-19 18:11:52 +02001870 mlx4_get_slave_num_gids(dev, 0, i);
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02001871 else
1872 dev->caps.gid_table_len[i] = 1;
Jack Morgenstein66349612012-06-19 11:21:44 +03001873 dev->caps.pkey_table_len[i] =
1874 dev->phys_caps.pkey_phys_table_len[i] - 1;
1875 }
1876}
1877
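/*
 * Pick the smallest MGM entry size (log2 bytes) able to hold
 * qp_per_entry QPs; an entry of 2^i bytes holds 4 * ((2^i)/16 - 2) QPs.
 * Returns -1 if even the maximum entry size is too small.
 */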
Jack Morgenstein3c439b52012-12-06 17:12:00 +00001878static int choose_log_fs_mgm_entry_size(int qp_per_entry)
1879{
1880 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
1881
1882 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
1883 i++) {
1884 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
1885 break;
1886 }
1887
1888 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
1889}
1890
Matan Barak7d077cd2014-12-11 10:58:00 +02001891static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
1892{
1893 switch (dmfs_high_steer_mode) {
1894 case MLX4_STEERING_DMFS_A0_DEFAULT:
1895 return "default performance";
1896
1897 case MLX4_STEERING_DMFS_A0_DYNAMIC:
1898 return "dynamic hybrid mode";
1899
1900 case MLX4_STEERING_DMFS_A0_STATIC:
1901 return "performance optimized for limited rule configuration (static)";
1902
1903 case MLX4_STEERING_DMFS_A0_DISABLE:
1904 return "disabled performance optimized steering";
1905
1906 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
1907 return "performance optimized steering not supported";
1908
1909 default:
1910 return "Unrecognized mode";
1911 }
1912}
1913
1914#define MLX4_DMFS_A0_STEERING (1UL << 2)
1915
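/*
 * Select the steering mode: device-managed flow steering (DMFS) when
 * the firmware supports it and the module parameter allows it,
 * otherwise B0 or A0 steering depending on the UC/MC steering flags.
 * A negative log_num_mgm_entry_size may also request the DMFS A0
 * static optimization.
 */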
Jack Morgenstein7b8157b2012-12-06 17:11:59 +00001916static void choose_steering_mode(struct mlx4_dev *dev,
1917 struct mlx4_dev_cap *dev_cap)
1918{
Matan Barak7d077cd2014-12-11 10:58:00 +02001919 if (mlx4_log_num_mgm_entry_size <= 0) {
1920 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
1921 if (dev->caps.dmfs_high_steer_mode ==
1922 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
1923 mlx4_err(dev, "DMFS high rate mode not supported\n");
1924 else
1925 dev->caps.dmfs_high_steer_mode =
1926 MLX4_STEERING_DMFS_A0_STATIC;
1927 }
1928 }
1929
1930 if (mlx4_log_num_mgm_entry_size <= 0 &&
Jack Morgenstein3c439b52012-12-06 17:12:00 +00001931 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
Jack Morgenstein7b8157b2012-12-06 17:11:59 +00001932 (!mlx4_is_mfunc(dev) ||
Yishai Hadas872bf2f2015-01-25 16:59:35 +02001933 (dev_cap->fs_max_num_qp_per_entry >=
1934 (dev->persist->num_vfs + 1))) &&
Jack Morgenstein3c439b52012-12-06 17:12:00 +00001935 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
1936 MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
1937 dev->oper_log_mgm_entry_size =
1938 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
Jack Morgenstein7b8157b2012-12-06 17:11:59 +00001939 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
1940 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
1941 dev->caps.fs_log_max_ucast_qp_range_size =
1942 dev_cap->fs_log_max_ucast_qp_range_size;
1943 } else {
Matan Barak7d077cd2014-12-11 10:58:00 +02001944 if (dev->caps.dmfs_high_steer_mode !=
1945 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
1946 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
Jack Morgenstein7b8157b2012-12-06 17:11:59 +00001947 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
1948 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
1949 dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
1950 else {
1951 dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
1952
1953 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
1954 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
Joe Perches1a91de22014-05-07 12:52:57 -07001955 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
Jack Morgenstein7b8157b2012-12-06 17:11:59 +00001956 }
Jack Morgenstein3c439b52012-12-06 17:12:00 +00001957 dev->oper_log_mgm_entry_size =
1958 mlx4_log_num_mgm_entry_size > 0 ?
1959 mlx4_log_num_mgm_entry_size :
1960 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
Jack Morgenstein7b8157b2012-12-06 17:11:59 +00001961 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
1962 }
Joe Perches1a91de22014-05-07 12:52:57 -07001963 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
Jack Morgenstein3c439b52012-12-06 17:12:00 +00001964 mlx4_steering_mode_str(dev->caps.steering_mode),
1965 dev->oper_log_mgm_entry_size,
1966 mlx4_log_num_mgm_entry_size);
Jack Morgenstein7b8157b2012-12-06 17:11:59 +00001967}
1968
Or Gerlitz7ffdf722013-12-23 16:09:43 +02001969static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
1970 struct mlx4_dev_cap *dev_cap)
1971{
1972 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
Or Gerlitz5eff6da2015-01-15 15:28:54 +02001973 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
Or Gerlitz7ffdf722013-12-23 16:09:43 +02001974 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
1975 else
1976 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
1977
1978 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
1979 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
1980}
1981
Matan Barak7d077cd2014-12-11 10:58:00 +02001982static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
1983{
1984 int i;
1985 struct mlx4_port_cap port_cap;
1986
1987 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
1988 return -EINVAL;
1989
1990 for (i = 1; i <= dev->caps.num_ports; i++) {
1991 if (mlx4_dev_port(dev, i, &port_cap)) {
1992 mlx4_err(dev,
1993 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
1994 } else if ((dev->caps.dmfs_high_steer_mode !=
1995 MLX4_STEERING_DMFS_A0_DEFAULT) &&
1996 (port_cap.dmfs_optimized_state ==
1997 !!(dev->caps.dmfs_high_steer_mode ==
1998 MLX4_STEERING_DMFS_A0_DISABLE))) {
1999 mlx4_err(dev,
2000 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n",
2001 dmfs_high_rate_steering_mode_str(
2002 dev->caps.dmfs_high_steer_mode),
2003 (port_cap.dmfs_optimized_state ?
2004 "enabled" : "disabled"));
2005 }
2006 }
2007
2008 return 0;
2009}
2010
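/*
 * PF-only firmware bring-up: QUERY_FW, load and start the firmware,
 * and override log_pg_sz via MOD_STAT_CFG.  Slaves skip all of this.
 */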
Matan Baraka0eacca2014-11-13 14:45:30 +02002011static int mlx4_init_fw(struct mlx4_dev *dev)
Roland Dreier225c7b12007-05-08 18:00:38 -07002012{
Vladimir Sokolovsky2d928652008-07-14 23:48:53 -07002013 struct mlx4_mod_stat_cfg mlx4_cfg;
Matan Baraka0eacca2014-11-13 14:45:30 +02002014 int err = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002015
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002016 if (!mlx4_is_slave(dev)) {
2017 err = mlx4_QUERY_FW(dev);
2018 if (err) {
2019 if (err == -EACCES)
Joe Perches1a91de22014-05-07 12:52:57 -07002020 mlx4_info(dev, "non-primary physical function, skipping\n");
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002021 else
Joe Perches1a91de22014-05-07 12:52:57 -07002022 mlx4_err(dev, "QUERY_FW command failed, aborting\n");
Aviad Yehezkelbef772e2012-09-05 22:50:51 +00002023 return err;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002024 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002025
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002026 err = mlx4_load_fw(dev);
2027 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002028 mlx4_err(dev, "Failed to start FW, aborting\n");
Aviad Yehezkelbef772e2012-09-05 22:50:51 +00002029 return err;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002030 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002031
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002032 mlx4_cfg.log_pg_sz_m = 1;
2033 mlx4_cfg.log_pg_sz = 0;
2034 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
2035 if (err)
2036 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
Matan Baraka0eacca2014-11-13 14:45:30 +02002037 }
Vladimir Sokolovsky2d928652008-07-14 23:48:53 -07002038
Matan Baraka0eacca2014-11-13 14:45:30 +02002039 return err;
2040}
2041
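/*
 * Bring the HCA up.  On the PF: query device caps, choose the steering
 * and tunnel offload modes, build the ICM profile, map ICM and run
 * INIT_HCA, then handle SYS_EQS, timestamping and DMFS validation.
 * On a VF: initialize the slave and query its caps.  Both paths then
 * map BlueFlame, run QUERY_ADAPTER and retrieve CONFIG_DEV parameters.
 */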
2042static int mlx4_init_hca(struct mlx4_dev *dev)
2043{
2044 struct mlx4_priv *priv = mlx4_priv(dev);
2045 struct mlx4_adapter adapter;
2046 struct mlx4_dev_cap dev_cap;
2047 struct mlx4_profile profile;
2048 struct mlx4_init_hca_param init_hca;
2049 u64 icm_size;
2050 struct mlx4_config_dev_params params;
2051 int err;
2052
2053 if (!mlx4_is_slave(dev)) {
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002054 err = mlx4_dev_cap(dev, &dev_cap);
2055 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002056 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
Jack Morgensteind0d01252014-12-30 11:59:50 +02002057 return err;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002058 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002059
Jack Morgenstein7b8157b2012-12-06 17:11:59 +00002060 choose_steering_mode(dev, &dev_cap);
Or Gerlitz7ffdf722013-12-23 16:09:43 +02002061 choose_tunnel_offload_mode(dev, &dev_cap);
Jack Morgenstein7b8157b2012-12-06 17:11:59 +00002062
Matan Barak7d077cd2014-12-11 10:58:00 +02002063 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
2064 mlx4_is_master(dev))
2065 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;
2066
Hadar Hen Zion8e1a28e2013-12-19 21:20:12 +02002067 err = mlx4_get_phys_port_id(dev);
2068 if (err)
2069 mlx4_err(dev, "Fail to get physical port id\n");
2070
Jack Morgenstein66349612012-06-19 11:21:44 +03002071 if (mlx4_is_master(dev))
2072 mlx4_parav_master_pf_caps(dev);
2073
Amir Vadai2599d852014-07-22 15:44:11 +03002074 if (mlx4_low_memory_profile()) {
2075 mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
2076 profile = low_mem_profile;
2077 } else {
2078 profile = default_profile;
2079 }
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00002080 if (dev->caps.steering_mode ==
2081 MLX4_STEERING_MODE_DEVICE_MANAGED)
2082 profile.num_mcg = MLX4_FS_NUM_MCG;
Roland Dreier225c7b12007-05-08 18:00:38 -07002083
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002084 icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
2085 &init_hca);
2086 if ((long long) icm_size < 0) {
2087 err = icm_size;
Jack Morgensteind0d01252014-12-30 11:59:50 +02002088 return err;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002089 }
2090
Eli Cohena5bbe892012-02-09 18:10:06 +02002091 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
2092
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002093 init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
2094 init_hca.uar_page_sz = PAGE_SHIFT - 12;
Shani Michaelie4488342013-02-06 16:19:11 +00002095 init_hca.mw_enabled = 0;
2096 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2097 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2098 init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002099
2100 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
2101 if (err)
Jack Morgensteind0d01252014-12-30 11:59:50 +02002102 return err;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002103
2104 err = mlx4_INIT_HCA(dev, &init_hca);
2105 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002106 mlx4_err(dev, "INIT_HCA command failed, aborting\n");
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002107 goto err_free_icm;
2108 }
Matan Barak7ae0e402014-11-13 14:45:32 +02002109
2110 if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
2111 err = mlx4_query_func(dev, &dev_cap);
2112 if (err < 0) {
2113 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
Jack Morgensteind0d01252014-12-30 11:59:50 +02002114 goto err_close;
Matan Barak7ae0e402014-11-13 14:45:32 +02002115 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
2116 dev->caps.num_eqs = dev_cap.max_eqs;
2117 dev->caps.reserved_eqs = dev_cap.reserved_eqs;
2118 dev->caps.reserved_uars = dev_cap.reserved_uars;
2119 }
2120 }
2121
Eugenia Emantayevddd8a6c2013-04-23 06:06:48 +00002122 /*
2123 * If TS is supported by FW
2124 * read HCA frequency by QUERY_HCA command
2125 */
2126 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
2127 memset(&init_hca, 0, sizeof(init_hca));
2128 err = mlx4_QUERY_HCA(dev, &init_hca);
2129 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002130 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
Eugenia Emantayevddd8a6c2013-04-23 06:06:48 +00002131 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2132 } else {
2133 dev->caps.hca_core_clock =
2134 init_hca.hca_core_clock;
2135 }
2136
2137 /* In case we got HCA frequency 0 - disable timestamping
2138 * to avoid dividing by zero
2139 */
2140 if (!dev->caps.hca_core_clock) {
2141 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2142 mlx4_err(dev,
Joe Perches1a91de22014-05-07 12:52:57 -07002143 "HCA frequency is 0 - timestamping is not supported\n");
Eugenia Emantayevddd8a6c2013-04-23 06:06:48 +00002144 } else if (map_internal_clock(dev)) {
2145 /*
2146 * Map internal clock,
2147 * in case of failure disable timestamping
2148 */
2149 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
Joe Perches1a91de22014-05-07 12:52:57 -07002150 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
Eugenia Emantayevddd8a6c2013-04-23 06:06:48 +00002151 }
2152 }
Matan Barak7d077cd2014-12-11 10:58:00 +02002153
2154 if (dev->caps.dmfs_high_steer_mode !=
2155 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
2156 if (mlx4_validate_optimized_steering(dev))
2157 mlx4_warn(dev, "Optimized steering validation failed\n");
2158
2159 if (dev->caps.dmfs_high_steer_mode ==
2160 MLX4_STEERING_DMFS_A0_DISABLE) {
2161 dev->caps.dmfs_high_rate_qpn_base =
2162 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
2163 dev->caps.dmfs_high_rate_qpn_range =
2164 MLX4_A0_STEERING_TABLE_SIZE;
2165 }
2166
2167 mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n",
2168 dmfs_high_rate_steering_mode_str(
2169 dev->caps.dmfs_high_steer_mode));
2170 }
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002171 } else {
2172 err = mlx4_init_slave(dev);
2173 if (err) {
Jack Morgenstein5efe5352013-06-04 05:13:27 +00002174 if (err != -EPROBE_DEFER)
2175 mlx4_err(dev, "Failed to initialize slave\n");
Aviad Yehezkelbef772e2012-09-05 22:50:51 +00002176 return err;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002177 }
2178
2179 err = mlx4_slave_cap(dev);
2180 if (err) {
2181 mlx4_err(dev, "Failed to obtain slave caps\n");
2182 goto err_close;
2183 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002184 }
2185
Eli Cohenc1b43dc2011-03-22 22:38:41 +00002186 if (map_bf_area(dev))
2187 mlx4_dbg(dev, "Failed to map blue flame area\n");
2188
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002189 /* Only the master sets the ports; all the rest get them from it. */
2190 if (!mlx4_is_slave(dev))
2191 mlx4_set_port_mask(dev);
Roland Dreier225c7b12007-05-08 18:00:38 -07002192
2193 err = mlx4_QUERY_ADAPTER(dev, &adapter);
2194 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002195 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
Aviad Yehezkelbef772e2012-09-05 22:50:51 +00002196 goto unmap_bf;
Roland Dreier225c7b12007-05-08 18:00:38 -07002197 }
2198
Shani Michaelif8c64552014-11-09 13:51:53 +02002199 /* Query CONFIG_DEV parameters */
2200 err = mlx4_config_dev_retrieval(dev, &params);
2201 if (err && err != -ENOTSUPP) {
2202 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
2203 } else if (!err) {
2204 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
2205 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
2206 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002207 priv->eq_table.inta_pin = adapter.inta_pin;
Jack Morgensteincd9281d2007-09-18 09:14:18 +02002208 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
Roland Dreier225c7b12007-05-08 18:00:38 -07002209
2210 return 0;
2211
Aviad Yehezkelbef772e2012-09-05 22:50:51 +00002212unmap_bf:
Eugenia Emantayevddd8a6c2013-04-23 06:06:48 +00002213 unmap_internal_clock(dev);
Aviad Yehezkelbef772e2012-09-05 22:50:51 +00002214 unmap_bf_area(dev);
2215
Dotan Barakb38f2872014-05-29 16:30:59 +03002216 if (mlx4_is_slave(dev)) {
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03002217 kfree(dev->caps.qp0_qkey);
Dotan Barakb38f2872014-05-29 16:30:59 +03002218 kfree(dev->caps.qp0_tunnel);
2219 kfree(dev->caps.qp0_proxy);
2220 kfree(dev->caps.qp1_tunnel);
2221 kfree(dev->caps.qp1_proxy);
2222 }
2223
Roland Dreier225c7b12007-05-08 18:00:38 -07002224err_close:
Dotan Barak41929ed2012-10-21 14:59:23 +00002225 if (mlx4_is_slave(dev))
2226 mlx4_slave_exit(dev);
2227 else
2228 mlx4_CLOSE_HCA(dev, 0);
Roland Dreier225c7b12007-05-08 18:00:38 -07002229
2230err_free_icm:
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002231 if (!mlx4_is_slave(dev))
2232 mlx4_free_icms(dev);
Roland Dreier225c7b12007-05-08 18:00:38 -07002233
Roland Dreier225c7b12007-05-08 18:00:38 -07002234 return err;
2235}
2236
Or Gerlitzf2a3f6a2011-06-15 14:47:14 +00002237static int mlx4_init_counters_table(struct mlx4_dev *dev)
2238{
2239 struct mlx4_priv *priv = mlx4_priv(dev);
Eran Ben Elisha47d84172015-06-15 17:58:58 +03002240 int nent_pow2;
Or Gerlitzf2a3f6a2011-06-15 14:47:14 +00002241
2242 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2243 return -ENOENT;
2244
Eran Ben Elisha2632d182015-06-15 17:58:59 +03002245 if (!dev->caps.max_counters)
2246 return -ENOSPC;
2247
Eran Ben Elisha47d84172015-06-15 17:58:58 +03002248 nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
2249 /* reserve last counter index for sink counter */
2250 return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
2251 nent_pow2 - 1, 0,
2252 nent_pow2 - dev->caps.max_counters + 1);
Or Gerlitzf2a3f6a2011-06-15 14:47:14 +00002253}
2254
2255static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
2256{
Eran Ben Elishaefa6bc92015-06-15 17:58:56 +03002257 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2258 return;
2259
Eran Ben Elisha2632d182015-06-15 17:58:59 +03002260 if (!dev->caps.max_counters)
2261 return;
2262
Or Gerlitzf2a3f6a2011-06-15 14:47:14 +00002263 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
2264}
2265
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03002266static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
2267{
2268 struct mlx4_priv *priv = mlx4_priv(dev);
2269 int port;
2270
2271 for (port = 0; port < dev->caps.num_ports; port++)
2272 if (priv->def_counter[port] != -1)
2273 mlx4_counter_free(dev, priv->def_counter[port]);
2274}
2275
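/*
 * Allocate one default counter per port.  -ENOSPC falls back to the
 * sink counter index, -ENOENT (no counter support) is ignored, and an
 * -EINVAL from an old PF driver also falls back to the sink index;
 * any other error unwinds the counters already allocated.
 */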
2276static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
2277{
2278 struct mlx4_priv *priv = mlx4_priv(dev);
2279 int port, err = 0;
2280 u32 idx;
2281
2282 for (port = 0; port < dev->caps.num_ports; port++)
2283 priv->def_counter[port] = -1;
2284
2285 for (port = 0; port < dev->caps.num_ports; port++) {
2286 err = mlx4_counter_alloc(dev, &idx);
2287
2288 if (!err || err == -ENOSPC) {
2289 priv->def_counter[port] = idx;
2290 } else if (err == -ENOENT) {
2291 err = 0;
2292 continue;
Or Gerlitz178d23e2015-07-22 16:53:46 +03002293 } else if (mlx4_is_slave(dev) && err == -EINVAL) {
2294 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
2295 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
2296 MLX4_SINK_COUNTER_INDEX(dev));
2297 err = 0;
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03002298 } else {
2299 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
2300 __func__, port + 1, err);
2301 mlx4_cleanup_default_counters(dev);
2302 return err;
2303 }
2304
2305 mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
2306 __func__, priv->def_counter[port], port + 1);
2307 }
2308
2309 return err;
2310}
2311
Jack Morgensteinba062d52012-05-15 10:35:03 +00002312int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
Or Gerlitzf2a3f6a2011-06-15 14:47:14 +00002313{
2314 struct mlx4_priv *priv = mlx4_priv(dev);
2315
2316 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2317 return -ENOENT;
2318
2319 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03002320 if (*idx == -1) {
2321 *idx = MLX4_SINK_COUNTER_INDEX(dev);
2322 return -ENOSPC;
2323 }
Or Gerlitzf2a3f6a2011-06-15 14:47:14 +00002324
2325 return 0;
2326}
Jack Morgensteinba062d52012-05-15 10:35:03 +00002327
2328int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2329{
2330 u64 out_param;
2331 int err;
2332
2333 if (mlx4_is_mfunc(dev)) {
2334 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
2335 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
2336 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2337 if (!err)
2338 *idx = get_param_l(&out_param);
2339
2340 return err;
2341 }
2342 return __mlx4_counter_alloc(dev, idx);
2343}
Or Gerlitzf2a3f6a2011-06-15 14:47:14 +00002344EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
2345
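/*
 * Clear a hardware IF_STAT counter by issuing QUERY_IF_STAT with the
 * reset flag set in the input modifier.
 */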
Eran Ben Elishab72ca7e2015-06-15 17:58:57 +03002346static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
2347 u8 counter_index)
2348{
2349 struct mlx4_cmd_mailbox *if_stat_mailbox;
2350 int err;
2351 u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;
2352
2353 if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
2354 if (IS_ERR(if_stat_mailbox))
2355 return PTR_ERR(if_stat_mailbox);
2356
2357 err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
2358 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
2359 MLX4_CMD_NATIVE);
2360
2361 mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
2362 return err;
2363}
2364
Jack Morgensteinba062d52012-05-15 10:35:03 +00002365void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
Or Gerlitzf2a3f6a2011-06-15 14:47:14 +00002366{
Eran Ben Elishaefa6bc92015-06-15 17:58:56 +03002367 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2368 return;
2369
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03002370 if (idx == MLX4_SINK_COUNTER_INDEX(dev))
2371 return;
2372
Eran Ben Elishab72ca7e2015-06-15 17:58:57 +03002373 __mlx4_clear_if_stat(dev, idx);
2374
Jack Morgenstein7c6d74d2013-12-08 16:50:17 +02002375 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
Or Gerlitzf2a3f6a2011-06-15 14:47:14 +00002376 return;
2377}
Jack Morgensteinba062d52012-05-15 10:35:03 +00002378
2379void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2380{
Jack Morgensteine7dbeba2013-03-07 03:46:54 +00002381 u64 in_param = 0;
Jack Morgensteinba062d52012-05-15 10:35:03 +00002382
2383 if (mlx4_is_mfunc(dev)) {
2384 set_param_l(&in_param, idx);
2385 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
2386 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
2387 MLX4_CMD_WRAPPED);
2388 return;
2389 }
2390 __mlx4_counter_free(dev, idx);
2391}
Or Gerlitzf2a3f6a2011-06-15 14:47:14 +00002392EXPORT_SYMBOL_GPL(mlx4_counter_free);
2393
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03002394int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
2395{
2396 struct mlx4_priv *priv = mlx4_priv(dev);
2397
2398 return priv->def_counter[port - 1];
2399}
2400EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
2401
Yishai Hadas773af942015-03-03 10:54:48 +02002402void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
2403{
2404 struct mlx4_priv *priv = mlx4_priv(dev);
2405
2406 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2407}
2408EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);
2409
2410__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
2411{
2412 struct mlx4_priv *priv = mlx4_priv(dev);
2413
2414 return priv->mfunc.master.vf_admin[entry].vport[port].guid;
2415}
2416EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);
2417
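/*
 * Assign a random administrative GUID to a VF port (entry 0 keeps the
 * hardware GUID).  The EUI-64 group bit is cleared and the
 * locally-administered bit is set, so the random value is a valid
 * unicast, locally assigned GUID.
 */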
Yishai Hadasfb517a42015-03-03 11:23:32 +02002418void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
2419{
2420 struct mlx4_priv *priv = mlx4_priv(dev);
2421 __be64 guid;
2422
2423 /* hw GUID */
2424 if (entry == 0)
2425 return;
2426
2427 get_random_bytes((char *)&guid, sizeof(guid));
2428 guid &= ~(cpu_to_be64(1ULL << 56));
2429 guid |= cpu_to_be64(1ULL << 57);
2430 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2431}
2432
Roland Dreier3d73c282007-10-10 15:43:54 -07002433static int mlx4_setup_hca(struct mlx4_dev *dev)
Roland Dreier225c7b12007-05-08 18:00:38 -07002434{
2435 struct mlx4_priv *priv = mlx4_priv(dev);
2436 int err;
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07002437 int port;
Jack Morgenstein9a5aa622008-11-28 21:29:46 -08002438 __be32 ib_port_default_caps;
Roland Dreier225c7b12007-05-08 18:00:38 -07002439
Roland Dreier225c7b12007-05-08 18:00:38 -07002440 err = mlx4_init_uar_table(dev);
2441 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002442 mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
2443 return err;
Roland Dreier225c7b12007-05-08 18:00:38 -07002444 }
2445
2446 err = mlx4_uar_alloc(dev, &priv->driver_uar);
2447 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002448 mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07002449 goto err_uar_table_free;
2450 }
2451
Roland Dreier4979d182011-01-12 09:50:36 -08002452 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
Roland Dreier225c7b12007-05-08 18:00:38 -07002453 if (!priv->kar) {
Joe Perches1a91de22014-05-07 12:52:57 -07002454 mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07002455 err = -ENOMEM;
2456 goto err_uar_free;
2457 }
2458
2459 err = mlx4_init_pd_table(dev);
2460 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002461 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07002462 goto err_kar_unmap;
2463 }
2464
Sean Hefty012a8ff2011-06-02 09:01:33 -07002465 err = mlx4_init_xrcd_table(dev);
2466 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002467 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
Sean Hefty012a8ff2011-06-02 09:01:33 -07002468 goto err_pd_table_free;
2469 }
2470
Roland Dreier225c7b12007-05-08 18:00:38 -07002471 err = mlx4_init_mr_table(dev);
2472 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002473 mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
Sean Hefty012a8ff2011-06-02 09:01:33 -07002474 goto err_xrcd_table_free;
Roland Dreier225c7b12007-05-08 18:00:38 -07002475 }
2476
Yevgeny Petrilinfe6f7002013-07-28 18:54:21 +03002477 if (!mlx4_is_slave(dev)) {
2478 err = mlx4_init_mcg_table(dev);
2479 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002480 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
Yevgeny Petrilinfe6f7002013-07-28 18:54:21 +03002481 goto err_mr_table_free;
2482 }
Jack Morgenstein114840c2014-06-01 11:53:50 +03002483 err = mlx4_config_mad_demux(dev);
2484 if (err) {
2485 mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
2486 goto err_mcg_table_free;
2487 }
Yevgeny Petrilinfe6f7002013-07-28 18:54:21 +03002488 }
2489
Roland Dreier225c7b12007-05-08 18:00:38 -07002490 err = mlx4_init_eq_table(dev);
2491 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002492 mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
Yevgeny Petrilinfe6f7002013-07-28 18:54:21 +03002493 goto err_mcg_table_free;
Roland Dreier225c7b12007-05-08 18:00:38 -07002494 }
2495
2496 err = mlx4_cmd_use_events(dev);
2497 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002498 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07002499 goto err_eq_table_free;
2500 }
2501
2502 err = mlx4_NOP(dev);
2503 if (err) {
Michael S. Tsirkin08fb1052007-08-07 16:08:28 +03002504 if (dev->flags & MLX4_FLAG_MSI_X) {
Joe Perches1a91de22014-05-07 12:52:57 -07002505 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n",
Matan Barakc66fa192015-05-31 09:30:16 +03002506 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
Joe Perches1a91de22014-05-07 12:52:57 -07002507 mlx4_warn(dev, "Trying again without MSI-X\n");
Michael S. Tsirkin08fb1052007-08-07 16:08:28 +03002508 } else {
Joe Perches1a91de22014-05-07 12:52:57 -07002509 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
Matan Barakc66fa192015-05-31 09:30:16 +03002510 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
Roland Dreier225c7b12007-05-08 18:00:38 -07002511 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
Michael S. Tsirkin08fb1052007-08-07 16:08:28 +03002512 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002513
2514 goto err_cmd_poll;
2515 }
2516
2517 mlx4_dbg(dev, "NOP command IRQ test passed\n");
2518
2519 err = mlx4_init_cq_table(dev);
2520 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002521 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07002522 goto err_cmd_poll;
2523 }
2524
2525 err = mlx4_init_srq_table(dev);
2526 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002527 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07002528 goto err_cq_table_free;
2529 }
2530
2531 err = mlx4_init_qp_table(dev);
2532 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07002533 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
Roland Dreier225c7b12007-05-08 18:00:38 -07002534 goto err_srq_table_free;
2535 }
2536
Eran Ben Elisha2632d182015-06-15 17:58:59 +03002537 if (!mlx4_is_slave(dev)) {
2538 err = mlx4_init_counters_table(dev);
2539 if (err && err != -ENOENT) {
2540 mlx4_err(dev, "Failed to initialize counters table, aborting\n");
2541 goto err_qp_table_free;
2542 }
Or Gerlitzf2a3f6a2011-06-15 14:47:14 +00002543 }
2544
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03002545 err = mlx4_allocate_default_counters(dev);
2546 if (err) {
2547 mlx4_err(dev, "Failed to allocate default counters, aborting\n");
2548 goto err_counters_table_free;
Roland Dreier225c7b12007-05-08 18:00:38 -07002549 }
2550
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002551 if (!mlx4_is_slave(dev)) {
2552 for (port = 1; port <= dev->caps.num_ports; port++) {
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002553 ib_port_default_caps = 0;
2554 err = mlx4_get_port_ib_caps(dev, port,
2555 &ib_port_default_caps);
2556 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07002557 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
2558 port, err);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002559 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
Marcel Apfelbaum97285b72011-10-24 11:02:34 +02002560
Jack Morgenstein2aca1172012-06-19 11:21:41 +03002561 /* initialize per-slave default ib port capabilities */
2562 if (mlx4_is_master(dev)) {
2563 int i;
2564 for (i = 0; i < dev->num_slaves; i++) {
2565 if (i == mlx4_master_func_num(dev))
2566 continue;
2567 priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
Joe Perches1a91de22014-05-07 12:52:57 -07002568 ib_port_default_caps;
Jack Morgenstein2aca1172012-06-19 11:21:41 +03002569 }
2570 }
2571
Or Gerlitz096335b2012-01-11 19:02:17 +02002572 if (mlx4_is_mfunc(dev))
2573 dev->caps.port_ib_mtu[port] = IB_MTU_2048;
2574 else
2575 dev->caps.port_ib_mtu[port] = IB_MTU_4096;
Marcel Apfelbaum97285b72011-10-24 11:02:34 +02002576
Jack Morgenstein66349612012-06-19 11:21:44 +03002577 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
2578 dev->caps.pkey_table_len[port] : -1);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002579 if (err) {
2580 mlx4_err(dev, "Failed to set port %d, aborting\n",
Joe Perches1a91de22014-05-07 12:52:57 -07002581 port);
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03002582 goto err_default_counters_free;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002583 }
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07002584 }
2585 }
2586
Roland Dreier225c7b12007-05-08 18:00:38 -07002587 return 0;
2588
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03002589err_default_counters_free:
2590 mlx4_cleanup_default_counters(dev);
2591
Or Gerlitzf2a3f6a2011-06-15 14:47:14 +00002592err_counters_table_free:
Eran Ben Elisha2632d182015-06-15 17:58:59 +03002593 if (!mlx4_is_slave(dev))
2594 mlx4_cleanup_counters_table(dev);
Or Gerlitzf2a3f6a2011-06-15 14:47:14 +00002595
Roland Dreier225c7b12007-05-08 18:00:38 -07002596err_qp_table_free:
2597 mlx4_cleanup_qp_table(dev);
2598
2599err_srq_table_free:
2600 mlx4_cleanup_srq_table(dev);
2601
2602err_cq_table_free:
2603 mlx4_cleanup_cq_table(dev);
2604
2605err_cmd_poll:
2606 mlx4_cmd_use_polling(dev);
2607
2608err_eq_table_free:
2609 mlx4_cleanup_eq_table(dev);
2610
Yevgeny Petrilinfe6f7002013-07-28 18:54:21 +03002611err_mcg_table_free:
2612 if (!mlx4_is_slave(dev))
2613 mlx4_cleanup_mcg_table(dev);
2614
Jack Morgensteinee49bd92007-07-12 17:50:45 +03002615err_mr_table_free:
Roland Dreier225c7b12007-05-08 18:00:38 -07002616 mlx4_cleanup_mr_table(dev);
2617
Sean Hefty012a8ff2011-06-02 09:01:33 -07002618err_xrcd_table_free:
2619 mlx4_cleanup_xrcd_table(dev);
2620
Roland Dreier225c7b12007-05-08 18:00:38 -07002621err_pd_table_free:
2622 mlx4_cleanup_pd_table(dev);
2623
2624err_kar_unmap:
2625 iounmap(priv->kar);
2626
2627err_uar_free:
2628 mlx4_uar_free(dev, &priv->driver_uar);
2629
2630err_uar_table_free:
2631 mlx4_cleanup_uar_table(dev);
2632 return err;
2633}
2634
Ido Shamayde161802015-05-31 09:30:17 +03002635static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
2636{
2637 int requested_cpu = 0;
2638 struct mlx4_priv *priv = mlx4_priv(dev);
2639 struct mlx4_eq *eq;
2640 int off = 0;
2641 int i;
2642
2643 if (eqn > dev->caps.num_comp_vectors)
2644 return -EINVAL;
2645
2646 for (i = 1; i < port; i++)
2647 off += mlx4_get_eqs_per_port(dev, i);
2648
2649 requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
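	/* off is the number of EQs already serving lower-numbered ports, and
	 * the !!(eqn > MLX4_EQ_ASYNC) term skips the slot used by the async
	 * EQ.  For illustration (values assumed), with MLX4_EQ_ASYNC == 0 and
	 * four completion EQs already assigned to port 1, the first completion
	 * EQ of port 2 (eqn == 5) hints CPU 5 - 4 - 1 = 0.
	 */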
2650
2651 /* Meaning EQs are shared, and this call comes from the second port */
2652 if (requested_cpu < 0)
2653 return 0;
2654
2655 eq = &priv->eq_table.eq[eqn];
2656
2657 if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
2658 return -ENOMEM;
2659
2660 cpumask_set_cpu(requested_cpu, eq->affinity_mask);
2661
2662 return 0;
2663}
2664
Roland Dreiere8f9b2e2008-02-04 20:20:41 -08002665static void mlx4_enable_msi_x(struct mlx4_dev *dev)
Roland Dreier225c7b12007-05-08 18:00:38 -07002666{
2667 struct mlx4_priv *priv = mlx4_priv(dev);
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -08002668 struct msix_entry *entries;
Roland Dreier225c7b12007-05-08 18:00:38 -07002669 int i;
Matan Barakc66fa192015-05-31 09:30:16 +03002670 int port = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002671
2672 if (msi_x) {
Matan Barakc66fa192015-05-31 09:30:16 +03002673 int nreq = dev->caps.num_ports * num_online_cpus() + 1;
Matan Barak7ae0e402014-11-13 14:45:32 +02002674
Or Gerlitzca4c7b32013-01-17 05:30:43 +00002675 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
2676 nreq);
Carol L Soto85121d62015-10-07 12:31:46 -04002677 if (nreq > MAX_MSIX)
Carol L Soto92932672015-08-27 14:43:25 -05002678 nreq = MAX_MSIX;
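		/* nreq: one completion vector per online CPU for each port plus
		 * one for the async EQ, bounded by the EQs the firmware exposes
		 * (num_eqs - reserved_eqs) and by the driver's MAX_MSIX limit.
		 */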
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002679
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -08002680 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
2681 if (!entries)
2682 goto no_msi;
2683
2684 for (i = 0; i < nreq; ++i)
Roland Dreier225c7b12007-05-08 18:00:38 -07002685 entries[i].entry = i;
2686
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002687 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
2688 nreq);
Alexander Gordeev66e2f9c2014-02-18 11:11:47 +01002689
Matan Barakc66fa192015-05-31 09:30:16 +03002690 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
Nicolas Morey-Chaisemartin5bf0da72009-04-21 10:11:06 -07002691 kfree(entries);
Roland Dreier225c7b12007-05-08 18:00:38 -07002692 goto no_msi;
Yevgeny Petrilin0b7ca5a2011-03-22 22:37:47 +00002693 }
Matan Barakc66fa192015-05-31 09:30:16 +03002694		/* 1 is reserved for events (asynchronous EQ) */
2695 dev->caps.num_comp_vectors = nreq - 1;
2696
2697 priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
2698 bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
2699 dev->caps.num_ports);
2700
2701 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
2702 if (i == MLX4_EQ_ASYNC)
2703 continue;
2704
2705 priv->eq_table.eq[i].irq =
2706 entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
2707
Carol L Soto85121d62015-10-07 12:31:46 -04002708 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
Matan Barakc66fa192015-05-31 09:30:16 +03002709 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
2710 dev->caps.num_ports);
Ido Shamayde161802015-05-31 09:30:17 +03002711 /* We don't set affinity hint when there
2712 * aren't enough EQs
2713 */
Matan Barakc66fa192015-05-31 09:30:16 +03002714 } else {
2715 set_bit(port,
2716 priv->eq_table.eq[i].actv_ports.ports);
Ido Shamayde161802015-05-31 09:30:17 +03002717 if (mlx4_init_affinity_hint(dev, port + 1, i))
2718 mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
2719 i);
Matan Barakc66fa192015-05-31 09:30:16 +03002720 }
2721 /* We divide the Eqs evenly between the two ports.
2722 * (dev->caps.num_comp_vectors / dev->caps.num_ports)
2723 * refers to the number of Eqs per port
2724 * (i.e eqs_per_port). Theoretically, we would like to
2725 * write something like (i + 1) % eqs_per_port == 0.
2726 * However, since there's an asynchronous Eq, we have
2727 * to skip over it by comparing this condition to
2728 * !!((i + 1) > MLX4_EQ_ASYNC).
2729 */
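			/* For illustration (values assumed, not from the code):
			 * with MLX4_EQ_ASYNC == 0, num_comp_vectors == 8 and
			 * num_ports == 2, eqs_per_port == 4 and the condition
			 * below holds for i == 4 and i == 8, so completion EQs
			 * 1-4 serve port 1 and EQs 5-8 serve port 2.
			 */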
2730 if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
2731 ((i + 1) %
2732 (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
2733 !!((i + 1) > MLX4_EQ_ASYNC))
2734 /* If dev->caps.num_comp_vectors < dev->caps.num_ports,
2735 * everything is shared anyway.
2736 */
2737 port++;
2738 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002739
2740 dev->flags |= MLX4_FLAG_MSI_X;
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -08002741
2742 kfree(entries);
Roland Dreier225c7b12007-05-08 18:00:38 -07002743 return;
2744 }
2745
2746no_msi:
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -08002747 dev->caps.num_comp_vectors = 1;
2748
Matan Barakc66fa192015-05-31 09:30:16 +03002749 BUG_ON(MLX4_EQ_ASYNC >= 2);
2750 for (i = 0; i < 2; ++i) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002751 priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
Matan Barakc66fa192015-05-31 09:30:16 +03002752 if (i != MLX4_EQ_ASYNC) {
2753 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
2754 dev->caps.num_ports);
2755 }
2756 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002757}
2758
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07002759static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
Yevgeny Petrilin2a2336f2008-10-22 11:44:46 -07002760{
2761 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07002762 int err = 0;
Yevgeny Petrilin2a2336f2008-10-22 11:44:46 -07002763
2764 info->dev = dev;
2765 info->port = port;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002766 if (!mlx4_is_slave(dev)) {
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002767 mlx4_init_mac_table(dev, &info->mac_table);
2768 mlx4_init_vlan_table(dev, &info->vlan_table);
Jack Morgenstein111c6092014-05-27 09:26:38 +03002769 mlx4_init_roce_gid_table(dev, &info->gid_table);
Yan Burman16a10ff2013-02-07 02:25:22 +00002770 info->base_qpn = mlx4_get_base_qpn(dev, port);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002771 }
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07002772
2773 sprintf(info->dev_name, "mlx4_port%d", port);
2774 info->port_attr.attr.name = info->dev_name;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002775 if (mlx4_is_mfunc(dev))
2776 info->port_attr.attr.mode = S_IRUGO;
2777 else {
2778 info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
2779 info->port_attr.store = set_port_type;
2780 }
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07002781 info->port_attr.show = show_port_type;
Greg Kroah-Hartman3691c9642010-03-15 14:01:55 -07002782 sysfs_attr_init(&info->port_attr.attr);
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07002783
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002784 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07002785 if (err) {
2786 mlx4_err(dev, "Failed to create file for port %d\n", port);
2787 info->port = -1;
2788 }
2789
Or Gerlitz096335b2012-01-11 19:02:17 +02002790 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
2791 info->port_mtu_attr.attr.name = info->dev_mtu_name;
2792 if (mlx4_is_mfunc(dev))
2793 info->port_mtu_attr.attr.mode = S_IRUGO;
2794 else {
2795 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
2796 info->port_mtu_attr.store = set_port_ib_mtu;
2797 }
2798 info->port_mtu_attr.show = show_port_ib_mtu;
2799 sysfs_attr_init(&info->port_mtu_attr.attr);
2800
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002801 err = device_create_file(&dev->persist->pdev->dev,
2802 &info->port_mtu_attr);
Or Gerlitz096335b2012-01-11 19:02:17 +02002803 if (err) {
2804 mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002805 device_remove_file(&info->dev->persist->pdev->dev,
2806 &info->port_attr);
Or Gerlitz096335b2012-01-11 19:02:17 +02002807 info->port = -1;
2808 }
2809
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07002810 return err;
2811}
2812
2813static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
2814{
2815 if (info->port < 0)
2816 return;
2817
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002818 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
2819 device_remove_file(&info->dev->persist->pdev->dev,
2820 &info->port_mtu_attr);
Matan Barakc66fa192015-05-31 09:30:16 +03002821#ifdef CONFIG_RFS_ACCEL
2822 free_irq_cpu_rmap(info->rmap);
2823 info->rmap = NULL;
2824#endif
Yevgeny Petrilin2a2336f2008-10-22 11:44:46 -07002825}
2826
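/* Per-port steering state: each port carries MLX4_NUM_STEERS pairs of lists,
 * one for promiscuous QPs and one for steering entries (with their duplicate
 * QP lists); mlx4_clear_steering() below frees all of them.
 */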
Yevgeny Petrilinb12d93d2011-03-22 22:38:24 +00002827static int mlx4_init_steering(struct mlx4_dev *dev)
2828{
2829 struct mlx4_priv *priv = mlx4_priv(dev);
2830 int num_entries = dev->caps.num_ports;
2831 int i, j;
2832
2833 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
2834 if (!priv->steer)
2835 return -ENOMEM;
2836
Eugenia Emantayev45b51362012-02-14 06:37:41 +00002837 for (i = 0; i < num_entries; i++)
Yevgeny Petrilinb12d93d2011-03-22 22:38:24 +00002838 for (j = 0; j < MLX4_NUM_STEERS; j++) {
2839 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
2840 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
2841 }
Yevgeny Petrilinb12d93d2011-03-22 22:38:24 +00002842 return 0;
2843}
2844
2845static void mlx4_clear_steering(struct mlx4_dev *dev)
2846{
2847 struct mlx4_priv *priv = mlx4_priv(dev);
2848 struct mlx4_steer_index *entry, *tmp_entry;
2849 struct mlx4_promisc_qp *pqp, *tmp_pqp;
2850 int num_entries = dev->caps.num_ports;
2851 int i, j;
2852
2853 for (i = 0; i < num_entries; i++) {
2854 for (j = 0; j < MLX4_NUM_STEERS; j++) {
2855 list_for_each_entry_safe(pqp, tmp_pqp,
2856 &priv->steer[i].promisc_qps[j],
2857 list) {
2858 list_del(&pqp->list);
2859 kfree(pqp);
2860 }
2861 list_for_each_entry_safe(entry, tmp_entry,
2862 &priv->steer[i].steer_entries[j],
2863 list) {
2864 list_del(&entry->list);
2865 list_for_each_entry_safe(pqp, tmp_pqp,
2866 &entry->duplicates,
2867 list) {
2868 list_del(&pqp->list);
2869 kfree(pqp);
2870 }
2871 kfree(entry);
2872 }
2873 }
2874 }
2875 kfree(priv->steer);
2876}
2877
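/* Flatten the PCI slot/function pair into a single function index (eight
 * functions per slot), so a VF can be matched against the cumulative
 * per-port num_vfs/probe_vf ranges in __mlx4_init_one() below.
 */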
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002878static int extended_func_num(struct pci_dev *pdev)
2879{
2880 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
2881}
2882
2883#define MLX4_OWNER_BASE 0x8069c
2884#define MLX4_OWNER_SIZE 4
2885
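/* The ownership dword in BAR 0 acts as a device-level semaphore: a zero
 * read appears to claim the HCA for this function (released later by
 * writing zero in mlx4_free_ownership()), while a non-zero read means
 * another PF already holds it.
 */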
2886static int mlx4_get_ownership(struct mlx4_dev *dev)
2887{
2888 void __iomem *owner;
2889 u32 ret;
2890
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002891 if (pci_channel_offline(dev->persist->pdev))
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +00002892 return -EIO;
2893
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002894 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
2895 MLX4_OWNER_BASE,
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002896 MLX4_OWNER_SIZE);
2897 if (!owner) {
2898 mlx4_err(dev, "Failed to obtain ownership bit\n");
2899 return -ENOMEM;
2900 }
2901
2902 ret = readl(owner);
2903 iounmap(owner);
2904 return (int) !!ret;
2905}
2906
2907static void mlx4_free_ownership(struct mlx4_dev *dev)
2908{
2909 void __iomem *owner;
2910
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002911 if (pci_channel_offline(dev->persist->pdev))
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +00002912 return;
2913
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002914 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
2915 MLX4_OWNER_BASE,
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002916 MLX4_OWNER_SIZE);
2917 if (!owner) {
2918 mlx4_err(dev, "Failed to obtain ownership bit\n");
2919 return;
2920 }
2921 writel(0, owner);
2922 msleep(1000);
2923 iounmap(owner);
2924}
2925
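/* The flag combination is consistent only when MLX4_FLAG_SRIOV and
 * MLX4_FLAG_MASTER are either both set or both cleared.
 */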
Matan Baraka0eacca2014-11-13 14:45:30 +02002926#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
2927 !!((flags) & MLX4_FLAG_MASTER))
2928
2929static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
Yishai Hadas55ad3592015-01-25 16:59:42 +02002930 u8 total_vfs, int existing_vfs, int reset_flow)
Matan Baraka0eacca2014-11-13 14:45:30 +02002931{
2932 u64 dev_flags = dev->flags;
Matan Barakda315672014-12-14 16:18:04 +02002933 int err = 0;
Carol Soto0beb44b2015-07-06 09:20:19 -05002934 int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
2935 MLX4_MAX_NUM_VF);
Matan Baraka0eacca2014-11-13 14:45:30 +02002936
Yishai Hadas55ad3592015-01-25 16:59:42 +02002937 if (reset_flow) {
2938 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
2939 GFP_KERNEL);
2940 if (!dev->dev_vfs)
2941 goto free_mem;
2942 return dev_flags;
2943 }
2944
Matan Barakda315672014-12-14 16:18:04 +02002945 atomic_inc(&pf_loading);
2946 if (dev->flags & MLX4_FLAG_SRIOV) {
2947 if (existing_vfs != total_vfs) {
2948 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
2949 existing_vfs, total_vfs);
2950 total_vfs = existing_vfs;
2951 }
2952 }
2953
2954 dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
Matan Baraka0eacca2014-11-13 14:45:30 +02002955 if (NULL == dev->dev_vfs) {
2956 mlx4_err(dev, "Failed to allocate memory for VFs\n");
2957 goto disable_sriov;
Matan Barakda315672014-12-14 16:18:04 +02002958 }
Matan Baraka0eacca2014-11-13 14:45:30 +02002959
Matan Barakda315672014-12-14 16:18:04 +02002960 if (!(dev->flags & MLX4_FLAG_SRIOV)) {
Carol Soto0beb44b2015-07-06 09:20:19 -05002961 if (total_vfs > fw_enabled_sriov_vfs) {
2962 mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
2963 total_vfs, fw_enabled_sriov_vfs);
2964 err = -ENOMEM;
2965 goto disable_sriov;
2966 }
Matan Barakda315672014-12-14 16:18:04 +02002967 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
2968 err = pci_enable_sriov(pdev, total_vfs);
2969 }
2970 if (err) {
2971 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
2972 err);
2973 goto disable_sriov;
2974 } else {
2975 mlx4_warn(dev, "Running in master mode\n");
2976 dev_flags |= MLX4_FLAG_SRIOV |
2977 MLX4_FLAG_MASTER;
2978 dev_flags &= ~MLX4_FLAG_SLAVE;
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002979 dev->persist->num_vfs = total_vfs;
Matan Baraka0eacca2014-11-13 14:45:30 +02002980 }
2981 return dev_flags;
2982
2983disable_sriov:
Matan Barakda315672014-12-14 16:18:04 +02002984 atomic_dec(&pf_loading);
Yishai Hadas55ad3592015-01-25 16:59:42 +02002985free_mem:
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002986 dev->persist->num_vfs = 0;
Matan Baraka0eacca2014-11-13 14:45:30 +02002987 kfree(dev->dev_vfs);
Carol L Soto5114a042015-06-02 16:07:23 -05002988 dev->dev_vfs = NULL;
Matan Baraka0eacca2014-11-13 14:45:30 +02002989 return dev_flags & ~MLX4_FLAG_MASTER;
2990}
2991
Matan Barakde966c52014-11-13 14:45:33 +02002992enum {
2993 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
2994};
2995
2996static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
2997 int *nvfs)
2998{
2999 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
3000 /* Checking for 64 VFs as a limitation of CX2 */
3001 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
3002 requested_vfs >= 64) {
3003 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
3004 requested_vfs);
3005 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
3006 }
3007 return 0;
3008}
3009
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003010static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
Yishai Hadas55ad3592015-01-25 16:59:42 +02003011 int total_vfs, int *nvfs, struct mlx4_priv *priv,
3012 int reset_flow)
Roland Dreier225c7b12007-05-08 18:00:38 -07003013{
Roland Dreier225c7b12007-05-08 18:00:38 -07003014 struct mlx4_dev *dev;
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003015 unsigned sum = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07003016 int err;
Yevgeny Petrilin2a2336f2008-10-22 11:44:46 -07003017 int port;
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003018 int i;
Matan Barak7ae0e402014-11-13 14:45:32 +02003019 struct mlx4_dev_cap *dev_cap = NULL;
Jack Morgensteinbbb07af2014-09-30 12:03:47 +03003020 int existing_vfs = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07003021
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003022 dev = &priv->dev;
Roland Dreier225c7b12007-05-08 18:00:38 -07003023
Roland Dreierb5814012007-06-07 11:51:58 -07003024 INIT_LIST_HEAD(&priv->ctx_list);
3025 spin_lock_init(&priv->ctx_lock);
Roland Dreier225c7b12007-05-08 18:00:38 -07003026
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003027 mutex_init(&priv->port_mutex);
Moni Shoua53f33ae2015-02-03 16:48:33 +02003028 mutex_init(&priv->bond_mutex);
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003029
Yevgeny Petrilin62968832008-04-23 11:55:45 -07003030 INIT_LIST_HEAD(&priv->pgdir_list);
3031 mutex_init(&priv->pgdir_mutex);
3032
Eli Cohenc1b43dc2011-03-22 22:38:41 +00003033 INIT_LIST_HEAD(&priv->bf_list);
3034 mutex_init(&priv->bf_mutex);
3035
Sergei Shtylyovaca7a3a2011-06-23 04:44:30 +00003036 dev->rev_id = pdev->revision;
Eugenia Emantayev6e7136e2013-11-07 12:19:53 +02003037 dev->numa_node = dev_to_node(&pdev->dev);
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003038
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003039 /* Detect if this device is a virtual function */
Roland Dreier839f1242012-09-27 09:23:41 -07003040 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003041 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
3042 dev->flags |= MLX4_FLAG_SLAVE;
3043 } else {
3044 /* We reset the device and enable SRIOV only for physical
3045 * devices. Try to claim ownership on the device;
3046 * if already taken, skip -- do not allow multiple PFs */
3047 err = mlx4_get_ownership(dev);
3048 if (err) {
3049 if (err < 0)
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003050 return err;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003051 else {
Joe Perches1a91de22014-05-07 12:52:57 -07003052 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003053 return -EINVAL;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003054 }
3055 }
Sergei Shtylyovaca7a3a2011-06-23 04:44:30 +00003056
Yevgeny Petrilinfe6f7002013-07-28 18:54:21 +03003057 atomic_set(&priv->opreq_count, 0);
3058 INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
3059
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003060 /*
3061 * Now reset the HCA before we touch the PCI capabilities or
3062 * attempt a firmware command, since a boot ROM may have left
3063 * the HCA in an undefined state.
3064 */
3065 err = mlx4_reset(dev);
3066 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07003067 mlx4_err(dev, "Failed to reset HCA, aborting\n");
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003068 goto err_sriov;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003069 }
Matan Barak7ae0e402014-11-13 14:45:32 +02003070
3071 if (total_vfs) {
Matan Barak7ae0e402014-11-13 14:45:32 +02003072 dev->flags = MLX4_FLAG_MASTER;
Matan Barakda315672014-12-14 16:18:04 +02003073 existing_vfs = pci_num_vf(pdev);
3074 if (existing_vfs)
3075 dev->flags |= MLX4_FLAG_SRIOV;
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003076 dev->persist->num_vfs = total_vfs;
Matan Barak7ae0e402014-11-13 14:45:32 +02003077 }
Roland Dreier225c7b12007-05-08 18:00:38 -07003078 }
3079
Yishai Hadasf6bc11e2015-01-25 16:59:38 +02003080 /* on load remove any previous indication of internal error,
3081 * device is up.
3082 */
3083 dev->persist->state = MLX4_DEVICE_STATE_UP;
3084
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003085slave_start:
Eugenia Emantayev521130d2012-09-05 22:50:52 +00003086 err = mlx4_cmd_init(dev);
3087 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07003088 mlx4_err(dev, "Failed to init command interface, aborting\n");
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003089 goto err_sriov;
3090 }
3091
3092 /* In slave functions, the communication channel must be initialized
3093 * before posting commands. Also, init num_slaves before calling
3094 * mlx4_init_hca */
3095 if (mlx4_is_mfunc(dev)) {
Matan Barak7ae0e402014-11-13 14:45:32 +02003096 if (mlx4_is_master(dev)) {
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003097 dev->num_slaves = MLX4_MAX_NUM_SLAVES;
Matan Barak7ae0e402014-11-13 14:45:32 +02003098
3099 } else {
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003100 dev->num_slaves = 0;
Jack Morgensteinf356fcbe2013-01-24 01:54:17 +00003101 err = mlx4_multi_func_init(dev);
3102 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07003103 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003104 goto err_cmd;
3105 }
3106 }
Roland Dreier225c7b12007-05-08 18:00:38 -07003107 }
3108
Matan Baraka0eacca2014-11-13 14:45:30 +02003109 err = mlx4_init_fw(dev);
3110 if (err) {
3111 mlx4_err(dev, "Failed to init fw, aborting.\n");
3112 goto err_mfunc;
3113 }
3114
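	/* When acting as SR-IOV master there are two flows: firmware with
	 * MLX4_DEV_CAP_FLAG2_SYS_EQS is queried first and SR-IOV is enabled
	 * only after mlx4_init_hca(), while legacy firmware needs SR-IOV
	 * enabled before QUERY_DEV_CAP reports the right number of EQs, so it
	 * is enabled here and the HCA is reset and reprobed via slave_start.
	 */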
Matan Barak7ae0e402014-11-13 14:45:32 +02003115 if (mlx4_is_master(dev)) {
Matan Barakda315672014-12-14 16:18:04 +02003116		/* when we hit the goto slave_start below, dev_cap is already initialized */
Matan Barak7ae0e402014-11-13 14:45:32 +02003117 if (!dev_cap) {
3118 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
3119
3120 if (!dev_cap) {
3121 err = -ENOMEM;
3122 goto err_fw;
3123 }
3124
3125 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3126 if (err) {
3127 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3128 goto err_fw;
3129 }
3130
Matan Barakde966c52014-11-13 14:45:33 +02003131 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3132 goto err_fw;
3133
Matan Barak7ae0e402014-11-13 14:45:32 +02003134 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
Yishai Hadas55ad3592015-01-25 16:59:42 +02003135 u64 dev_flags = mlx4_enable_sriov(dev, pdev,
3136 total_vfs,
3137 existing_vfs,
3138 reset_flow);
Matan Barak7ae0e402014-11-13 14:45:32 +02003139
Carol Sotoed3d2272015-06-02 16:07:24 -05003140 mlx4_close_fw(dev);
Matan Barak7ae0e402014-11-13 14:45:32 +02003141 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3142 dev->flags = dev_flags;
3143 if (!SRIOV_VALID_STATE(dev->flags)) {
3144 mlx4_err(dev, "Invalid SRIOV state\n");
3145 goto err_sriov;
3146 }
3147 err = mlx4_reset(dev);
3148 if (err) {
3149 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
3150 goto err_sriov;
3151 }
3152 goto slave_start;
3153 }
3154 } else {
3155 /* Legacy mode FW requires SRIOV to be enabled before
3156 * doing QUERY_DEV_CAP, since max_eq's value is different if
3157 * SRIOV is enabled.
3158 */
3159 memset(dev_cap, 0, sizeof(*dev_cap));
3160 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3161 if (err) {
3162 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3163 goto err_fw;
3164 }
Matan Barakde966c52014-11-13 14:45:33 +02003165
3166 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3167 goto err_fw;
Matan Barak7ae0e402014-11-13 14:45:32 +02003168 }
3169 }
3170
Roland Dreier225c7b12007-05-08 18:00:38 -07003171 err = mlx4_init_hca(dev);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003172 if (err) {
3173 if (err == -EACCES) {
3174 /* Not primary Physical function
3175 * Running in slave mode */
Matan Barakffc39f62014-11-13 14:45:29 +02003176 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
Matan Baraka0eacca2014-11-13 14:45:30 +02003177 /* We're not a PF */
3178 if (dev->flags & MLX4_FLAG_SRIOV) {
3179 if (!existing_vfs)
3180 pci_disable_sriov(pdev);
Yishai Hadas55ad3592015-01-25 16:59:42 +02003181 if (mlx4_is_master(dev) && !reset_flow)
Matan Baraka0eacca2014-11-13 14:45:30 +02003182 atomic_dec(&pf_loading);
3183 dev->flags &= ~MLX4_FLAG_SRIOV;
3184 }
3185 if (!mlx4_is_slave(dev))
3186 mlx4_free_ownership(dev);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003187 dev->flags |= MLX4_FLAG_SLAVE;
3188 dev->flags &= ~MLX4_FLAG_MASTER;
3189 goto slave_start;
3190 } else
Matan Baraka0eacca2014-11-13 14:45:30 +02003191 goto err_fw;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003192 }
3193
Matan Barak7ae0e402014-11-13 14:45:32 +02003194 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
Yishai Hadas55ad3592015-01-25 16:59:42 +02003195 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
3196 existing_vfs, reset_flow);
Matan Barak7ae0e402014-11-13 14:45:32 +02003197
3198 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
3199 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
3200 dev->flags = dev_flags;
3201 err = mlx4_cmd_init(dev);
3202 if (err) {
3203 /* Only VHCR is cleaned up, so could still
3204 * send FW commands
3205 */
3206 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
3207 goto err_close;
3208 }
3209 } else {
3210 dev->flags = dev_flags;
3211 }
3212
3213 if (!SRIOV_VALID_STATE(dev->flags)) {
3214 mlx4_err(dev, "Invalid SRIOV state\n");
3215 goto err_close;
3216 }
3217 }
3218
Eyal Perryb912b2f2014-01-05 17:41:08 +02003219 /* check if the device is functioning at its maximum possible speed.
3220 * No return code for this call, just warn the user in case of PCI
3221 * express device capabilities are under-satisfied by the bus.
3222 */
Eyal Perry83d34592014-05-04 17:07:25 +03003223 if (!mlx4_is_slave(dev))
3224 mlx4_check_pcie_caps(dev);
Eyal Perryb912b2f2014-01-05 17:41:08 +02003225
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003226 /* In master functions, the communication channel must be initialized
3227 * after obtaining its address from fw */
3228 if (mlx4_is_master(dev)) {
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003229 if (dev->caps.num_ports < 2 &&
3230 num_vfs_argc > 1) {
3231 err = -EINVAL;
3232 mlx4_err(dev,
3233 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
3234 dev->caps.num_ports);
3235 goto err_close;
3236 }
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003237 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
Matan Barakdd41cc32014-03-19 18:11:53 +02003238
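		/* Record each VF's first port and port span: nvfs[0] counts
		 * single-port VFs on port 1, nvfs[1] single-port VFs on port 2,
		 * and nvfs[2] dual-port VFs spanning all ports of the HCA.
		 */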
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003239 for (i = 0;
3240 i < sizeof(dev->persist->nvfs)/
3241 sizeof(dev->persist->nvfs[0]); i++) {
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003242 unsigned j;
3243
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003244 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003245 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
3246 dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
3247 dev->caps.num_ports;
Matan Barakdd41cc32014-03-19 18:11:53 +02003248 }
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003249 }
3250
3251 /* In master functions, the communication channel
3252 * must be initialized after obtaining its address from fw
3253 */
3254 err = mlx4_multi_func_init(dev);
3255 if (err) {
3256 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
3257 goto err_close;
Matan Barak1ab95d32014-03-19 18:11:50 +02003258 }
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003259 }
Roland Dreier225c7b12007-05-08 18:00:38 -07003260
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -08003261 err = mlx4_alloc_eq_table(dev);
3262 if (err)
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003263 goto err_master_mfunc;
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -08003264
Matan Barakc66fa192015-05-31 09:30:16 +03003265 bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
Yevgeny Petrilin730c41d2012-02-21 03:39:32 +00003266 mutex_init(&priv->msix_ctl.pool_lock);
Yevgeny Petrilin0b7ca5a2011-03-22 22:37:47 +00003267
Michael S. Tsirkin08fb1052007-08-07 16:08:28 +03003268 mlx4_enable_msi_x(dev);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003269 if ((mlx4_is_mfunc(dev)) &&
3270 !(dev->flags & MLX4_FLAG_MSI_X)) {
Jack Morgensteinf356fcbe2013-01-24 01:54:17 +00003271 err = -ENOSYS;
Joe Perches1a91de22014-05-07 12:52:57 -07003272 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
Yevgeny Petrilinb12d93d2011-03-22 22:38:24 +00003273 goto err_free_eq;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003274 }
3275
3276 if (!mlx4_is_slave(dev)) {
3277 err = mlx4_init_steering(dev);
3278 if (err)
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003279 goto err_disable_msix;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003280 }
Yevgeny Petrilinb12d93d2011-03-22 22:38:24 +00003281
Roland Dreier225c7b12007-05-08 18:00:38 -07003282 err = mlx4_setup_hca(dev);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003283 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
3284 !mlx4_is_mfunc(dev)) {
Michael S. Tsirkin08fb1052007-08-07 16:08:28 +03003285 dev->flags &= ~MLX4_FLAG_MSI_X;
Yevgeny Petrilin9858d2d2012-06-25 00:24:12 +00003286 dev->caps.num_comp_vectors = 1;
Michael S. Tsirkin08fb1052007-08-07 16:08:28 +03003287 pci_disable_msix(pdev);
3288 err = mlx4_setup_hca(dev);
3289 }
3290
Roland Dreier225c7b12007-05-08 18:00:38 -07003291 if (err)
Yevgeny Petrilinb12d93d2011-03-22 22:38:24 +00003292 goto err_steer;
Roland Dreier225c7b12007-05-08 18:00:38 -07003293
Jack Morgenstein5a0d0a62013-11-03 10:03:23 +02003294 mlx4_init_quotas(dev);
Yishai Hadas55ad3592015-01-25 16:59:42 +02003295 /* When PF resources are ready arm its comm channel to enable
3296 * getting commands
3297 */
3298 if (mlx4_is_master(dev)) {
3299 err = mlx4_ARM_COMM_CHANNEL(dev);
3300 if (err) {
3301			mlx4_err(dev, "Failed to arm comm channel EQ: %x\n",
3302 err);
3303 goto err_steer;
3304 }
3305 }
Jack Morgenstein5a0d0a62013-11-03 10:03:23 +02003306
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003307 for (port = 1; port <= dev->caps.num_ports; port++) {
3308 err = mlx4_init_port_info(dev, port);
3309 if (err)
3310 goto err_port;
3311 }
Yevgeny Petrilin2a2336f2008-10-22 11:44:46 -07003312
Moni Shoua53f33ae2015-02-03 16:48:33 +02003313 priv->v2p.port1 = 1;
3314 priv->v2p.port2 = 2;
3315
Roland Dreier225c7b12007-05-08 18:00:38 -07003316 err = mlx4_register_device(dev);
3317 if (err)
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003318 goto err_port;
Roland Dreier225c7b12007-05-08 18:00:38 -07003319
Eyal Perryb046ffe2013-10-15 16:55:24 +02003320 mlx4_request_modules(dev);
3321
Yevgeny Petrilin27bf91d2009-03-18 19:45:11 -07003322 mlx4_sense_init(dev);
3323 mlx4_start_sense(dev);
3324
Wei Yangbefdf892014-04-14 09:51:19 +08003325 priv->removed = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07003326
Yishai Hadas55ad3592015-01-25 16:59:42 +02003327 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
Amir Vadaie1a5ddc2014-04-14 11:17:22 +03003328 atomic_dec(&pf_loading);
3329
Matan Barakda315672014-12-14 16:18:04 +02003330 kfree(dev_cap);
Roland Dreier225c7b12007-05-08 18:00:38 -07003331 return 0;
3332
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003333err_port:
Eli Cohenb4f77262010-01-06 12:54:39 -08003334 for (--port; port >= 1; --port)
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003335 mlx4_cleanup_port_info(&priv->port[port]);
3336
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03003337 mlx4_cleanup_default_counters(dev);
Eran Ben Elisha2632d182015-06-15 17:58:59 +03003338 if (!mlx4_is_slave(dev))
3339 mlx4_cleanup_counters_table(dev);
Roland Dreier225c7b12007-05-08 18:00:38 -07003340 mlx4_cleanup_qp_table(dev);
3341 mlx4_cleanup_srq_table(dev);
3342 mlx4_cleanup_cq_table(dev);
3343 mlx4_cmd_use_polling(dev);
3344 mlx4_cleanup_eq_table(dev);
Yevgeny Petrilinfe6f7002013-07-28 18:54:21 +03003345 mlx4_cleanup_mcg_table(dev);
Roland Dreier225c7b12007-05-08 18:00:38 -07003346 mlx4_cleanup_mr_table(dev);
Sean Hefty012a8ff2011-06-02 09:01:33 -07003347 mlx4_cleanup_xrcd_table(dev);
Roland Dreier225c7b12007-05-08 18:00:38 -07003348 mlx4_cleanup_pd_table(dev);
3349 mlx4_cleanup_uar_table(dev);
3350
Yevgeny Petrilinb12d93d2011-03-22 22:38:24 +00003351err_steer:
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003352 if (!mlx4_is_slave(dev))
3353 mlx4_clear_steering(dev);
Yevgeny Petrilinb12d93d2011-03-22 22:38:24 +00003354
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003355err_disable_msix:
3356 if (dev->flags & MLX4_FLAG_MSI_X)
3357 pci_disable_msix(pdev);
3358
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -08003359err_free_eq:
3360 mlx4_free_eq_table(dev);
3361
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003362err_master_mfunc:
Jack Morgenstein772103e2015-01-27 15:58:01 +02003363 if (mlx4_is_master(dev)) {
3364 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003365 mlx4_multi_func_cleanup(dev);
Jack Morgenstein772103e2015-01-27 15:58:01 +02003366 }
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003367
Dotan Barakb38f2872014-05-29 16:30:59 +03003368 if (mlx4_is_slave(dev)) {
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003369 kfree(dev->caps.qp0_qkey);
Dotan Barakb38f2872014-05-29 16:30:59 +03003370 kfree(dev->caps.qp0_tunnel);
3371 kfree(dev->caps.qp0_proxy);
3372 kfree(dev->caps.qp1_tunnel);
3373 kfree(dev->caps.qp1_proxy);
3374 }
3375
Roland Dreier225c7b12007-05-08 18:00:38 -07003376err_close:
3377 mlx4_close_hca(dev);
3378
Matan Baraka0eacca2014-11-13 14:45:30 +02003379err_fw:
3380 mlx4_close_fw(dev);
3381
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003382err_mfunc:
3383 if (mlx4_is_slave(dev))
3384 mlx4_multi_func_cleanup(dev);
3385
Roland Dreier225c7b12007-05-08 18:00:38 -07003386err_cmd:
Matan Barakffc39f62014-11-13 14:45:29 +02003387 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
Roland Dreier225c7b12007-05-08 18:00:38 -07003388
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003389err_sriov:
Yishai Hadas55ad3592015-01-25 16:59:42 +02003390 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003391 pci_disable_sriov(pdev);
Yishai Hadas55ad3592015-01-25 16:59:42 +02003392 dev->flags &= ~MLX4_FLAG_SRIOV;
3393 }
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003394
Yishai Hadas55ad3592015-01-25 16:59:42 +02003395 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
Amir Vadaie1a5ddc2014-04-14 11:17:22 +03003396 atomic_dec(&pf_loading);
3397
Matan Barak1ab95d32014-03-19 18:11:50 +02003398 kfree(priv->dev.dev_vfs);
3399
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003400 if (!mlx4_is_slave(dev))
3401 mlx4_free_ownership(dev);
3402
Matan Barak7ae0e402014-11-13 14:45:32 +02003403 kfree(dev_cap);
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003404 return err;
3405}
3406
3407static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
3408 struct mlx4_priv *priv)
3409{
3410 int err;
3411 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3412 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3413 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
3414 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
3415 unsigned total_vfs = 0;
3416 unsigned int i;
3417
3418 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
3419
3420 err = pci_enable_device(pdev);
3421 if (err) {
3422 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
3423 return err;
3424 }
3425
3426	/* Due to the requirement that all VFs and the PF are *guaranteed* 2 MACs
3427	 * per port, we must limit the number of VFs to 63 (since there are
3428	 * 128 MACs)
3429 */
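	/* num_vfs values are remapped through param_map by argument count: a
	 * single value configures dual-port VFs (nvfs[2]), two values map to
	 * ports 1 and 2, and three values fill port 1, port 2 and dual-port
	 * slots in that order; probe_vf below is remapped the same way.
	 */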
3430 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
3431 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
3432 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
3433 if (nvfs[i] < 0) {
3434 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
3435 err = -EINVAL;
3436 goto err_disable_pdev;
3437 }
3438 }
3439 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
3440 i++) {
3441 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
3442 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
3443 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
3444 err = -EINVAL;
3445 goto err_disable_pdev;
3446 }
3447 }
Carol Soto0beb44b2015-07-06 09:20:19 -05003448 if (total_vfs > MLX4_MAX_NUM_VF) {
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003449 dev_err(&pdev->dev,
Carol Soto0beb44b2015-07-06 09:20:19 -05003450			"Requested more VFs (%d) than allowed by hw (%d)\n",
3451 total_vfs, MLX4_MAX_NUM_VF);
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003452 err = -EINVAL;
3453 goto err_disable_pdev;
3454 }
3455
3456 for (i = 0; i < MLX4_MAX_PORTS; i++) {
Carol Soto0beb44b2015-07-06 09:20:19 -05003457 if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003458 dev_err(&pdev->dev,
Carol Soto0beb44b2015-07-06 09:20:19 -05003459				"Requested more VFs (%d) for port (%d) than allowed by driver (%d)\n",
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003460 nvfs[i] + nvfs[2], i + 1,
Carol Soto0beb44b2015-07-06 09:20:19 -05003461 MLX4_MAX_NUM_VF_P_PORT);
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003462 err = -EINVAL;
3463 goto err_disable_pdev;
3464 }
3465 }
3466
3467 /* Check for BARs. */
3468 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
3469 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
3470 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
3471 pci_dev_data, pci_resource_flags(pdev, 0));
3472 err = -ENODEV;
3473 goto err_disable_pdev;
3474 }
3475 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
3476 dev_err(&pdev->dev, "Missing UAR, aborting\n");
3477 err = -ENODEV;
3478 goto err_disable_pdev;
3479 }
3480
3481 err = pci_request_regions(pdev, DRV_NAME);
3482 if (err) {
3483 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
3484 goto err_disable_pdev;
3485 }
3486
3487 pci_set_master(pdev);
3488
3489 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3490 if (err) {
3491 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
3492 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3493 if (err) {
3494 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
3495 goto err_release_regions;
3496 }
3497 }
3498 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3499 if (err) {
3500 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
3501 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3502 if (err) {
3503 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
3504 goto err_release_regions;
3505 }
3506 }
3507
3508 /* Allow large DMA segments, up to the firmware limit of 1 GB */
3509 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
3510 /* Detect if this device is a virtual function */
3511 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
3512		/* When acting as PF, we normally skip VFs unless explicitly
3513 * requested to probe them.
3514 */
3515 if (total_vfs) {
3516 unsigned vfs_offset = 0;
3517
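			/* Find which per-port VF range this function number
			 * falls into; (extended_func_num(pdev) - vfs_offset) is
			 * then the VF's ordinal within that range and is
			 * compared against the matching probe_vf value.
			 */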
3518 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
3519 vfs_offset + nvfs[i] < extended_func_num(pdev);
3520 vfs_offset += nvfs[i], i++)
3521 ;
3522 if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
3523 err = -ENODEV;
3524 goto err_release_regions;
3525 }
3526 if ((extended_func_num(pdev) - vfs_offset)
3527 > prb_vf[i]) {
3528 dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
3529 extended_func_num(pdev));
3530 err = -ENODEV;
3531 goto err_release_regions;
3532 }
3533 }
3534 }
3535
Yishai Hadasad9a0bf2015-01-25 16:59:37 +02003536 err = mlx4_catas_init(&priv->dev);
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003537 if (err)
3538 goto err_release_regions;
Yishai Hadasad9a0bf2015-01-25 16:59:37 +02003539
Yishai Hadas55ad3592015-01-25 16:59:42 +02003540 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
Yishai Hadasad9a0bf2015-01-25 16:59:37 +02003541 if (err)
3542 goto err_catas;
3543
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003544 return 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07003545
Yishai Hadasad9a0bf2015-01-25 16:59:37 +02003546err_catas:
3547 mlx4_catas_end(&priv->dev);
3548
Roland Dreiera01df0f2009-09-05 20:24:48 -07003549err_release_regions:
3550 pci_release_regions(pdev);
Roland Dreier225c7b12007-05-08 18:00:38 -07003551
3552err_disable_pdev:
3553 pci_disable_device(pdev);
3554 pci_set_drvdata(pdev, NULL);
3555 return err;
3556}
3557
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00003558static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
Roland Dreier3d73c282007-10-10 15:43:54 -07003559{
Wei Yangbefdf892014-04-14 09:51:19 +08003560 struct mlx4_priv *priv;
3561 struct mlx4_dev *dev;
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003562 int ret;
Wei Yangbefdf892014-04-14 09:51:19 +08003563
Joe Perches0a645e82010-07-10 07:22:46 +00003564 printk_once(KERN_INFO "%s", mlx4_version);
Roland Dreier3d73c282007-10-10 15:43:54 -07003565
Wei Yangbefdf892014-04-14 09:51:19 +08003566 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
3567 if (!priv)
3568 return -ENOMEM;
3569
3570 dev = &priv->dev;
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003571 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
3572 if (!dev->persist) {
3573 kfree(priv);
3574 return -ENOMEM;
3575 }
3576 dev->persist->pdev = pdev;
3577 dev->persist->dev = dev;
3578 pci_set_drvdata(pdev, dev->persist);
Wei Yangbefdf892014-04-14 09:51:19 +08003579 priv->pci_dev_data = id->driver_data;
Yishai Hadasf6bc11e2015-01-25 16:59:38 +02003580 mutex_init(&dev->persist->device_state_mutex);
Yishai Hadasc69453e2015-01-25 16:59:40 +02003581 mutex_init(&dev->persist->interface_state_mutex);
Wei Yangbefdf892014-04-14 09:51:19 +08003582
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003583 ret = __mlx4_init_one(pdev, id->driver_data, priv);
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003584 if (ret) {
3585 kfree(dev->persist);
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003586 kfree(priv);
Yishai Hadas2ba5fbd2015-01-25 16:59:41 +02003587 } else {
3588 pci_save_state(pdev);
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003589 }
Yishai Hadas2ba5fbd2015-01-25 16:59:41 +02003590
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003591 return ret;
Roland Dreier3d73c282007-10-10 15:43:54 -07003592}
3593
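/* Wipe the driver-private state while keeping what must survive a reload:
 * the mlx4_dev_persistent pointer and the flags covered by
 * RESET_PERSIST_MASK_FLAGS are carried over, everything else is zeroed.
 */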
Yishai Hadasdd0eefe2015-01-25 16:59:36 +02003594static void mlx4_clean_dev(struct mlx4_dev *dev)
3595{
3596 struct mlx4_dev_persistent *persist = dev->persist;
3597 struct mlx4_priv *priv = mlx4_priv(dev);
Yishai Hadas55ad3592015-01-25 16:59:42 +02003598 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);
Yishai Hadasdd0eefe2015-01-25 16:59:36 +02003599
3600 memset(priv, 0, sizeof(*priv));
3601 priv->dev.persist = persist;
Yishai Hadas55ad3592015-01-25 16:59:42 +02003602 priv->dev.flags = flags;
Yishai Hadasdd0eefe2015-01-25 16:59:36 +02003603}
3604
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003605static void mlx4_unload_one(struct pci_dev *pdev)
Wei Yangbefdf892014-04-14 09:51:19 +08003606{
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003607 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3608 struct mlx4_dev *dev = persist->dev;
Wei Yangbefdf892014-04-14 09:51:19 +08003609 struct mlx4_priv *priv = mlx4_priv(dev);
3610 int pci_dev_data;
Yishai Hadasdd0eefe2015-01-25 16:59:36 +02003611 int p, i;
Wei Yangbefdf892014-04-14 09:51:19 +08003612
3613 if (priv->removed)
3614 return;
3615
Yishai Hadasdd0eefe2015-01-25 16:59:36 +02003616 /* saving current ports type for further use */
3617 for (i = 0; i < dev->caps.num_ports; i++) {
3618 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
3619 dev->persist->curr_port_poss_type[i] = dev->caps.
3620 possible_type[i + 1];
3621 }
3622
Wei Yangbefdf892014-04-14 09:51:19 +08003623 pci_dev_data = priv->pci_dev_data;
3624
Wei Yangbefdf892014-04-14 09:51:19 +08003625 mlx4_stop_sense(dev);
3626 mlx4_unregister_device(dev);
3627
3628 for (p = 1; p <= dev->caps.num_ports; p++) {
3629 mlx4_cleanup_port_info(&priv->port[p]);
3630 mlx4_CLOSE_PORT(dev, p);
3631 }
3632
3633 if (mlx4_is_master(dev))
3634 mlx4_free_resource_tracker(dev,
3635 RES_TR_FREE_SLAVES_ONLY);
3636
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03003637 mlx4_cleanup_default_counters(dev);
Eran Ben Elisha2632d182015-06-15 17:58:59 +03003638 if (!mlx4_is_slave(dev))
3639 mlx4_cleanup_counters_table(dev);
Wei Yangbefdf892014-04-14 09:51:19 +08003640 mlx4_cleanup_qp_table(dev);
3641 mlx4_cleanup_srq_table(dev);
3642 mlx4_cleanup_cq_table(dev);
3643 mlx4_cmd_use_polling(dev);
3644 mlx4_cleanup_eq_table(dev);
3645 mlx4_cleanup_mcg_table(dev);
3646 mlx4_cleanup_mr_table(dev);
3647 mlx4_cleanup_xrcd_table(dev);
3648 mlx4_cleanup_pd_table(dev);
3649
3650 if (mlx4_is_master(dev))
3651 mlx4_free_resource_tracker(dev,
3652 RES_TR_FREE_STRUCTS_ONLY);
3653
3654 iounmap(priv->kar);
3655 mlx4_uar_free(dev, &priv->driver_uar);
3656 mlx4_cleanup_uar_table(dev);
3657 if (!mlx4_is_slave(dev))
3658 mlx4_clear_steering(dev);
3659 mlx4_free_eq_table(dev);
3660 if (mlx4_is_master(dev))
3661 mlx4_multi_func_cleanup(dev);
3662 mlx4_close_hca(dev);
Matan Baraka0eacca2014-11-13 14:45:30 +02003663 mlx4_close_fw(dev);
Wei Yangbefdf892014-04-14 09:51:19 +08003664 if (mlx4_is_slave(dev))
3665 mlx4_multi_func_cleanup(dev);
Matan Barakffc39f62014-11-13 14:45:29 +02003666 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
Wei Yangbefdf892014-04-14 09:51:19 +08003667
3668 if (dev->flags & MLX4_FLAG_MSI_X)
3669 pci_disable_msix(pdev);
Wei Yangbefdf892014-04-14 09:51:19 +08003670
3671 if (!mlx4_is_slave(dev))
3672 mlx4_free_ownership(dev);
3673
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003674 kfree(dev->caps.qp0_qkey);
Wei Yangbefdf892014-04-14 09:51:19 +08003675 kfree(dev->caps.qp0_tunnel);
3676 kfree(dev->caps.qp0_proxy);
3677 kfree(dev->caps.qp1_tunnel);
3678 kfree(dev->caps.qp1_proxy);
3679 kfree(dev->dev_vfs);
3680
Yishai Hadasdd0eefe2015-01-25 16:59:36 +02003681 mlx4_clean_dev(dev);
Wei Yangbefdf892014-04-14 09:51:19 +08003682 priv->pci_dev_data = pci_dev_data;
3683 priv->removed = 1;
3684}
3685
Roland Dreier3d73c282007-10-10 15:43:54 -07003686static void mlx4_remove_one(struct pci_dev *pdev)
Roland Dreier225c7b12007-05-08 18:00:38 -07003687{
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003688 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3689 struct mlx4_dev *dev = persist->dev;
Roland Dreier225c7b12007-05-08 18:00:38 -07003690 struct mlx4_priv *priv = mlx4_priv(dev);
Yishai Hadas55ad3592015-01-25 16:59:42 +02003691 int active_vfs = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07003692
Yishai Hadasc69453e2015-01-25 16:59:40 +02003693 mutex_lock(&persist->interface_state_mutex);
3694 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
3695 mutex_unlock(&persist->interface_state_mutex);
3696
Yishai Hadas55ad3592015-01-25 16:59:42 +02003697	/* Disabling SR-IOV is not allowed while there are active VFs */
3698 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
3699 active_vfs = mlx4_how_many_lives_vf(dev);
3700 if (active_vfs) {
3701			pr_warn("Removing PF when there are active VFs!\n");
3702 pr_warn("Will not disable SR-IOV.\n");
3703 }
3704 }
3705
Yishai Hadasc69453e2015-01-25 16:59:40 +02003706 /* device marked to be under deletion running now without the lock
3707 * letting other tasks to be terminated
3708 */
3709 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3710 mlx4_unload_one(pdev);
3711 else
3712 mlx4_info(dev, "%s: interface is down\n", __func__);
Yishai Hadasad9a0bf2015-01-25 16:59:37 +02003713 mlx4_catas_end(dev);
Yishai Hadas55ad3592015-01-25 16:59:42 +02003714 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
3715 mlx4_warn(dev, "Disabling SR-IOV\n");
3716 pci_disable_sriov(pdev);
3717 }
3718
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003719 pci_release_regions(pdev);
3720 pci_disable_device(pdev);
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003721 kfree(dev->persist);
Wei Yangbefdf892014-04-14 09:51:19 +08003722 kfree(priv);
3723 pci_set_drvdata(pdev, NULL);
Roland Dreier225c7b12007-05-08 18:00:38 -07003724}
3725
Yishai Hadasdd0eefe2015-01-25 16:59:36 +02003726static int restore_current_port_types(struct mlx4_dev *dev,
3727 enum mlx4_port_type *types,
3728 enum mlx4_port_type *poss_types)
3729{
3730 struct mlx4_priv *priv = mlx4_priv(dev);
3731 int err, i;
3732
3733 mlx4_stop_sense(dev);
3734
3735 mutex_lock(&priv->port_mutex);
3736 for (i = 0; i < dev->caps.num_ports; i++)
3737 dev->caps.possible_type[i + 1] = poss_types[i];
3738 err = mlx4_change_port_types(dev, types);
3739 mlx4_start_sense(dev);
3740 mutex_unlock(&priv->port_mutex);
3741
3742 return err;
3743}
3744
Jack Morgensteinee49bd92007-07-12 17:50:45 +03003745int mlx4_restart_one(struct pci_dev *pdev)
3746{
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003747 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3748 struct mlx4_dev *dev = persist->dev;
Roland Dreier839f1242012-09-27 09:23:41 -07003749 struct mlx4_priv *priv = mlx4_priv(dev);
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003750 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3751 int pci_dev_data, err, total_vfs;
Roland Dreier839f1242012-09-27 09:23:41 -07003752
3753 pci_dev_data = priv->pci_dev_data;
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003754 total_vfs = dev->persist->num_vfs;
3755 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003756
3757 mlx4_unload_one(pdev);
Yishai Hadas55ad3592015-01-25 16:59:42 +02003758 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003759 if (err) {
3760 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
3761 __func__, pci_name(pdev), err);
3762 return err;
3763 }
3764
Yishai Hadasdd0eefe2015-01-25 16:59:36 +02003765 err = restore_current_port_types(dev, dev->persist->curr_port_type,
3766 dev->persist->curr_port_poss_type);
3767 if (err)
3768 mlx4_err(dev, "could not restore original port types (%d)\n",
3769 err);
3770
Majd Dibbinye1c00e12014-09-30 12:03:48 +03003771 return err;
Jack Morgensteinee49bd92007-07-12 17:50:45 +03003772}
3773
Benoit Taine9baa3c32014-08-08 15:56:03 +02003774static const struct pci_device_id mlx4_pci_table[] = {
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003775 /* MT25408 "Hermon" SDR */
Roland Dreierca3e57a2012-09-27 09:53:05 -07003776 { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003777 /* MT25408 "Hermon" DDR */
Roland Dreierca3e57a2012-09-27 09:53:05 -07003778 { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003779 /* MT25408 "Hermon" QDR */
Roland Dreierca3e57a2012-09-27 09:53:05 -07003780 { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003781 /* MT25408 "Hermon" DDR PCIe gen2 */
Roland Dreierca3e57a2012-09-27 09:53:05 -07003782 { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003783 /* MT25408 "Hermon" QDR PCIe gen2 */
Roland Dreierca3e57a2012-09-27 09:53:05 -07003784 { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003785 /* MT25408 "Hermon" EN 10GigE */
Roland Dreierca3e57a2012-09-27 09:53:05 -07003786 { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003787 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
Roland Dreierca3e57a2012-09-27 09:53:05 -07003788 { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003789 /* MT25458 ConnectX EN 10GBASE-T 10GigE */
Roland Dreierca3e57a2012-09-27 09:53:05 -07003790 { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003791 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
Roland Dreierca3e57a2012-09-27 09:53:05 -07003792 { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003793 /* MT26468 ConnectX EN 10GigE PCIe gen2*/
Roland Dreierca3e57a2012-09-27 09:53:05 -07003794 { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003795 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
Roland Dreierca3e57a2012-09-27 09:53:05 -07003796 { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003797 /* MT26478 ConnectX2 40GigE PCIe gen2 */
Roland Dreierca3e57a2012-09-27 09:53:05 -07003798 { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003799 /* MT25400 Family [ConnectX-2 Virtual Function] */
Roland Dreier839f1242012-09-27 09:23:41 -07003800 { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003801 /* MT27500 Family [ConnectX-3] */
3802 { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
3803 /* MT27500 Family [ConnectX-3 Virtual Function] */
Roland Dreier839f1242012-09-27 09:23:41 -07003804 { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003805 { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
3806 { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
3807 { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
3808 { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
3809 { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
3810 { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
3811 { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
3812 { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
3813 { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
3814 { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
3815 { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
3816 { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
Roland Dreier225c7b12007-05-08 18:00:38 -07003817 { 0, }
3818};
3819
3820MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
3821
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +00003822static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
3823 pci_channel_state_t state)
3824{
Yishai Hadas2ba5fbd2015-01-25 16:59:41 +02003825 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +00003826
Yishai Hadas2ba5fbd2015-01-25 16:59:41 +02003827 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
3828 mlx4_enter_error_state(persist);
3829
3830 mutex_lock(&persist->interface_state_mutex);
3831 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3832 mlx4_unload_one(pdev);
3833
3834 mutex_unlock(&persist->interface_state_mutex);
3835 if (state == pci_channel_io_perm_failure)
3836 return PCI_ERS_RESULT_DISCONNECT;
3837
3838 pci_disable_device(pdev);
3839 return PCI_ERS_RESULT_NEED_RESET;
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +00003840}
3841
3842static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
3843{
Yishai Hadas2ba5fbd2015-01-25 16:59:41 +02003844 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3845 struct mlx4_dev *dev = persist->dev;
Wei Yangbefdf892014-04-14 09:51:19 +08003846 struct mlx4_priv *priv = mlx4_priv(dev);
3847 int ret;
Yishai Hadas2ba5fbd2015-01-25 16:59:41 +02003848 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3849 int total_vfs;
Wei Yang97a52212014-03-27 09:28:31 +08003850
Yishai Hadas2ba5fbd2015-01-25 16:59:41 +02003851 mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
3852 ret = pci_enable_device(pdev);
3853 if (ret) {
3854 mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
3855 return PCI_ERS_RESULT_DISCONNECT;
3856 }
3857
3858 pci_set_master(pdev);
3859 pci_restore_state(pdev);
3860 pci_save_state(pdev);
3861
3862 total_vfs = dev->persist->num_vfs;
3863 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
3864
3865 mutex_lock(&persist->interface_state_mutex);
3866 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
3867 ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
Yishai Hadas55ad3592015-01-25 16:59:42 +02003868 priv, 1);
Yishai Hadas2ba5fbd2015-01-25 16:59:41 +02003869 if (ret) {
3870 mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
3871 __func__, ret);
3872 goto end;
3873 }
3874
3875 ret = restore_current_port_types(dev, dev->persist->
3876 curr_port_type, dev->persist->
3877 curr_port_poss_type);
3878 if (ret)
3879 mlx4_err(dev, "could not restore original port types (%d)\n", ret);
3880 }
3881end:
3882 mutex_unlock(&persist->interface_state_mutex);
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +00003883
3884 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
3885}
3886
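/*
 * Shutdown callback, invoked on system shutdown/reboot: unload the
 * device if it is still up so the HCA is left quiesced.
 */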
Yishai Hadas2ba5fbd2015-01-25 16:59:41 +02003887static void mlx4_shutdown(struct pci_dev *pdev)
3888{
3889 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3890
3891 mlx4_info(persist->dev, "mlx4_shutdown was called\n");
3892 mutex_lock(&persist->interface_state_mutex);
3893 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3894 mlx4_unload_one(pdev);
3895 mutex_unlock(&persist->interface_state_mutex);
3896}
3897
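/*
 * Hooks used by the PCI error recovery core.  Only .error_detected and
 * .slot_reset are provided here, so recovery completes inside
 * mlx4_pci_slot_reset().
 */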
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07003898static const struct pci_error_handlers mlx4_err_handler = {
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +00003899 .error_detected = mlx4_pci_err_detected,
3900 .slot_reset = mlx4_pci_slot_reset,
3901};
3902
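/* mlx4_core PCI driver descriptor: device table plus probe, remove,
 * shutdown and error-recovery callbacks.
 */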
Roland Dreier225c7b12007-05-08 18:00:38 -07003903static struct pci_driver mlx4_driver = {
3904 .name = DRV_NAME,
3905 .id_table = mlx4_pci_table,
3906 .probe = mlx4_init_one,
Yishai Hadas2ba5fbd2015-01-25 16:59:41 +02003907 .shutdown = mlx4_shutdown,
Bill Pembertonf57e6842012-12-03 09:23:15 -05003908 .remove = mlx4_remove_one,
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +00003909 .err_handler = &mlx4_err_handler,
Roland Dreier225c7b12007-05-08 18:00:38 -07003910};
3911
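/*
 * Validate module parameters before registering the PCI driver; any
 * out-of-range value makes mlx4_init() fail with -EINVAL.
 */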
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003912static int __init mlx4_verify_params(void)
3913{
3914 if ((log_num_mac < 0) || (log_num_mac > 7)) {
Amir Vadaic20862c2014-05-22 15:55:40 +03003915		pr_warn("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003916 return -1;
3917 }
3918
Or Gerlitzcb296882011-10-16 10:26:21 +02003919 if (log_num_vlan != 0)
Amir Vadaic20862c2014-05-22 15:55:40 +03003920 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
3921 MLX4_LOG_NUM_VLANS);
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003922
Amir Vadaiecc8fb12014-05-22 15:55:39 +03003923 if (use_prio != 0)
3924 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003925
Eli Cohen04986282010-09-20 08:42:38 +02003926 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
Amir Vadaic20862c2014-05-22 15:55:40 +03003927 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
3928 log_mtts_per_seg);
Eli Cohenab6bf422009-05-27 14:38:34 -07003929 return -1;
3930 }
3931
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003932	/* Check that the port type module parameters form a legal combination */
3933	if (!port_type_array[0] && port_type_array[1]) {
Amir Vadaic20862c2014-05-22 15:55:40 +03003934 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00003935 port_type_array[0] = true;
3936 }
3937
Matan Barak7d077cd2014-12-11 10:58:00 +02003938 if (mlx4_log_num_mgm_entry_size < -7 ||
3939 (mlx4_log_num_mgm_entry_size > 0 &&
3940 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
3941 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
3942 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
Joe Perches1a91de22014-05-07 12:52:57 -07003943 mlx4_log_num_mgm_entry_size,
3944 MLX4_MIN_MGM_LOG_ENTRY_SIZE,
3945 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
Jack Morgenstein3c439b52012-12-06 17:12:00 +00003946 return -1;
3947 }
3948
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003949 return 0;
3950}
3951
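/*
 * Module entry point: check the module parameters, create the mlx4
 * workqueue used for deferred work, and register the PCI driver,
 * destroying the workqueue if registration fails.
 */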
Roland Dreier225c7b12007-05-08 18:00:38 -07003952static int __init mlx4_init(void)
3953{
3954 int ret;
3955
Yevgeny Petrilin7ff93f82008-10-22 15:38:42 -07003956 if (mlx4_verify_params())
3957 return -EINVAL;
3958
3960 mlx4_wq = create_singlethread_workqueue("mlx4");
3961 if (!mlx4_wq)
3962 return -ENOMEM;
Jack Morgensteinee49bd92007-07-12 17:50:45 +03003963
Roland Dreier225c7b12007-05-08 18:00:38 -07003964 ret = pci_register_driver(&mlx4_driver);
Wei Yang1b85ee02013-12-03 10:04:10 +08003965 if (ret < 0)
3966 destroy_workqueue(mlx4_wq);
Roland Dreier225c7b12007-05-08 18:00:38 -07003967 return ret < 0 ? ret : 0;
3968}
3969
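/* Module exit: unregister the PCI driver and destroy the workqueue. */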
3970static void __exit mlx4_cleanup(void)
3971{
3972 pci_unregister_driver(&mlx4_driver);
Yevgeny Petrilin27bf91d2009-03-18 19:45:11 -07003973 destroy_workqueue(mlx4_wq);
Roland Dreier225c7b12007-05-08 18:00:38 -07003974}
3975
3976module_init(mlx4_init);
3977module_exit(mlx4_cleanup);