blob: 3895b2b5fc9212664a9d9b1c1d5959e8bc6c90ae [file] [log] [blame]
Roland Dreier225c7b12007-05-08 18:00:38 -07001/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
Jack Morgenstein51a379d2008-07-25 10:32:52 -07003 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
Roland Dreier225c7b12007-05-08 18:00:38 -07004 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/sched.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090036#include <linux/slab.h>
Paul Gortmakeree40fa02011-05-27 16:14:23 -040037#include <linux/export.h>
Roland Dreier225c7b12007-05-08 18:00:38 -070038#include <linux/pci.h>
39#include <linux/errno.h>
40
41#include <linux/mlx4/cmd.h>
Rony Efraim948e3062013-06-13 13:19:11 +030042#include <linux/mlx4/device.h>
Yevgeny Petriline8f081a2011-12-13 04:12:25 +000043#include <linux/semaphore.h>
Jack Morgenstein0a9a0182012-08-03 08:40:45 +000044#include <rdma/ib_smi.h>
Roland Dreier225c7b12007-05-08 18:00:38 -070045
46#include <asm/io.h>
47
48#include "mlx4.h"
Yevgeny Petriline8f081a2011-12-13 04:12:25 +000049#include "fw.h"
Roland Dreier225c7b12007-05-08 18:00:38 -070050
51#define CMD_POLL_TOKEN 0xffff
Yevgeny Petriline8f081a2011-12-13 04:12:25 +000052#define INBOX_MASK 0xffffffffffffff00ULL
53
54#define CMD_CHAN_VER 1
55#define CMD_CHAN_IF_REV 1
Roland Dreier225c7b12007-05-08 18:00:38 -070056
57enum {
58 /* command completed successfully: */
59 CMD_STAT_OK = 0x00,
60 /* Internal error (such as a bus error) occurred while processing command: */
61 CMD_STAT_INTERNAL_ERR = 0x01,
62 /* Operation/command not supported or opcode modifier not supported: */
63 CMD_STAT_BAD_OP = 0x02,
64 /* Parameter not supported or parameter out of range: */
65 CMD_STAT_BAD_PARAM = 0x03,
66 /* System not enabled or bad system state: */
67 CMD_STAT_BAD_SYS_STATE = 0x04,
68 /* Attempt to access reserved or unallocaterd resource: */
69 CMD_STAT_BAD_RESOURCE = 0x05,
70 /* Requested resource is currently executing a command, or is otherwise busy: */
71 CMD_STAT_RESOURCE_BUSY = 0x06,
72 /* Required capability exceeds device limits: */
73 CMD_STAT_EXCEED_LIM = 0x08,
74 /* Resource is not in the appropriate state or ownership: */
75 CMD_STAT_BAD_RES_STATE = 0x09,
76 /* Index out of range: */
77 CMD_STAT_BAD_INDEX = 0x0a,
78 /* FW image corrupted: */
79 CMD_STAT_BAD_NVMEM = 0x0b,
Jack Morgenstein899698d2008-07-22 14:19:39 -070080 /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
81 CMD_STAT_ICM_ERROR = 0x0c,
Roland Dreier225c7b12007-05-08 18:00:38 -070082 /* Attempt to modify a QP/EE which is not in the presumed state: */
83 CMD_STAT_BAD_QP_STATE = 0x10,
84 /* Bad segment parameters (Address/Size): */
85 CMD_STAT_BAD_SEG_PARAM = 0x20,
86 /* Memory Region has Memory Windows bound to: */
87 CMD_STAT_REG_BOUND = 0x21,
88 /* HCA local attached memory not present: */
89 CMD_STAT_LAM_NOT_PRE = 0x22,
90 /* Bad management packet (silently discarded): */
91 CMD_STAT_BAD_PKT = 0x30,
92 /* More outstanding CQEs in CQ than new CQ size: */
Yevgeny Petrilincc4ac2e2009-07-06 16:10:03 -070093 CMD_STAT_BAD_SIZE = 0x40,
94 /* Multi Function device support required: */
95 CMD_STAT_MULTI_FUNC_REQ = 0x50,
Roland Dreier225c7b12007-05-08 18:00:38 -070096};
97
98enum {
99 HCR_IN_PARAM_OFFSET = 0x00,
100 HCR_IN_MODIFIER_OFFSET = 0x08,
101 HCR_OUT_PARAM_OFFSET = 0x0c,
102 HCR_TOKEN_OFFSET = 0x14,
103 HCR_STATUS_OFFSET = 0x18,
104
105 HCR_OPMOD_SHIFT = 12,
106 HCR_T_BIT = 21,
107 HCR_E_BIT = 22,
108 HCR_GO_BIT = 23
109};
110
111enum {
Dotan Barak36ce10d2007-08-07 11:18:52 +0300112 GO_BIT_TIMEOUT_MSECS = 10000
Roland Dreier225c7b12007-05-08 18:00:38 -0700113};
114
Jack Morgensteinb01978c2013-06-27 19:05:21 +0300115enum mlx4_vlan_transition {
116 MLX4_VLAN_TRANSITION_VST_VST = 0,
117 MLX4_VLAN_TRANSITION_VST_VGT = 1,
118 MLX4_VLAN_TRANSITION_VGT_VST = 2,
119 MLX4_VLAN_TRANSITION_VGT_VGT = 3,
120};
121
122
Roland Dreier225c7b12007-05-08 18:00:38 -0700123struct mlx4_cmd_context {
124 struct completion done;
125 int result;
126 int next;
127 u64 out_param;
128 u16 token;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000129 u8 fw_status;
Roland Dreier225c7b12007-05-08 18:00:38 -0700130};
131
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000132static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
133 struct mlx4_vhcr_cmd *in_vhcr);
134
Roland Dreierca281212008-04-16 21:01:04 -0700135static int mlx4_status_to_errno(u8 status)
136{
Roland Dreier225c7b12007-05-08 18:00:38 -0700137 static const int trans_table[] = {
138 [CMD_STAT_INTERNAL_ERR] = -EIO,
139 [CMD_STAT_BAD_OP] = -EPERM,
140 [CMD_STAT_BAD_PARAM] = -EINVAL,
141 [CMD_STAT_BAD_SYS_STATE] = -ENXIO,
142 [CMD_STAT_BAD_RESOURCE] = -EBADF,
143 [CMD_STAT_RESOURCE_BUSY] = -EBUSY,
144 [CMD_STAT_EXCEED_LIM] = -ENOMEM,
145 [CMD_STAT_BAD_RES_STATE] = -EBADF,
146 [CMD_STAT_BAD_INDEX] = -EBADF,
147 [CMD_STAT_BAD_NVMEM] = -EFAULT,
Jack Morgenstein899698d2008-07-22 14:19:39 -0700148 [CMD_STAT_ICM_ERROR] = -ENFILE,
Roland Dreier225c7b12007-05-08 18:00:38 -0700149 [CMD_STAT_BAD_QP_STATE] = -EINVAL,
150 [CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
151 [CMD_STAT_REG_BOUND] = -EBUSY,
152 [CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
153 [CMD_STAT_BAD_PKT] = -EINVAL,
154 [CMD_STAT_BAD_SIZE] = -ENOMEM,
Yevgeny Petrilincc4ac2e2009-07-06 16:10:03 -0700155 [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
Roland Dreier225c7b12007-05-08 18:00:38 -0700156 };
157
158 if (status >= ARRAY_SIZE(trans_table) ||
159 (status != CMD_STAT_OK && trans_table[status] == 0))
160 return -EIO;
161
162 return trans_table[status];
163}
164
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +0000165static u8 mlx4_errno_to_status(int errno)
166{
167 switch (errno) {
168 case -EPERM:
169 return CMD_STAT_BAD_OP;
170 case -EINVAL:
171 return CMD_STAT_BAD_PARAM;
172 case -ENXIO:
173 return CMD_STAT_BAD_SYS_STATE;
174 case -EBUSY:
175 return CMD_STAT_RESOURCE_BUSY;
176 case -ENOMEM:
177 return CMD_STAT_EXCEED_LIM;
178 case -ENFILE:
179 return CMD_STAT_ICM_ERROR;
180 default:
181 return CMD_STAT_INTERNAL_ERR;
182 }
183}
184
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200185static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
186 u8 op_modifier)
187{
188 switch (op) {
189 case MLX4_CMD_UNMAP_ICM:
190 case MLX4_CMD_UNMAP_ICM_AUX:
191 case MLX4_CMD_UNMAP_FA:
192 case MLX4_CMD_2RST_QP:
193 case MLX4_CMD_HW2SW_EQ:
194 case MLX4_CMD_HW2SW_CQ:
195 case MLX4_CMD_HW2SW_SRQ:
196 case MLX4_CMD_HW2SW_MPT:
197 case MLX4_CMD_CLOSE_HCA:
198 case MLX4_QP_FLOW_STEERING_DETACH:
199 case MLX4_CMD_FREE_RES:
200 case MLX4_CMD_CLOSE_PORT:
201 return CMD_STAT_OK;
202
203 case MLX4_CMD_QP_ATTACH:
204 /* On Detach case return success */
205 if (op_modifier == 0)
206 return CMD_STAT_OK;
207 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
208
209 default:
210 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
211 }
212}
213
214static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
215{
216 /* Any error during the closing commands below is considered fatal */
217 if (op == MLX4_CMD_CLOSE_HCA ||
218 op == MLX4_CMD_HW2SW_EQ ||
219 op == MLX4_CMD_HW2SW_CQ ||
220 op == MLX4_CMD_2RST_QP ||
221 op == MLX4_CMD_HW2SW_SRQ ||
222 op == MLX4_CMD_SYNC_TPT ||
223 op == MLX4_CMD_UNMAP_ICM ||
224 op == MLX4_CMD_UNMAP_ICM_AUX ||
225 op == MLX4_CMD_UNMAP_FA)
226 return 1;
227 /* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
228 * CMD_STAT_REG_BOUND.
229 * This status indicates that memory region has memory windows bound to it
230 * which may result from invalid user space usage and is not fatal.
231 */
232 if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
233 return 1;
234 return 0;
235}
236
237static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
238 int err)
239{
240 /* Only if reset flow is really active return code is based on
241 * command, otherwise current error code is returned.
242 */
243 if (mlx4_internal_err_reset) {
244 mlx4_enter_error_state(dev->persist);
245 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
246 }
247
248 return err;
249}
250
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000251static int comm_pending(struct mlx4_dev *dev)
252{
253 struct mlx4_priv *priv = mlx4_priv(dev);
254 u32 status = readl(&priv->mfunc.comm->slave_read);
255
256 return (swab32(status) >> 31) != priv->cmd.comm_toggle;
257}
258
259static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
260{
261 struct mlx4_priv *priv = mlx4_priv(dev);
262 u32 val;
263
264 priv->cmd.comm_toggle ^= 1;
265 val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
266 __raw_writel((__force u32) cpu_to_be32(val),
267 &priv->mfunc.comm->slave_write);
268 mmiowb();
269}
270
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000271static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
272 unsigned long timeout)
273{
274 struct mlx4_priv *priv = mlx4_priv(dev);
275 unsigned long end;
276 int err = 0;
277 int ret_from_pending = 0;
278
279 /* First, verify that the master reports correct status */
280 if (comm_pending(dev)) {
Joe Perches1a91de22014-05-07 12:52:57 -0700281 mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000282 priv->cmd.comm_toggle, cmd);
283 return -EAGAIN;
284 }
285
286 /* Write command */
287 down(&priv->cmd.poll_sem);
288 mlx4_comm_cmd_post(dev, cmd, param);
289
290 end = msecs_to_jiffies(timeout) + jiffies;
291 while (comm_pending(dev) && time_before(jiffies, end))
292 cond_resched();
293 ret_from_pending = comm_pending(dev);
294 if (ret_from_pending) {
295 /* check if the slave is trying to boot in the middle of
296 * FLR process. The only non-zero result in the RESET command
297 * is MLX4_DELAY_RESET_SLAVE*/
298 if ((MLX4_COMM_CMD_RESET == cmd)) {
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000299 err = MLX4_DELAY_RESET_SLAVE;
300 } else {
301 mlx4_warn(dev, "Communication channel timed out\n");
302 err = -ETIMEDOUT;
303 }
304 }
305
306 up(&priv->cmd.poll_sem);
307 return err;
308}
309
310static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
311 u16 param, unsigned long timeout)
312{
313 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
314 struct mlx4_cmd_context *context;
Eugenia Emantayev58a3de02012-03-18 04:32:08 +0000315 unsigned long end;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000316 int err = 0;
317
318 down(&cmd->event_sem);
319
320 spin_lock(&cmd->context_lock);
321 BUG_ON(cmd->free_head < 0);
322 context = &cmd->context[cmd->free_head];
323 context->token += cmd->token_mask + 1;
324 cmd->free_head = context->next;
325 spin_unlock(&cmd->context_lock);
326
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200327 reinit_completion(&context->done);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000328
329 mlx4_comm_cmd_post(dev, op, param);
330
331 if (!wait_for_completion_timeout(&context->done,
332 msecs_to_jiffies(timeout))) {
Dotan Barak674925e2013-06-25 12:09:37 +0300333 mlx4_warn(dev, "communication channel command 0x%x timed out\n",
334 op);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000335 err = -EBUSY;
336 goto out;
337 }
338
339 err = context->result;
340 if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
341 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
342 op, context->fw_status);
343 goto out;
344 }
345
346out:
Eugenia Emantayev58a3de02012-03-18 04:32:08 +0000347 /* wait for comm channel ready
348 * this is necessary for prevention the race
349 * when switching between event to polling mode
350 */
351 end = msecs_to_jiffies(timeout) + jiffies;
352 while (comm_pending(dev) && time_before(jiffies, end))
353 cond_resched();
354
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000355 spin_lock(&cmd->context_lock);
356 context->next = cmd->free_head;
357 cmd->free_head = context - cmd->context;
358 spin_unlock(&cmd->context_lock);
359
360 up(&cmd->event_sem);
361 return err;
362}
363
Jack Morgensteinab9c17a2011-12-13 04:18:30 +0000364int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000365 unsigned long timeout)
366{
367 if (mlx4_priv(dev)->cmd.use_events)
368 return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
369 return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
370}
371
Roland Dreier225c7b12007-05-08 18:00:38 -0700372static int cmd_pending(struct mlx4_dev *dev)
373{
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +0000374 u32 status;
375
Yishai Hadas872bf2f2015-01-25 16:59:35 +0200376 if (pci_channel_offline(dev->persist->pdev))
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +0000377 return -EIO;
378
379 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
Roland Dreier225c7b12007-05-08 18:00:38 -0700380
381 return (status & swab32(1 << HCR_GO_BIT)) ||
382 (mlx4_priv(dev)->cmd.toggle ==
383 !!(status & swab32(1 << HCR_T_BIT)));
384}
385
386static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
387 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
388 int event)
389{
390 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
391 u32 __iomem *hcr = cmd->hcr;
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200392 int ret = -EIO;
Roland Dreier225c7b12007-05-08 18:00:38 -0700393 unsigned long end;
394
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200395 mutex_lock(&dev->persist->device_state_mutex);
396 /* To avoid writing to unknown addresses after the device state was
397 * changed to internal error and the chip was reset,
398 * check the INTERNAL_ERROR flag which is updated under
399 * device_state_mutex lock.
400 */
401 if (pci_channel_offline(dev->persist->pdev) ||
402 (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +0000403 /*
404 * Device is going through error recovery
405 * and cannot accept commands.
406 */
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +0000407 goto out;
408 }
409
Roland Dreier225c7b12007-05-08 18:00:38 -0700410 end = jiffies;
411 if (event)
Dotan Barak36ce10d2007-08-07 11:18:52 +0300412 end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
Roland Dreier225c7b12007-05-08 18:00:38 -0700413
414 while (cmd_pending(dev)) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +0200415 if (pci_channel_offline(dev->persist->pdev)) {
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +0000416 /*
417 * Device is going through error recovery
418 * and cannot accept commands.
419 */
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +0000420 goto out;
421 }
422
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000423 if (time_after_eq(jiffies, end)) {
424 mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
Roland Dreier225c7b12007-05-08 18:00:38 -0700425 goto out;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000426 }
Roland Dreier225c7b12007-05-08 18:00:38 -0700427 cond_resched();
428 }
429
430 /*
431 * We use writel (instead of something like memcpy_toio)
432 * because writes of less than 32 bits to the HCR don't work
433 * (and some architectures such as ia64 implement memcpy_toio
434 * in terms of writeb).
435 */
436 __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
437 __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
438 __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
439 __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
440 __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
441 __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);
442
443 /* __raw_writel may not order writes. */
444 wmb();
445
446 __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
447 (cmd->toggle << HCR_T_BIT) |
448 (event ? (1 << HCR_E_BIT) : 0) |
449 (op_modifier << HCR_OPMOD_SHIFT) |
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000450 op), hcr + 6);
Roland Dreier2e61c642007-10-09 19:59:18 -0700451
452 /*
453 * Make sure that our HCR writes don't get mixed in with
454 * writes from another CPU starting a FW command.
455 */
456 mmiowb();
457
Roland Dreier225c7b12007-05-08 18:00:38 -0700458 cmd->toggle = cmd->toggle ^ 1;
459
460 ret = 0;
461
462out:
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200463 if (ret)
464 mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
465 op, ret, in_param, in_modifier, op_modifier);
466 mutex_unlock(&dev->persist->device_state_mutex);
467
Roland Dreier225c7b12007-05-08 18:00:38 -0700468 return ret;
469}
470
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000471static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
472 int out_is_imm, u32 in_modifier, u8 op_modifier,
473 u16 op, unsigned long timeout)
474{
475 struct mlx4_priv *priv = mlx4_priv(dev);
476 struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
477 int ret;
478
Roland Dreierf3d4c892012-09-25 21:24:07 -0700479 mutex_lock(&priv->cmd.slave_cmd_mutex);
480
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000481 vhcr->in_param = cpu_to_be64(in_param);
482 vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
483 vhcr->in_modifier = cpu_to_be32(in_modifier);
484 vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
485 vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
486 vhcr->status = 0;
487 vhcr->flags = !!(priv->cmd.use_events) << 6;
Roland Dreierf3d4c892012-09-25 21:24:07 -0700488
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000489 if (mlx4_is_master(dev)) {
490 ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
491 if (!ret) {
492 if (out_is_imm) {
493 if (out_param)
494 *out_param =
495 be64_to_cpu(vhcr->out_param);
496 else {
Joe Perches1a91de22014-05-07 12:52:57 -0700497 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
498 op);
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +0000499 vhcr->status = CMD_STAT_BAD_PARAM;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000500 }
501 }
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +0000502 ret = mlx4_status_to_errno(vhcr->status);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000503 }
504 } else {
505 ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
506 MLX4_COMM_TIME + timeout);
507 if (!ret) {
508 if (out_is_imm) {
509 if (out_param)
510 *out_param =
511 be64_to_cpu(vhcr->out_param);
512 else {
Joe Perches1a91de22014-05-07 12:52:57 -0700513 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
514 op);
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +0000515 vhcr->status = CMD_STAT_BAD_PARAM;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000516 }
517 }
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +0000518 ret = mlx4_status_to_errno(vhcr->status);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000519 } else
Joe Perches1a91de22014-05-07 12:52:57 -0700520 mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
521 op);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000522 }
Roland Dreierf3d4c892012-09-25 21:24:07 -0700523
524 mutex_unlock(&priv->cmd.slave_cmd_mutex);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000525 return ret;
526}
527
Roland Dreier225c7b12007-05-08 18:00:38 -0700528static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
529 int out_is_imm, u32 in_modifier, u8 op_modifier,
530 u16 op, unsigned long timeout)
531{
532 struct mlx4_priv *priv = mlx4_priv(dev);
533 void __iomem *hcr = priv->cmd.hcr;
534 int err = 0;
535 unsigned long end;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000536 u32 stat;
Roland Dreier225c7b12007-05-08 18:00:38 -0700537
538 down(&priv->cmd.poll_sem);
539
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200540 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +0000541 /*
542 * Device is going through error recovery
543 * and cannot accept commands.
544 */
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200545 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +0000546 goto out;
547 }
548
Eyal Perryc05a1162014-05-14 12:15:13 +0300549 if (out_is_imm && !out_param) {
550 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
551 op);
552 err = -EINVAL;
553 goto out;
554 }
555
Roland Dreier225c7b12007-05-08 18:00:38 -0700556 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
557 in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
558 if (err)
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200559 goto out_reset;
Roland Dreier225c7b12007-05-08 18:00:38 -0700560
561 end = msecs_to_jiffies(timeout) + jiffies;
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +0000562 while (cmd_pending(dev) && time_before(jiffies, end)) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +0200563 if (pci_channel_offline(dev->persist->pdev)) {
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +0000564 /*
565 * Device is going through error recovery
566 * and cannot accept commands.
567 */
568 err = -EIO;
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200569 goto out_reset;
570 }
571
572 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
573 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +0000574 goto out;
575 }
576
Roland Dreier225c7b12007-05-08 18:00:38 -0700577 cond_resched();
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +0000578 }
Roland Dreier225c7b12007-05-08 18:00:38 -0700579
580 if (cmd_pending(dev)) {
Dotan Barak674925e2013-06-25 12:09:37 +0300581 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
582 op);
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200583 err = -EIO;
584 goto out_reset;
Roland Dreier225c7b12007-05-08 18:00:38 -0700585 }
586
587 if (out_is_imm)
588 *out_param =
589 (u64) be32_to_cpu((__force __be32)
590 __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
591 (u64) be32_to_cpu((__force __be32)
592 __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000593 stat = be32_to_cpu((__force __be32)
594 __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
595 err = mlx4_status_to_errno(stat);
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200596 if (err) {
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000597 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
598 op, stat);
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200599 if (mlx4_closing_cmd_fatal_error(op, stat))
600 goto out_reset;
601 goto out;
602 }
Roland Dreier225c7b12007-05-08 18:00:38 -0700603
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200604out_reset:
605 if (err)
606 err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
Roland Dreier225c7b12007-05-08 18:00:38 -0700607out:
608 up(&priv->cmd.poll_sem);
609 return err;
610}
611
612void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
613{
614 struct mlx4_priv *priv = mlx4_priv(dev);
615 struct mlx4_cmd_context *context =
616 &priv->cmd.context[token & priv->cmd.token_mask];
617
618 /* previously timed out command completing at long last */
619 if (token != context->token)
620 return;
621
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000622 context->fw_status = status;
Roland Dreier225c7b12007-05-08 18:00:38 -0700623 context->result = mlx4_status_to_errno(status);
624 context->out_param = out_param;
625
Roland Dreier225c7b12007-05-08 18:00:38 -0700626 complete(&context->done);
627}
628
629static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
630 int out_is_imm, u32 in_modifier, u8 op_modifier,
631 u16 op, unsigned long timeout)
632{
633 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
634 struct mlx4_cmd_context *context;
635 int err = 0;
636
637 down(&cmd->event_sem);
638
639 spin_lock(&cmd->context_lock);
640 BUG_ON(cmd->free_head < 0);
641 context = &cmd->context[cmd->free_head];
Roland Dreier09815822007-07-20 21:19:43 -0700642 context->token += cmd->token_mask + 1;
Roland Dreier225c7b12007-05-08 18:00:38 -0700643 cmd->free_head = context->next;
644 spin_unlock(&cmd->context_lock);
645
Eyal Perryc05a1162014-05-14 12:15:13 +0300646 if (out_is_imm && !out_param) {
647 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
648 op);
649 err = -EINVAL;
650 goto out;
651 }
652
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200653 reinit_completion(&context->done);
Roland Dreier225c7b12007-05-08 18:00:38 -0700654
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200655 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
656 in_modifier, op_modifier, op, context->token, 1);
657 if (err)
658 goto out_reset;
Roland Dreier225c7b12007-05-08 18:00:38 -0700659
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000660 if (!wait_for_completion_timeout(&context->done,
661 msecs_to_jiffies(timeout))) {
Dotan Barak674925e2013-06-25 12:09:37 +0300662 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
663 op);
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200664 err = -EIO;
665 goto out_reset;
Roland Dreier225c7b12007-05-08 18:00:38 -0700666 }
667
668 err = context->result;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000669 if (err) {
Jack Morgenstein1daa4302014-09-30 12:03:50 +0300670 /* Since we do not want to have this error message always
671 * displayed at driver start when there are ConnectX2 HCAs
672 * on the host, we deprecate the error message for this
673 * specific command/input_mod/opcode_mod/fw-status to be debug.
674 */
675 if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
676 op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
677 mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
678 op, context->fw_status);
679 else
680 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
681 op, context->fw_status);
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200682 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
683 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
684 else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
685 goto out_reset;
686
Roland Dreier225c7b12007-05-08 18:00:38 -0700687 goto out;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000688 }
Roland Dreier225c7b12007-05-08 18:00:38 -0700689
690 if (out_is_imm)
691 *out_param = context->out_param;
692
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200693out_reset:
694 if (err)
695 err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
Roland Dreier225c7b12007-05-08 18:00:38 -0700696out:
697 spin_lock(&cmd->context_lock);
698 context->next = cmd->free_head;
699 cmd->free_head = context - cmd->context;
700 spin_unlock(&cmd->context_lock);
701
702 up(&cmd->event_sem);
703 return err;
704}
705
706int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
707 int out_is_imm, u32 in_modifier, u8 op_modifier,
Jack Morgensteinf9baff52011-12-13 04:10:51 +0000708 u16 op, unsigned long timeout, int native)
Roland Dreier225c7b12007-05-08 18:00:38 -0700709{
Yishai Hadas872bf2f2015-01-25 16:59:35 +0200710 if (pci_channel_offline(dev->persist->pdev))
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200711 return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
Kleber Sacilotto de Souza57dbf292012-07-20 09:55:43 +0000712
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000713 if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
Yishai Hadasf5aef5a2015-01-25 16:59:39 +0200714 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
715 return mlx4_internal_err_ret_value(dev, op,
716 op_modifier);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000717 if (mlx4_priv(dev)->cmd.use_events)
718 return mlx4_cmd_wait(dev, in_param, out_param,
719 out_is_imm, in_modifier,
720 op_modifier, op, timeout);
721 else
722 return mlx4_cmd_poll(dev, in_param, out_param,
723 out_is_imm, in_modifier,
724 op_modifier, op, timeout);
725 }
726 return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
727 in_modifier, op_modifier, op, timeout);
Roland Dreier225c7b12007-05-08 18:00:38 -0700728}
729EXPORT_SYMBOL_GPL(__mlx4_cmd);
730
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000731
732static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
733{
734 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
735 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
736}
737
738static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
739 int slave, u64 slave_addr,
740 int size, int is_read)
741{
742 u64 in_param;
743 u64 out_param;
744
745 if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
746 (slave & ~0x7f) | (size & 0xff)) {
Joe Perches1a91de22014-05-07 12:52:57 -0700747 mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
748 slave_addr, master_addr, slave, size);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000749 return -EINVAL;
750 }
751
752 if (is_read) {
753 in_param = (u64) slave | slave_addr;
754 out_param = (u64) dev->caps.function | master_addr;
755 } else {
756 in_param = (u64) dev->caps.function | master_addr;
757 out_param = (u64) slave | slave_addr;
758 }
759
760 return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
761 MLX4_CMD_ACCESS_MEM,
762 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
763}
764
Jack Morgenstein0a9a0182012-08-03 08:40:45 +0000765static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
766 struct mlx4_cmd_mailbox *inbox,
767 struct mlx4_cmd_mailbox *outbox)
768{
769 struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
770 struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
771 int err;
772 int i;
773
774 if (index & 0x1f)
775 return -EINVAL;
776
777 in_mad->attr_mod = cpu_to_be32(index / 32);
778
779 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
780 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
781 MLX4_CMD_NATIVE);
782 if (err)
783 return err;
784
785 for (i = 0; i < 32; ++i)
786 pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
787
788 return err;
789}
790
791static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
792 struct mlx4_cmd_mailbox *inbox,
793 struct mlx4_cmd_mailbox *outbox)
794{
795 int i;
796 int err;
797
798 for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
799 err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
800 if (err)
801 return err;
802 }
803
804 return 0;
805}
806#define PORT_CAPABILITY_LOCATION_IN_SMP 20
807#define PORT_STATE_OFFSET 32
808
809static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
810{
Jack Morgensteina0c64a12012-08-03 08:40:49 +0000811 if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
812 return IB_PORT_ACTIVE;
813 else
814 return IB_PORT_DOWN;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +0000815}
816
817static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
818 struct mlx4_vhcr *vhcr,
819 struct mlx4_cmd_mailbox *inbox,
820 struct mlx4_cmd_mailbox *outbox,
821 struct mlx4_cmd_info *cmd)
822{
823 struct ib_smp *smp = inbox->buf;
824 u32 index;
825 u8 port;
Jack Morgenstein97982f52014-05-29 16:31:02 +0300826 u8 opcode_modifier;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +0000827 u16 *table;
828 int err;
829 int vidx, pidx;
Jack Morgenstein97982f52014-05-29 16:31:02 +0300830 int network_view;
Jack Morgenstein0a9a0182012-08-03 08:40:45 +0000831 struct mlx4_priv *priv = mlx4_priv(dev);
832 struct ib_smp *outsmp = outbox->buf;
833 __be16 *outtab = (__be16 *)(outsmp->data);
834 __be32 slave_cap_mask;
Jack Morgensteinafa8fd12012-08-03 08:40:56 +0000835 __be64 slave_node_guid;
Jack Morgenstein97982f52014-05-29 16:31:02 +0300836
Jack Morgenstein0a9a0182012-08-03 08:40:45 +0000837 port = vhcr->in_modifier;
838
Jack Morgenstein97982f52014-05-29 16:31:02 +0300839 /* network-view bit is for driver use only, and should not be passed to FW */
840 opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
841 network_view = !!(vhcr->op_modifier & 0x8);
842
Jack Morgenstein0a9a0182012-08-03 08:40:45 +0000843 if (smp->base_version == 1 &&
844 smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
845 smp->class_version == 1) {
Jack Morgenstein97982f52014-05-29 16:31:02 +0300846 /* host view is paravirtualized */
847 if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
Jack Morgenstein0a9a0182012-08-03 08:40:45 +0000848 if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
849 index = be32_to_cpu(smp->attr_mod);
850 if (port < 1 || port > dev->caps.num_ports)
851 return -EINVAL;
852 table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
853 if (!table)
854 return -ENOMEM;
855 /* need to get the full pkey table because the paravirtualized
856 * pkeys may be scattered among several pkey blocks.
857 */
858 err = get_full_pkey_table(dev, port, table, inbox, outbox);
859 if (!err) {
860 for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
861 pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
862 outtab[vidx % 32] = cpu_to_be16(table[pidx]);
863 }
864 }
865 kfree(table);
866 return err;
867 }
868 if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
869 /*get the slave specific caps:*/
870 /*do the command */
871 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
Jack Morgenstein97982f52014-05-29 16:31:02 +0300872 vhcr->in_modifier, opcode_modifier,
Jack Morgenstein0a9a0182012-08-03 08:40:45 +0000873 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
874 /* modify the response for slaves */
875 if (!err && slave != mlx4_master_func_num(dev)) {
876 u8 *state = outsmp->data + PORT_STATE_OFFSET;
877
878 *state = (*state & 0xf0) | vf_port_state(dev, port, slave);
879 slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
880 memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
881 }
882 return err;
883 }
884 if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
885 /* compute slave's gid block */
886 smp->attr_mod = cpu_to_be32(slave / 8);
887 /* execute cmd */
888 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
Jack Morgenstein97982f52014-05-29 16:31:02 +0300889 vhcr->in_modifier, opcode_modifier,
Jack Morgenstein0a9a0182012-08-03 08:40:45 +0000890 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
891 if (!err) {
892 /* if needed, move slave gid to index 0 */
893 if (slave % 8)
894 memcpy(outsmp->data,
895 outsmp->data + (slave % 8) * 8, 8);
896 /* delete all other gids */
897 memset(outsmp->data + 8, 0, 56);
898 }
899 return err;
900 }
Jack Morgensteinafa8fd12012-08-03 08:40:56 +0000901 if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
902 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
Jack Morgenstein97982f52014-05-29 16:31:02 +0300903 vhcr->in_modifier, opcode_modifier,
Jack Morgensteinafa8fd12012-08-03 08:40:56 +0000904 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
905 if (!err) {
906 slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
907 memcpy(outsmp->data + 12, &slave_node_guid, 8);
908 }
909 return err;
910 }
Jack Morgenstein0a9a0182012-08-03 08:40:45 +0000911 }
912 }
Jack Morgenstein97982f52014-05-29 16:31:02 +0300913
914 /* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
915 * These are the MADs used by ib verbs (such as ib_query_gids).
916 */
Jack Morgenstein0a9a0182012-08-03 08:40:45 +0000917 if (slave != mlx4_master_func_num(dev) &&
Jack Morgenstein97982f52014-05-29 16:31:02 +0300918 !mlx4_vf_smi_enabled(dev, slave, port)) {
919 if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
920 smp->method == IB_MGMT_METHOD_GET) || network_view) {
921 mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
922 slave, smp->method, smp->mgmt_class,
923 network_view ? "Network" : "Host",
924 be16_to_cpu(smp->attr_id));
925 return -EPERM;
926 }
Jack Morgenstein0a9a0182012-08-03 08:40:45 +0000927 }
Jack Morgenstein97982f52014-05-29 16:31:02 +0300928
Jack Morgenstein0a9a0182012-08-03 08:40:45 +0000929 return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
Jack Morgenstein97982f52014-05-29 16:31:02 +0300930 vhcr->in_modifier, opcode_modifier,
Jack Morgenstein0a9a0182012-08-03 08:40:45 +0000931 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
932}
933
Or Gerlitzb7475792014-03-27 14:02:02 +0200934static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
Yevgeny Petrilinfe6f7002013-07-28 18:54:21 +0300935 struct mlx4_vhcr *vhcr,
936 struct mlx4_cmd_mailbox *inbox,
937 struct mlx4_cmd_mailbox *outbox,
938 struct mlx4_cmd_info *cmd)
939{
940 return -EPERM;
941}
942
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000943int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
944 struct mlx4_vhcr *vhcr,
945 struct mlx4_cmd_mailbox *inbox,
946 struct mlx4_cmd_mailbox *outbox,
947 struct mlx4_cmd_info *cmd)
948{
949 u64 in_param;
950 u64 out_param;
951 int err;
952
953 in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
954 out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
955 if (cmd->encode_slave_id) {
956 in_param &= 0xffffffffffffff00ll;
957 in_param |= slave;
958 }
959
960 err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
961 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
962 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
963
964 if (cmd->out_is_imm)
965 vhcr->out_param = out_param;
966
967 return err;
968}
969
970static struct mlx4_cmd_info cmd_info[] = {
971 {
972 .opcode = MLX4_CMD_QUERY_FW,
973 .has_inbox = false,
974 .has_outbox = true,
975 .out_is_imm = false,
976 .encode_slave_id = false,
977 .verify = NULL,
Jack Morgensteinb91cb3e2012-05-30 09:14:53 +0000978 .wrapper = mlx4_QUERY_FW_wrapper
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000979 },
980 {
981 .opcode = MLX4_CMD_QUERY_HCA,
982 .has_inbox = false,
983 .has_outbox = true,
984 .out_is_imm = false,
985 .encode_slave_id = false,
986 .verify = NULL,
987 .wrapper = NULL
988 },
989 {
990 .opcode = MLX4_CMD_QUERY_DEV_CAP,
991 .has_inbox = false,
992 .has_outbox = true,
993 .out_is_imm = false,
994 .encode_slave_id = false,
995 .verify = NULL,
Jack Morgensteinb91cb3e2012-05-30 09:14:53 +0000996 .wrapper = mlx4_QUERY_DEV_CAP_wrapper
Yevgeny Petriline8f081a2011-12-13 04:12:25 +0000997 },
Eli Cohenc82e9aa2011-12-13 04:15:24 +0000998 {
999 .opcode = MLX4_CMD_QUERY_FUNC_CAP,
1000 .has_inbox = false,
1001 .has_outbox = true,
1002 .out_is_imm = false,
1003 .encode_slave_id = false,
1004 .verify = NULL,
1005 .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
1006 },
1007 {
1008 .opcode = MLX4_CMD_QUERY_ADAPTER,
1009 .has_inbox = false,
1010 .has_outbox = true,
1011 .out_is_imm = false,
1012 .encode_slave_id = false,
1013 .verify = NULL,
1014 .wrapper = NULL
1015 },
1016 {
1017 .opcode = MLX4_CMD_INIT_PORT,
1018 .has_inbox = false,
1019 .has_outbox = false,
1020 .out_is_imm = false,
1021 .encode_slave_id = false,
1022 .verify = NULL,
1023 .wrapper = mlx4_INIT_PORT_wrapper
1024 },
1025 {
1026 .opcode = MLX4_CMD_CLOSE_PORT,
1027 .has_inbox = false,
1028 .has_outbox = false,
1029 .out_is_imm = false,
1030 .encode_slave_id = false,
1031 .verify = NULL,
1032 .wrapper = mlx4_CLOSE_PORT_wrapper
1033 },
1034 {
1035 .opcode = MLX4_CMD_QUERY_PORT,
1036 .has_inbox = false,
1037 .has_outbox = true,
1038 .out_is_imm = false,
1039 .encode_slave_id = false,
1040 .verify = NULL,
1041 .wrapper = mlx4_QUERY_PORT_wrapper
1042 },
1043 {
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001044 .opcode = MLX4_CMD_SET_PORT,
1045 .has_inbox = true,
1046 .has_outbox = false,
1047 .out_is_imm = false,
1048 .encode_slave_id = false,
1049 .verify = NULL,
1050 .wrapper = mlx4_SET_PORT_wrapper
1051 },
1052 {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001053 .opcode = MLX4_CMD_MAP_EQ,
1054 .has_inbox = false,
1055 .has_outbox = false,
1056 .out_is_imm = false,
1057 .encode_slave_id = false,
1058 .verify = NULL,
1059 .wrapper = mlx4_MAP_EQ_wrapper
1060 },
1061 {
1062 .opcode = MLX4_CMD_SW2HW_EQ,
1063 .has_inbox = true,
1064 .has_outbox = false,
1065 .out_is_imm = false,
1066 .encode_slave_id = true,
1067 .verify = NULL,
1068 .wrapper = mlx4_SW2HW_EQ_wrapper
1069 },
1070 {
1071 .opcode = MLX4_CMD_HW_HEALTH_CHECK,
1072 .has_inbox = false,
1073 .has_outbox = false,
1074 .out_is_imm = false,
1075 .encode_slave_id = false,
1076 .verify = NULL,
1077 .wrapper = NULL
1078 },
1079 {
1080 .opcode = MLX4_CMD_NOP,
1081 .has_inbox = false,
1082 .has_outbox = false,
1083 .out_is_imm = false,
1084 .encode_slave_id = false,
1085 .verify = NULL,
1086 .wrapper = NULL
1087 },
1088 {
Or Gerlitzd18f1412014-03-27 14:02:03 +02001089 .opcode = MLX4_CMD_CONFIG_DEV,
1090 .has_inbox = false,
Matan Barakd475c952014-11-02 16:26:17 +02001091 .has_outbox = true,
Or Gerlitzd18f1412014-03-27 14:02:03 +02001092 .out_is_imm = false,
1093 .encode_slave_id = false,
1094 .verify = NULL,
Matan Barakd475c952014-11-02 16:26:17 +02001095 .wrapper = mlx4_CONFIG_DEV_wrapper
Or Gerlitzd18f1412014-03-27 14:02:03 +02001096 },
1097 {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001098 .opcode = MLX4_CMD_ALLOC_RES,
1099 .has_inbox = false,
1100 .has_outbox = false,
1101 .out_is_imm = true,
1102 .encode_slave_id = false,
1103 .verify = NULL,
1104 .wrapper = mlx4_ALLOC_RES_wrapper
1105 },
1106 {
1107 .opcode = MLX4_CMD_FREE_RES,
1108 .has_inbox = false,
1109 .has_outbox = false,
1110 .out_is_imm = false,
1111 .encode_slave_id = false,
1112 .verify = NULL,
1113 .wrapper = mlx4_FREE_RES_wrapper
1114 },
1115 {
1116 .opcode = MLX4_CMD_SW2HW_MPT,
1117 .has_inbox = true,
1118 .has_outbox = false,
1119 .out_is_imm = false,
1120 .encode_slave_id = true,
1121 .verify = NULL,
1122 .wrapper = mlx4_SW2HW_MPT_wrapper
1123 },
1124 {
1125 .opcode = MLX4_CMD_QUERY_MPT,
1126 .has_inbox = false,
1127 .has_outbox = true,
1128 .out_is_imm = false,
1129 .encode_slave_id = false,
1130 .verify = NULL,
1131 .wrapper = mlx4_QUERY_MPT_wrapper
1132 },
1133 {
1134 .opcode = MLX4_CMD_HW2SW_MPT,
1135 .has_inbox = false,
1136 .has_outbox = false,
1137 .out_is_imm = false,
1138 .encode_slave_id = false,
1139 .verify = NULL,
1140 .wrapper = mlx4_HW2SW_MPT_wrapper
1141 },
1142 {
1143 .opcode = MLX4_CMD_READ_MTT,
1144 .has_inbox = false,
1145 .has_outbox = true,
1146 .out_is_imm = false,
1147 .encode_slave_id = false,
1148 .verify = NULL,
1149 .wrapper = NULL
1150 },
1151 {
1152 .opcode = MLX4_CMD_WRITE_MTT,
1153 .has_inbox = true,
1154 .has_outbox = false,
1155 .out_is_imm = false,
1156 .encode_slave_id = false,
1157 .verify = NULL,
1158 .wrapper = mlx4_WRITE_MTT_wrapper
1159 },
1160 {
1161 .opcode = MLX4_CMD_SYNC_TPT,
1162 .has_inbox = true,
1163 .has_outbox = false,
1164 .out_is_imm = false,
1165 .encode_slave_id = false,
1166 .verify = NULL,
1167 .wrapper = NULL
1168 },
1169 {
1170 .opcode = MLX4_CMD_HW2SW_EQ,
1171 .has_inbox = false,
1172 .has_outbox = true,
1173 .out_is_imm = false,
1174 .encode_slave_id = true,
1175 .verify = NULL,
1176 .wrapper = mlx4_HW2SW_EQ_wrapper
1177 },
1178 {
1179 .opcode = MLX4_CMD_QUERY_EQ,
1180 .has_inbox = false,
1181 .has_outbox = true,
1182 .out_is_imm = false,
1183 .encode_slave_id = true,
1184 .verify = NULL,
1185 .wrapper = mlx4_QUERY_EQ_wrapper
1186 },
1187 {
1188 .opcode = MLX4_CMD_SW2HW_CQ,
1189 .has_inbox = true,
1190 .has_outbox = false,
1191 .out_is_imm = false,
1192 .encode_slave_id = true,
1193 .verify = NULL,
1194 .wrapper = mlx4_SW2HW_CQ_wrapper
1195 },
1196 {
1197 .opcode = MLX4_CMD_HW2SW_CQ,
1198 .has_inbox = false,
1199 .has_outbox = false,
1200 .out_is_imm = false,
1201 .encode_slave_id = false,
1202 .verify = NULL,
1203 .wrapper = mlx4_HW2SW_CQ_wrapper
1204 },
1205 {
1206 .opcode = MLX4_CMD_QUERY_CQ,
1207 .has_inbox = false,
1208 .has_outbox = true,
1209 .out_is_imm = false,
1210 .encode_slave_id = false,
1211 .verify = NULL,
1212 .wrapper = mlx4_QUERY_CQ_wrapper
1213 },
1214 {
1215 .opcode = MLX4_CMD_MODIFY_CQ,
1216 .has_inbox = true,
1217 .has_outbox = false,
1218 .out_is_imm = true,
1219 .encode_slave_id = false,
1220 .verify = NULL,
1221 .wrapper = mlx4_MODIFY_CQ_wrapper
1222 },
1223 {
1224 .opcode = MLX4_CMD_SW2HW_SRQ,
1225 .has_inbox = true,
1226 .has_outbox = false,
1227 .out_is_imm = false,
1228 .encode_slave_id = true,
1229 .verify = NULL,
1230 .wrapper = mlx4_SW2HW_SRQ_wrapper
1231 },
1232 {
1233 .opcode = MLX4_CMD_HW2SW_SRQ,
1234 .has_inbox = false,
1235 .has_outbox = false,
1236 .out_is_imm = false,
1237 .encode_slave_id = false,
1238 .verify = NULL,
1239 .wrapper = mlx4_HW2SW_SRQ_wrapper
1240 },
1241 {
1242 .opcode = MLX4_CMD_QUERY_SRQ,
1243 .has_inbox = false,
1244 .has_outbox = true,
1245 .out_is_imm = false,
1246 .encode_slave_id = false,
1247 .verify = NULL,
1248 .wrapper = mlx4_QUERY_SRQ_wrapper
1249 },
1250 {
1251 .opcode = MLX4_CMD_ARM_SRQ,
1252 .has_inbox = false,
1253 .has_outbox = false,
1254 .out_is_imm = false,
1255 .encode_slave_id = false,
1256 .verify = NULL,
1257 .wrapper = mlx4_ARM_SRQ_wrapper
1258 },
1259 {
1260 .opcode = MLX4_CMD_RST2INIT_QP,
1261 .has_inbox = true,
1262 .has_outbox = false,
1263 .out_is_imm = false,
1264 .encode_slave_id = true,
1265 .verify = NULL,
1266 .wrapper = mlx4_RST2INIT_QP_wrapper
1267 },
1268 {
1269 .opcode = MLX4_CMD_INIT2INIT_QP,
1270 .has_inbox = true,
1271 .has_outbox = false,
1272 .out_is_imm = false,
1273 .encode_slave_id = false,
1274 .verify = NULL,
Jack Morgenstein54679e12012-08-03 08:40:43 +00001275 .wrapper = mlx4_INIT2INIT_QP_wrapper
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001276 },
1277 {
1278 .opcode = MLX4_CMD_INIT2RTR_QP,
1279 .has_inbox = true,
1280 .has_outbox = false,
1281 .out_is_imm = false,
1282 .encode_slave_id = false,
1283 .verify = NULL,
1284 .wrapper = mlx4_INIT2RTR_QP_wrapper
1285 },
1286 {
1287 .opcode = MLX4_CMD_RTR2RTS_QP,
1288 .has_inbox = true,
1289 .has_outbox = false,
1290 .out_is_imm = false,
1291 .encode_slave_id = false,
1292 .verify = NULL,
Jack Morgenstein54679e12012-08-03 08:40:43 +00001293 .wrapper = mlx4_RTR2RTS_QP_wrapper
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001294 },
1295 {
1296 .opcode = MLX4_CMD_RTS2RTS_QP,
1297 .has_inbox = true,
1298 .has_outbox = false,
1299 .out_is_imm = false,
1300 .encode_slave_id = false,
1301 .verify = NULL,
Jack Morgenstein54679e12012-08-03 08:40:43 +00001302 .wrapper = mlx4_RTS2RTS_QP_wrapper
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001303 },
1304 {
1305 .opcode = MLX4_CMD_SQERR2RTS_QP,
1306 .has_inbox = true,
1307 .has_outbox = false,
1308 .out_is_imm = false,
1309 .encode_slave_id = false,
1310 .verify = NULL,
Jack Morgenstein54679e12012-08-03 08:40:43 +00001311 .wrapper = mlx4_SQERR2RTS_QP_wrapper
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001312 },
1313 {
1314 .opcode = MLX4_CMD_2ERR_QP,
1315 .has_inbox = false,
1316 .has_outbox = false,
1317 .out_is_imm = false,
1318 .encode_slave_id = false,
1319 .verify = NULL,
1320 .wrapper = mlx4_GEN_QP_wrapper
1321 },
1322 {
1323 .opcode = MLX4_CMD_RTS2SQD_QP,
1324 .has_inbox = false,
1325 .has_outbox = false,
1326 .out_is_imm = false,
1327 .encode_slave_id = false,
1328 .verify = NULL,
1329 .wrapper = mlx4_GEN_QP_wrapper
1330 },
1331 {
1332 .opcode = MLX4_CMD_SQD2SQD_QP,
1333 .has_inbox = true,
1334 .has_outbox = false,
1335 .out_is_imm = false,
1336 .encode_slave_id = false,
1337 .verify = NULL,
Jack Morgenstein54679e12012-08-03 08:40:43 +00001338 .wrapper = mlx4_SQD2SQD_QP_wrapper
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001339 },
1340 {
1341 .opcode = MLX4_CMD_SQD2RTS_QP,
1342 .has_inbox = true,
1343 .has_outbox = false,
1344 .out_is_imm = false,
1345 .encode_slave_id = false,
1346 .verify = NULL,
Jack Morgenstein54679e12012-08-03 08:40:43 +00001347 .wrapper = mlx4_SQD2RTS_QP_wrapper
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001348 },
1349 {
1350 .opcode = MLX4_CMD_2RST_QP,
1351 .has_inbox = false,
1352 .has_outbox = false,
1353 .out_is_imm = false,
1354 .encode_slave_id = false,
1355 .verify = NULL,
1356 .wrapper = mlx4_2RST_QP_wrapper
1357 },
1358 {
1359 .opcode = MLX4_CMD_QUERY_QP,
1360 .has_inbox = false,
1361 .has_outbox = true,
1362 .out_is_imm = false,
1363 .encode_slave_id = false,
1364 .verify = NULL,
1365 .wrapper = mlx4_GEN_QP_wrapper
1366 },
1367 {
1368 .opcode = MLX4_CMD_SUSPEND_QP,
1369 .has_inbox = false,
1370 .has_outbox = false,
1371 .out_is_imm = false,
1372 .encode_slave_id = false,
1373 .verify = NULL,
1374 .wrapper = mlx4_GEN_QP_wrapper
1375 },
1376 {
1377 .opcode = MLX4_CMD_UNSUSPEND_QP,
1378 .has_inbox = false,
1379 .has_outbox = false,
1380 .out_is_imm = false,
1381 .encode_slave_id = false,
1382 .verify = NULL,
1383 .wrapper = mlx4_GEN_QP_wrapper
1384 },
1385 {
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001386 .opcode = MLX4_CMD_UPDATE_QP,
Matan Barakce8d9e02014-05-15 15:29:27 +03001387 .has_inbox = true,
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001388 .has_outbox = false,
1389 .out_is_imm = false,
1390 .encode_slave_id = false,
1391 .verify = NULL,
Matan Barakce8d9e02014-05-15 15:29:27 +03001392 .wrapper = mlx4_UPDATE_QP_wrapper
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001393 },
1394 {
Yevgeny Petrilinfe6f7002013-07-28 18:54:21 +03001395 .opcode = MLX4_CMD_GET_OP_REQ,
1396 .has_inbox = false,
1397 .has_outbox = false,
1398 .out_is_imm = false,
1399 .encode_slave_id = false,
1400 .verify = NULL,
Or Gerlitzb7475792014-03-27 14:02:02 +02001401 .wrapper = mlx4_CMD_EPERM_wrapper,
Yevgeny Petrilinfe6f7002013-07-28 18:54:21 +03001402 },
1403 {
Jack Morgenstein0a9a0182012-08-03 08:40:45 +00001404 .opcode = MLX4_CMD_CONF_SPECIAL_QP,
1405 .has_inbox = false,
1406 .has_outbox = false,
1407 .out_is_imm = false,
1408 .encode_slave_id = false,
1409 .verify = NULL, /* XXX verify: only demux can do this */
1410 .wrapper = NULL
1411 },
1412 {
1413 .opcode = MLX4_CMD_MAD_IFC,
1414 .has_inbox = true,
1415 .has_outbox = true,
1416 .out_is_imm = false,
1417 .encode_slave_id = false,
1418 .verify = NULL,
1419 .wrapper = mlx4_MAD_IFC_wrapper
1420 },
1421 {
Jack Morgenstein114840c2014-06-01 11:53:50 +03001422 .opcode = MLX4_CMD_MAD_DEMUX,
1423 .has_inbox = false,
1424 .has_outbox = false,
1425 .out_is_imm = false,
1426 .encode_slave_id = false,
1427 .verify = NULL,
1428 .wrapper = mlx4_CMD_EPERM_wrapper
1429 },
1430 {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001431 .opcode = MLX4_CMD_QUERY_IF_STAT,
1432 .has_inbox = false,
1433 .has_outbox = true,
1434 .out_is_imm = false,
1435 .encode_slave_id = false,
1436 .verify = NULL,
1437 .wrapper = mlx4_QUERY_IF_STAT_wrapper
1438 },
Saeed Mahameedadbc7ac2014-10-27 11:37:37 +02001439 {
1440 .opcode = MLX4_CMD_ACCESS_REG,
1441 .has_inbox = true,
1442 .has_outbox = true,
1443 .out_is_imm = false,
1444 .encode_slave_id = false,
1445 .verify = NULL,
Saeed Mahameed6e806692014-11-02 16:26:13 +02001446 .wrapper = mlx4_ACCESS_REG_wrapper,
Saeed Mahameedadbc7ac2014-10-27 11:37:37 +02001447 },
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001448 /* Native multicast commands are not available for guests */
1449 {
1450 .opcode = MLX4_CMD_QP_ATTACH,
1451 .has_inbox = true,
1452 .has_outbox = false,
1453 .out_is_imm = false,
1454 .encode_slave_id = false,
1455 .verify = NULL,
1456 .wrapper = mlx4_QP_ATTACH_wrapper
1457 },
1458 {
Eugenia Emantayev0ec2c0f2011-12-13 04:16:02 +00001459 .opcode = MLX4_CMD_PROMISC,
1460 .has_inbox = false,
1461 .has_outbox = false,
1462 .out_is_imm = false,
1463 .encode_slave_id = false,
1464 .verify = NULL,
1465 .wrapper = mlx4_PROMISC_wrapper
1466 },
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001467 /* Ethernet specific commands */
1468 {
1469 .opcode = MLX4_CMD_SET_VLAN_FLTR,
1470 .has_inbox = true,
1471 .has_outbox = false,
1472 .out_is_imm = false,
1473 .encode_slave_id = false,
1474 .verify = NULL,
1475 .wrapper = mlx4_SET_VLAN_FLTR_wrapper
1476 },
1477 {
1478 .opcode = MLX4_CMD_SET_MCAST_FLTR,
1479 .has_inbox = false,
1480 .has_outbox = false,
1481 .out_is_imm = false,
1482 .encode_slave_id = false,
1483 .verify = NULL,
1484 .wrapper = mlx4_SET_MCAST_FLTR_wrapper
1485 },
1486 {
1487 .opcode = MLX4_CMD_DUMP_ETH_STATS,
1488 .has_inbox = false,
1489 .has_outbox = true,
1490 .out_is_imm = false,
1491 .encode_slave_id = false,
1492 .verify = NULL,
1493 .wrapper = mlx4_DUMP_ETH_STATS_wrapper
1494 },
Eugenia Emantayev0ec2c0f2011-12-13 04:16:02 +00001495 {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001496 .opcode = MLX4_CMD_INFORM_FLR_DONE,
1497 .has_inbox = false,
1498 .has_outbox = false,
1499 .out_is_imm = false,
1500 .encode_slave_id = false,
1501 .verify = NULL,
1502 .wrapper = NULL
1503 },
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00001504 /* flow steering commands */
1505 {
1506 .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1507 .has_inbox = true,
1508 .has_outbox = false,
1509 .out_is_imm = true,
1510 .encode_slave_id = false,
1511 .verify = NULL,
1512 .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1513 },
1514 {
1515 .opcode = MLX4_QP_FLOW_STEERING_DETACH,
1516 .has_inbox = false,
1517 .has_outbox = false,
1518 .out_is_imm = false,
1519 .encode_slave_id = false,
1520 .verify = NULL,
1521 .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1522 },
Matan Barak4de65802013-11-07 15:25:14 +02001523 {
1524 .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
1525 .has_inbox = false,
1526 .has_outbox = false,
1527 .out_is_imm = false,
1528 .encode_slave_id = false,
1529 .verify = NULL,
Or Gerlitzb7475792014-03-27 14:02:02 +02001530 .wrapper = mlx4_CMD_EPERM_wrapper
Matan Barak4de65802013-11-07 15:25:14 +02001531 },
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001532};
1533
1534static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1535 struct mlx4_vhcr_cmd *in_vhcr)
1536{
1537 struct mlx4_priv *priv = mlx4_priv(dev);
1538 struct mlx4_cmd_info *cmd = NULL;
1539 struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1540 struct mlx4_vhcr *vhcr;
1541 struct mlx4_cmd_mailbox *inbox = NULL;
1542 struct mlx4_cmd_mailbox *outbox = NULL;
1543 u64 in_param;
1544 u64 out_param;
1545 int ret = 0;
1546 int i;
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001547 int err = 0;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001548
1549 /* Create sw representation of Virtual HCR */
1550 vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
1551 if (!vhcr)
1552 return -ENOMEM;
1553
1554 /* DMA in the vHCR */
1555 if (!in_vhcr) {
1556 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1557 priv->mfunc.master.slave_state[slave].vhcr_dma,
1558 ALIGN(sizeof(struct mlx4_vhcr_cmd),
1559 MLX4_ACCESS_MEM_ALIGN), 1);
1560 if (ret) {
Joe Perches1a91de22014-05-07 12:52:57 -07001561 mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1562 __func__, ret);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001563 kfree(vhcr);
1564 return ret;
1565 }
1566 }
1567
1568 /* Fill SW VHCR fields */
1569 vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1570 vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1571 vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1572 vhcr->token = be16_to_cpu(vhcr_cmd->token);
1573 vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1574 vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1575 vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1576
1577 /* Lookup command */
1578 for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
1579 if (vhcr->op == cmd_info[i].opcode) {
1580 cmd = &cmd_info[i];
1581 break;
1582 }
1583 }
1584 if (!cmd) {
1585 mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
1586 vhcr->op, slave);
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001587 vhcr_cmd->status = CMD_STAT_BAD_PARAM;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001588 goto out_status;
1589 }
1590
1591 /* Read inbox */
1592 if (cmd->has_inbox) {
1593 vhcr->in_param &= INBOX_MASK;
1594 inbox = mlx4_alloc_cmd_mailbox(dev);
1595 if (IS_ERR(inbox)) {
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001596 vhcr_cmd->status = CMD_STAT_BAD_SIZE;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001597 inbox = NULL;
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001598 goto out_status;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001599 }
1600
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001601 if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1602 vhcr->in_param,
1603 MLX4_MAILBOX_SIZE, 1)) {
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001604 mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1605 __func__, cmd->opcode);
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001606 vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1607 goto out_status;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001608 }
1609 }
1610
1611 /* Apply permission and bound checks if applicable */
1612 if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
Joe Perches1a91de22014-05-07 12:52:57 -07001613 mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
1614 vhcr->op, slave, vhcr->in_modifier);
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001615 vhcr_cmd->status = CMD_STAT_BAD_OP;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001616 goto out_status;
1617 }
1618
1619 /* Allocate outbox */
1620 if (cmd->has_outbox) {
1621 outbox = mlx4_alloc_cmd_mailbox(dev);
1622 if (IS_ERR(outbox)) {
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001623 vhcr_cmd->status = CMD_STAT_BAD_SIZE;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001624 outbox = NULL;
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001625 goto out_status;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001626 }
1627 }
1628
1629 /* Execute the command! */
1630 if (cmd->wrapper) {
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001631 err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
1632 cmd);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001633 if (cmd->out_is_imm)
1634 vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1635 } else {
1636 in_param = cmd->has_inbox ? (u64) inbox->dma :
1637 vhcr->in_param;
1638 out_param = cmd->has_outbox ? (u64) outbox->dma :
1639 vhcr->out_param;
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001640 err = __mlx4_cmd(dev, in_param, &out_param,
1641 cmd->out_is_imm, vhcr->in_modifier,
1642 vhcr->op_modifier, vhcr->op,
1643 MLX4_CMD_TIME_CLASS_A,
1644 MLX4_CMD_NATIVE);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001645
1646 if (cmd->out_is_imm) {
1647 vhcr->out_param = out_param;
1648 vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1649 }
1650 }
1651
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001652 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001653 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001654 vhcr->op, slave, vhcr->errno, err);
1655 vhcr_cmd->status = mlx4_errno_to_status(err);
1656 goto out_status;
1657 }
1658
1659
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001660 /* Write outbox if command completed successfully */
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001661 if (cmd->has_outbox && !vhcr_cmd->status) {
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001662 ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
1663 vhcr->out_param,
1664 MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
1665 if (ret) {
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001666 /* If we failed to write back the outbox after the
 1667 * command was successfully executed, we must fail this
 1668 * slave, as it is now in an undefined state */
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001669 mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
1670 goto out;
1671 }
1672 }
1673
1674out_status:
1675 /* DMA back vhcr result */
1676 if (!in_vhcr) {
1677 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1678 priv->mfunc.master.slave_state[slave].vhcr_dma,
1679 ALIGN(sizeof(struct mlx4_vhcr),
1680 MLX4_ACCESS_MEM_ALIGN),
1681 MLX4_CMD_WRAPPED);
1682 if (ret)
1683 mlx4_err(dev, "%s:Failed writing vhcr result\n",
1684 __func__);
1685 else if (vhcr->e_bit &&
1686 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
Joe Perches1a91de22014-05-07 12:52:57 -07001687 mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
1688 slave);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001689 }
1690
1691out:
1692 kfree(vhcr);
1693 mlx4_free_cmd_mailbox(dev, inbox);
1694 mlx4_free_cmd_mailbox(dev, outbox);
1695 return ret;
1696}
1697
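/* Apply a changed VST vlan/qos/link-state admin setting to a running VF
 * without waiting for the VF to restart: register the new vlan if needed
 * and queue a work item that updates the VF's QPs via UPDATE_QP.
 * Returns nonzero when the change could not be applied immediately
 * (e.g. the slave is not active or UPDATE_QP is not supported).
 */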
Jingoo Hanf0946682013-08-05 18:04:51 +09001698static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001699 int slave, int port)
1700{
1701 struct mlx4_vport_oper_state *vp_oper;
1702 struct mlx4_vport_state *vp_admin;
1703 struct mlx4_vf_immed_vlan_work *work;
Rony Efraim0a6eac22013-06-27 19:05:22 +03001704 struct mlx4_dev *dev = &(priv->dev);
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001705 int err;
1706 int admin_vlan_ix = NO_INDX;
1707
1708 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1709 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1710
1711 if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
Rony Efraim0a6eac22013-06-27 19:05:22 +03001712 vp_oper->state.default_qos == vp_admin->default_qos &&
1713 vp_oper->state.link_state == vp_admin->link_state)
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001714 return 0;
1715
Rony Efraim0a6eac22013-06-27 19:05:22 +03001716 if (!(priv->mfunc.master.slave_state[slave].active &&
Rony Efraimf0f829b2013-11-07 12:19:51 +02001717 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
Rony Efraim0a6eac22013-06-27 19:05:22 +03001718 /* even if the UPDATE_QP command isn't supported, we still want
1719 * to set this VF link according to the admin directive
1720 */
1721 vp_oper->state.link_state = vp_admin->link_state;
1722 return -1;
1723 }
1724
1725 mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
1726 slave, port);
Joe Perches1a91de22014-05-07 12:52:57 -07001727 mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
1728 vp_admin->default_vlan, vp_admin->default_qos,
1729 vp_admin->link_state);
Rony Efraim0a6eac22013-06-27 19:05:22 +03001730
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001731 work = kzalloc(sizeof(*work), GFP_KERNEL);
1732 if (!work)
1733 return -ENOMEM;
1734
1735 if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
Rony Efraimf0f829b2013-11-07 12:19:51 +02001736 if (MLX4_VGT != vp_admin->default_vlan) {
1737 err = __mlx4_register_vlan(&priv->dev, port,
1738 vp_admin->default_vlan,
1739 &admin_vlan_ix);
1740 if (err) {
1741 kfree(work);
Joe Perches1a91de22014-05-07 12:52:57 -07001742 mlx4_warn(&priv->dev,
Rony Efraimf0f829b2013-11-07 12:19:51 +02001743 "No vlan resources slave %d, port %d\n",
1744 slave, port);
1745 return err;
1746 }
1747 } else {
1748 admin_vlan_ix = NO_INDX;
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001749 }
1750 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
Joe Perches1a91de22014-05-07 12:52:57 -07001751 mlx4_dbg(&priv->dev,
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001752 "alloc vlan %d idx %d slave %d port %d\n",
1753 (int)(vp_admin->default_vlan),
1754 admin_vlan_ix, slave, port);
1755 }
1756
1757 /* save original vlan ix and vlan id */
1758 work->orig_vlan_id = vp_oper->state.default_vlan;
1759 work->orig_vlan_ix = vp_oper->vlan_idx;
1760
1761 /* handle new qos */
1762 if (vp_oper->state.default_qos != vp_admin->default_qos)
1763 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1764
1765 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1766 vp_oper->vlan_idx = admin_vlan_ix;
1767
1768 vp_oper->state.default_vlan = vp_admin->default_vlan;
1769 vp_oper->state.default_qos = vp_admin->default_qos;
Rony Efraim0a6eac22013-06-27 19:05:22 +03001770 vp_oper->state.link_state = vp_admin->link_state;
1771
1772 if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
1773 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001774
1775 /* iterate over QPs owned by this slave, using UPDATE_QP */
1776 work->port = port;
1777 work->slave = slave;
1778 work->qos = vp_oper->state.default_qos;
1779 work->vlan_id = vp_oper->state.default_vlan;
1780 work->vlan_ix = vp_oper->vlan_idx;
1781 work->priv = priv;
1782 INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1783 queue_work(priv->mfunc.master.comm_wq, &work->work);
1784
1785 return 0;
1786}
1787
1788
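/* Copy the administrative per-port settings (vf_admin) into the
 * operational state (vf_oper) when a slave starts up: latch the per-port
 * SMI-enable admin setting, register the VST default vlan and, when
 * spoof-checking is enabled, the administered MAC, recording the allocated
 * indices for later release.
 */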
Rony Efraim0eb62b92013-04-25 05:22:26 +00001789static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1790{
Rony Efraim3f7fb022013-04-25 05:22:28 +00001791 int port, err;
1792 struct mlx4_vport_state *vp_admin;
1793 struct mlx4_vport_oper_state *vp_oper;
Matan Barak449fc482014-03-19 18:11:52 +02001794 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1795 &priv->dev, slave);
1796 int min_port = find_first_bit(actv_ports.ports,
1797 priv->dev.caps.num_ports) + 1;
1798 int max_port = min_port - 1 +
1799 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
Rony Efraim3f7fb022013-04-25 05:22:28 +00001800
Matan Barak449fc482014-03-19 18:11:52 +02001801 for (port = min_port; port <= max_port; port++) {
1802 if (!test_bit(port - 1, actv_ports.ports))
1803 continue;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03001804 priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
1805 priv->mfunc.master.vf_admin[slave].enable_smi[port];
Rony Efraim3f7fb022013-04-25 05:22:28 +00001806 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1807 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1808 vp_oper->state = *vp_admin;
1809 if (MLX4_VGT != vp_admin->default_vlan) {
1810 err = __mlx4_register_vlan(&priv->dev, port,
1811 vp_admin->default_vlan, &(vp_oper->vlan_idx));
1812 if (err) {
1813 vp_oper->vlan_idx = NO_INDX;
Joe Perches1a91de22014-05-07 12:52:57 -07001814 mlx4_warn(&priv->dev,
Masanari Iida1a84db52014-08-29 23:37:33 +09001815 "No vlan resources slave %d, port %d\n",
Rony Efraim3f7fb022013-04-25 05:22:28 +00001816 slave, port);
1817 return err;
1818 }
Joe Perches1a91de22014-05-07 12:52:57 -07001819 mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
Rony Efraim3f7fb022013-04-25 05:22:28 +00001820 (int)(vp_oper->state.default_vlan),
1821 vp_oper->vlan_idx, slave, port);
1822 }
Rony Efraime6b6a232013-04-25 05:22:29 +00001823 if (vp_admin->spoofchk) {
1824 vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
1825 port,
1826 vp_admin->mac);
1827 if (0 > vp_oper->mac_idx) {
1828 err = vp_oper->mac_idx;
1829 vp_oper->mac_idx = NO_INDX;
Joe Perches1a91de22014-05-07 12:52:57 -07001830 mlx4_warn(&priv->dev,
Masanari Iida1a84db52014-08-29 23:37:33 +09001831 "No mac resources slave %d, port %d\n",
Rony Efraime6b6a232013-04-25 05:22:29 +00001832 slave, port);
1833 return err;
1834 }
Joe Perches1a91de22014-05-07 12:52:57 -07001835 mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
Rony Efraime6b6a232013-04-25 05:22:29 +00001836 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
1837 }
Rony Efraim0eb62b92013-04-25 05:22:26 +00001838 }
1839 return 0;
1840}
1841
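/* Undo mlx4_master_activate_admin_state(): release the per-port vlan and
 * MAC indices held on behalf of the slave and mark SMI as disabled again.
 */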
Rony Efraim3f7fb022013-04-25 05:22:28 +00001842static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
1843{
1844 int port;
1845 struct mlx4_vport_oper_state *vp_oper;
Matan Barak449fc482014-03-19 18:11:52 +02001846 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1847 &priv->dev, slave);
1848 int min_port = find_first_bit(actv_ports.ports,
1849 priv->dev.caps.num_ports) + 1;
1850 int max_port = min_port - 1 +
1851 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
Rony Efraim3f7fb022013-04-25 05:22:28 +00001852
Matan Barak449fc482014-03-19 18:11:52 +02001853
1854 for (port = min_port; port <= max_port; port++) {
1855 if (!test_bit(port - 1, actv_ports.ports))
1856 continue;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03001857 priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
1858 MLX4_VF_SMI_DISABLED;
Rony Efraim3f7fb022013-04-25 05:22:28 +00001859 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1860 if (NO_INDX != vp_oper->vlan_idx) {
1861 __mlx4_unregister_vlan(&priv->dev,
Jack Morgenstein2009d002013-11-03 10:03:19 +02001862 port, vp_oper->state.default_vlan);
Rony Efraim3f7fb022013-04-25 05:22:28 +00001863 vp_oper->vlan_idx = NO_INDX;
1864 }
Rony Efraime6b6a232013-04-25 05:22:29 +00001865 if (NO_INDX != vp_oper->mac_idx) {
Jack Morgensteinc32b7df2013-11-03 10:04:07 +02001866 __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
Rony Efraime6b6a232013-04-25 05:22:29 +00001867 vp_oper->mac_idx = NO_INDX;
1868 }
Rony Efraim3f7fb022013-04-25 05:22:28 +00001869 }
1870 return;
1871}
1872
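/* Handle one command written by a slave to its comm-channel register.
 * The 32-bit word decoded by the caller carries the toggle bit in bit 31,
 * the command in bits 23:16 and a 16-bit parameter in bits 15:0; the vHCR
 * DMA address is delivered 16 bits at a time through the
 * VHCR0/VHCR1/VHCR2/VHCR_EN handshake, and VHCR_POST triggers execution of
 * the posted command.  Any protocol violation resets the slave's state and
 * cleans up its resources.
 */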
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001873static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1874 u16 param, u8 toggle)
1875{
1876 struct mlx4_priv *priv = mlx4_priv(dev);
1877 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1878 u32 reply;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001879 u8 is_going_down = 0;
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00001880 int i;
Jack Morgenstein311f8132012-11-27 16:24:30 +00001881 unsigned long flags;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001882
1883 slave_state[slave].comm_toggle ^= 1;
1884 reply = (u32) slave_state[slave].comm_toggle << 31;
1885 if (toggle != slave_state[slave].comm_toggle) {
Joe Perches1a91de22014-05-07 12:52:57 -07001886 mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
1887 toggle, slave);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001888 goto reset_slave;
1889 }
1890 if (cmd == MLX4_COMM_CMD_RESET) {
1891 mlx4_warn(dev, "Received reset from slave:%d\n", slave);
1892 slave_state[slave].active = false;
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02001893 slave_state[slave].old_vlan_api = false;
Rony Efraim3f7fb022013-04-25 05:22:28 +00001894 mlx4_master_deactivate_admin_state(priv, slave);
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00001895 for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
1896 slave_state[slave].event_eq[i].eqn = -1;
1897 slave_state[slave].event_eq[i].token = 0;
1898 }
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001899 /* check if we are in the middle of an FLR process;
 1900 * if so, return "retry" status to the slave */
Or Gerlitz162344e2012-05-15 10:34:57 +00001901 if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001902 goto inform_slave_state;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001903
Jack Morgensteinfc065732012-08-03 08:40:42 +00001904 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
1905
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001906 /* write the version in the event field */
1907 reply |= mlx4_comm_get_version();
1908
1909 goto reset_slave;
1910 }
 1911 /* command from slave in the middle of FLR */
1912 if (cmd != MLX4_COMM_CMD_RESET &&
1913 MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
Joe Perches1a91de22014-05-07 12:52:57 -07001914 mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
1915 slave, cmd);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001916 return;
1917 }
1918
1919 switch (cmd) {
1920 case MLX4_COMM_CMD_VHCR0:
1921 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
1922 goto reset_slave;
1923 slave_state[slave].vhcr_dma = ((u64) param) << 48;
1924 priv->mfunc.master.slave_state[slave].cookie = 0;
1925 mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
1926 break;
1927 case MLX4_COMM_CMD_VHCR1:
1928 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
1929 goto reset_slave;
1930 slave_state[slave].vhcr_dma |= ((u64) param) << 32;
1931 break;
1932 case MLX4_COMM_CMD_VHCR2:
1933 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
1934 goto reset_slave;
1935 slave_state[slave].vhcr_dma |= ((u64) param) << 16;
1936 break;
1937 case MLX4_COMM_CMD_VHCR_EN:
1938 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
1939 goto reset_slave;
1940 slave_state[slave].vhcr_dma |= param;
Rony Efraim3f7fb022013-04-25 05:22:28 +00001941 if (mlx4_master_activate_admin_state(priv, slave))
1942 goto reset_slave;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001943 slave_state[slave].active = true;
Jack Morgensteinfc065732012-08-03 08:40:42 +00001944 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001945 break;
1946 case MLX4_COMM_CMD_VHCR_POST:
1947 if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
1948 (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
1949 goto reset_slave;
Roland Dreierf3d4c892012-09-25 21:24:07 -07001950
1951 mutex_lock(&priv->cmd.slave_cmd_mutex);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001952 if (mlx4_master_process_vhcr(dev, slave, NULL)) {
Joe Perches1a91de22014-05-07 12:52:57 -07001953 mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
1954 slave);
Roland Dreierf3d4c892012-09-25 21:24:07 -07001955 mutex_unlock(&priv->cmd.slave_cmd_mutex);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001956 goto reset_slave;
1957 }
Roland Dreierf3d4c892012-09-25 21:24:07 -07001958 mutex_unlock(&priv->cmd.slave_cmd_mutex);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001959 break;
1960 default:
1961 mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
1962 goto reset_slave;
1963 }
Jack Morgenstein311f8132012-11-27 16:24:30 +00001964 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001965 if (!slave_state[slave].is_slave_going_down)
1966 slave_state[slave].last_cmd = cmd;
1967 else
1968 is_going_down = 1;
Jack Morgenstein311f8132012-11-27 16:24:30 +00001969 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001970 if (is_going_down) {
Joe Perches1a91de22014-05-07 12:52:57 -07001971 mlx4_warn(dev, "Slave is going down, aborting command (%d) from slave:%d\n",
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001972 cmd, slave);
1973 return;
1974 }
1975 __raw_writel((__force u32) cpu_to_be32(reply),
1976 &priv->mfunc.comm[slave].slave_read);
1977 mmiowb();
1978
1979 return;
1980
1981reset_slave:
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001982 /* cleanup any slave resources */
1983 mlx4_delete_all_resources_for_slave(dev, slave);
Jack Morgenstein311f8132012-11-27 16:24:30 +00001984 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001985 if (!slave_state[slave].is_slave_going_down)
1986 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
Jack Morgenstein311f8132012-11-27 16:24:30 +00001987 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001988 /* with slave in the middle of FLR, no need to clean resources again. */
1989inform_slave_state:
1990 memset(&slave_state[slave].event_eq, 0,
1991 sizeof(struct mlx4_slave_event_eq_info));
1992 __raw_writel((__force u32) cpu_to_be32(reply),
1993 &priv->mfunc.comm[slave].slave_read);
1994 wmb();
1995}
1996
1997/* master command processing */
1998void mlx4_master_comm_channel(struct work_struct *work)
1999{
2000 struct mlx4_mfunc_master_ctx *master =
2001 container_of(work,
2002 struct mlx4_mfunc_master_ctx,
2003 comm_work);
2004 struct mlx4_mfunc *mfunc =
2005 container_of(master, struct mlx4_mfunc, master);
2006 struct mlx4_priv *priv =
2007 container_of(mfunc, struct mlx4_priv, mfunc);
2008 struct mlx4_dev *dev = &priv->dev;
2009 __be32 *bit_vec;
2010 u32 comm_cmd;
2011 u32 vec;
2012 int i, j, slave;
2013 int toggle;
2014 int served = 0;
2015 int reported = 0;
2016 u32 slt;
2017
2018 bit_vec = master->comm_arm_bit_vector;
2019 for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
2020 vec = be32_to_cpu(bit_vec[i]);
2021 for (j = 0; j < 32; j++) {
2022 if (!(vec & (1 << j)))
2023 continue;
2024 ++reported;
2025 slave = (i * 32) + j;
2026 comm_cmd = swab32(readl(
2027 &mfunc->comm[slave].slave_write));
2028 slt = swab32(readl(&mfunc->comm[slave].slave_read))
2029 >> 31;
2030 toggle = comm_cmd >> 31;
2031 if (toggle != slt) {
2032 if (master->slave_state[slave].comm_toggle
2033 != slt) {
Amir Vadaic20862c2014-05-22 15:55:40 +03002034 pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynchronizing.\n",
2035 slave, slt,
2036 master->slave_state[slave].comm_toggle);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002037 master->slave_state[slave].comm_toggle =
2038 slt;
2039 }
2040 mlx4_master_do_cmd(dev, slave,
2041 comm_cmd >> 16 & 0xff,
2042 comm_cmd & 0xffff, toggle);
2043 ++served;
2044 }
2045 }
2046 }
2047
2048 if (reported && reported != served)
Joe Perches1a91de22014-05-07 12:52:57 -07002049 mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002050 reported, served);
2051
2052 if (mlx4_ARM_COMM_CHANNEL(dev))
2053 mlx4_warn(dev, "Failed to arm comm channel events\n");
2054}
2055
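/* Align the driver's comm-channel toggle tracking with the value currently
 * latched in the slave_write/slave_read registers.  If the channel was left
 * unsynced by a previous user of the function, zero both registers and
 * restart from toggle 0.
 */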
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002056static int sync_toggles(struct mlx4_dev *dev)
2057{
2058 struct mlx4_priv *priv = mlx4_priv(dev);
2059 int wr_toggle;
2060 int rd_toggle;
2061 unsigned long end;
2062
2063 wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
2064 end = jiffies + msecs_to_jiffies(5000);
2065
2066 while (time_before(jiffies, end)) {
2067 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
2068 if (rd_toggle == wr_toggle) {
2069 priv->cmd.comm_toggle = rd_toggle;
2070 return 0;
2071 }
2072
2073 cond_resched();
2074 }
2075
2076 /*
2077 * we could reach here if for example the previous VM using this
2078 * function misbehaved and left the channel with unsynced state. We
2079 * should fix this here and give this VM a chance to use a properly
2080 * synced channel
2081 */
2082 mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
2083 __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
2084 __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
2085 priv->cmd.comm_toggle = 0;
2086
2087 return 0;
2088}
2089
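/* Set up the multi-function (SR-IOV) command path: map the comm channel,
 * and on the master also allocate the per-slave state and vf_admin/vf_oper
 * arrays, initialize the comm-channel and FLR work items plus their
 * workqueue and the resource tracker, and arm the comm-channel event queue.
 * A slave only needs to map the channel and sync the toggle bit.
 */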
2090int mlx4_multi_func_init(struct mlx4_dev *dev)
2091{
2092 struct mlx4_priv *priv = mlx4_priv(dev);
2093 struct mlx4_slave_state *s_state;
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00002094 int i, j, err, port;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002095
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002096 if (mlx4_is_master(dev))
2097 priv->mfunc.comm =
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002098 ioremap(pci_resource_start(dev->persist->pdev,
2099 priv->fw.comm_bar) +
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002100 priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2101 else
2102 priv->mfunc.comm =
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002103 ioremap(pci_resource_start(dev->persist->pdev, 2) +
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002104 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2105 if (!priv->mfunc.comm) {
Joe Perches1a91de22014-05-07 12:52:57 -07002106 mlx4_err(dev, "Couldn't map communication vector\n");
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002107 goto err_vhcr;
2108 }
2109
2110 if (mlx4_is_master(dev)) {
2111 priv->mfunc.master.slave_state =
2112 kzalloc(dev->num_slaves *
2113 sizeof(struct mlx4_slave_state), GFP_KERNEL);
2114 if (!priv->mfunc.master.slave_state)
2115 goto err_comm;
2116
Rony Efraim0eb62b92013-04-25 05:22:26 +00002117 priv->mfunc.master.vf_admin =
2118 kzalloc(dev->num_slaves *
2119 sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
2120 if (!priv->mfunc.master.vf_admin)
2121 goto err_comm_admin;
2122
2123 priv->mfunc.master.vf_oper =
2124 kzalloc(dev->num_slaves *
2125 sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
2126 if (!priv->mfunc.master.vf_oper)
2127 goto err_comm_oper;
2128
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002129 for (i = 0; i < dev->num_slaves; ++i) {
2130 s_state = &priv->mfunc.master.slave_state[i];
2131 s_state->last_cmd = MLX4_COMM_CMD_RESET;
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00002132 for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2133 s_state->event_eq[j].eqn = -1;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002134 __raw_writel((__force u32) 0,
2135 &priv->mfunc.comm[i].slave_write);
2136 __raw_writel((__force u32) 0,
2137 &priv->mfunc.comm[i].slave_read);
2138 mmiowb();
2139 for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2140 s_state->vlan_filter[port] =
2141 kzalloc(sizeof(struct mlx4_vlan_fltr),
2142 GFP_KERNEL);
2143 if (!s_state->vlan_filter[port]) {
 2144 while (--port)
 2145 kfree(s_state->vlan_filter[port]);
2146 goto err_slaves;
2147 }
2148 INIT_LIST_HEAD(&s_state->mcast_filters[port]);
Rony Efraim0eb62b92013-04-25 05:22:26 +00002149 priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
Rony Efraim3f7fb022013-04-25 05:22:28 +00002150 priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
Rony Efraim0eb62b92013-04-25 05:22:26 +00002151 priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
2152 priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002153 }
2154 spin_lock_init(&s_state->lock);
2155 }
2156
Or Gerlitz08ff3232012-10-21 14:59:24 +00002157 memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002158 priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2159 INIT_WORK(&priv->mfunc.master.comm_work,
2160 mlx4_master_comm_channel);
2161 INIT_WORK(&priv->mfunc.master.slave_event_work,
2162 mlx4_gen_slave_eqe);
2163 INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2164 mlx4_master_handle_slave_flr);
2165 spin_lock_init(&priv->mfunc.master.slave_state_lock);
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00002166 spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002167 priv->mfunc.master.comm_wq =
2168 create_singlethread_workqueue("mlx4_comm");
2169 if (!priv->mfunc.master.comm_wq)
2170 goto err_slaves;
2171
2172 if (mlx4_init_resource_tracker(dev))
2173 goto err_thread;
2174
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002175 err = mlx4_ARM_COMM_CHANNEL(dev);
2176 if (err) {
2177 mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
2178 err);
2179 goto err_resource;
2180 }
2181
2182 } else {
2183 err = sync_toggles(dev);
2184 if (err) {
2185 mlx4_err(dev, "Couldn't sync toggles\n");
2186 goto err_comm;
2187 }
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002188 }
2189 return 0;
2190
2191err_resource:
Jack Morgensteinb8924952012-05-15 10:35:02 +00002192 mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002193err_thread:
2194 flush_workqueue(priv->mfunc.master.comm_wq);
2195 destroy_workqueue(priv->mfunc.master.comm_wq);
2196err_slaves:
 2197 while (i--) {
2198 for (port = 1; port <= MLX4_MAX_PORTS; port++)
2199 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2200 }
Rony Efraim0eb62b92013-04-25 05:22:26 +00002201 kfree(priv->mfunc.master.vf_oper);
2202err_comm_oper:
2203 kfree(priv->mfunc.master.vf_admin);
2204err_comm_admin:
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002205 kfree(priv->mfunc.master.slave_state);
2206err_comm:
2207 iounmap(priv->mfunc.comm);
2208err_vhcr:
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002209 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2210 priv->mfunc.vhcr,
2211 priv->mfunc.vhcr_dma);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002212 priv->mfunc.vhcr = NULL;
2213 return -ENOMEM;
2214}
2215
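/* Initialize the command interface.  Each stage that completes sets a bit
 * in flags (MLX4_CMD_CLEANUP_*) so that a failure part-way through can hand
 * exactly the completed stages to mlx4_cmd_cleanup().  The checks on
 * already-initialized members make the function safe to call again after a
 * partial teardown.
 */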
Roland Dreier225c7b12007-05-08 18:00:38 -07002216int mlx4_cmd_init(struct mlx4_dev *dev)
2217{
2218 struct mlx4_priv *priv = mlx4_priv(dev);
Matan Barakffc39f62014-11-13 14:45:29 +02002219 int flags = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002220
Matan Barakffc39f62014-11-13 14:45:29 +02002221 if (!priv->cmd.initialized) {
Matan Barakffc39f62014-11-13 14:45:29 +02002222 mutex_init(&priv->cmd.slave_cmd_mutex);
2223 sema_init(&priv->cmd.poll_sem, 1);
2224 priv->cmd.use_events = 0;
2225 priv->cmd.toggle = 1;
2226 priv->cmd.initialized = 1;
2227 flags |= MLX4_CMD_CLEANUP_STRUCT;
2228 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002229
Matan Barakffc39f62014-11-13 14:45:29 +02002230 if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002231 priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
2232 0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002233 if (!priv->cmd.hcr) {
Joe Perches1a91de22014-05-07 12:52:57 -07002234 mlx4_err(dev, "Couldn't map command register\n");
Matan Barakffc39f62014-11-13 14:45:29 +02002235 goto err;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002236 }
Matan Barakffc39f62014-11-13 14:45:29 +02002237 flags |= MLX4_CMD_CLEANUP_HCR;
Roland Dreier225c7b12007-05-08 18:00:38 -07002238 }
2239
Matan Barakffc39f62014-11-13 14:45:29 +02002240 if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002241 priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
2242 PAGE_SIZE,
Roland Dreierf3d4c892012-09-25 21:24:07 -07002243 &priv->mfunc.vhcr_dma,
2244 GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00002245 if (!priv->mfunc.vhcr)
Matan Barakffc39f62014-11-13 14:45:29 +02002246 goto err;
2247
2248 flags |= MLX4_CMD_CLEANUP_VHCR;
Roland Dreierf3d4c892012-09-25 21:24:07 -07002249 }
2250
Matan Barakffc39f62014-11-13 14:45:29 +02002251 if (!priv->cmd.pool) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002252 priv->cmd.pool = pci_pool_create("mlx4_cmd",
2253 dev->persist->pdev,
Matan Barakffc39f62014-11-13 14:45:29 +02002254 MLX4_MAILBOX_SIZE,
2255 MLX4_MAILBOX_SIZE, 0);
2256 if (!priv->cmd.pool)
2257 goto err;
2258
2259 flags |= MLX4_CMD_CLEANUP_POOL;
2260 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002261
2262 return 0;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002263
Matan Barakffc39f62014-11-13 14:45:29 +02002264err:
2265 mlx4_cmd_cleanup(dev, flags);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002266 return -ENOMEM;
Roland Dreier225c7b12007-05-08 18:00:38 -07002267}
2268
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002269void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2270{
2271 struct mlx4_priv *priv = mlx4_priv(dev);
2272 int i, port;
2273
2274 if (mlx4_is_master(dev)) {
2275 flush_workqueue(priv->mfunc.master.comm_wq);
2276 destroy_workqueue(priv->mfunc.master.comm_wq);
2277 for (i = 0; i < dev->num_slaves; i++) {
2278 for (port = 1; port <= MLX4_MAX_PORTS; port++)
2279 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2280 }
2281 kfree(priv->mfunc.master.slave_state);
Rony Efraim0eb62b92013-04-25 05:22:26 +00002282 kfree(priv->mfunc.master.vf_admin);
2283 kfree(priv->mfunc.master.vf_oper);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002284 }
Eugenia Emantayevf08ad062012-02-06 06:26:17 +00002285
2286 iounmap(priv->mfunc.comm);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002287}
2288
Matan Barakffc39f62014-11-13 14:45:29 +02002289void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
Roland Dreier225c7b12007-05-08 18:00:38 -07002290{
2291 struct mlx4_priv *priv = mlx4_priv(dev);
2292
Matan Barakffc39f62014-11-13 14:45:29 +02002293 if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
2294 pci_pool_destroy(priv->cmd.pool);
2295 priv->cmd.pool = NULL;
2296 }
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002297
Matan Barakffc39f62014-11-13 14:45:29 +02002298 if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
2299 (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002300 iounmap(priv->cmd.hcr);
Matan Barakffc39f62014-11-13 14:45:29 +02002301 priv->cmd.hcr = NULL;
2302 }
2303 if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
2304 (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002305 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
Roland Dreierf3d4c892012-09-25 21:24:07 -07002306 priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
Matan Barakffc39f62014-11-13 14:45:29 +02002307 priv->mfunc.vhcr = NULL;
2308 }
2309 if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
2310 priv->cmd.initialized = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002311}
2312
2313/*
2314 * Switch to using events to issue FW commands (can only be called
2315 * after event queue for command events has been initialized).
2316 */
2317int mlx4_cmd_use_events(struct mlx4_dev *dev)
2318{
2319 struct mlx4_priv *priv = mlx4_priv(dev);
2320 int i;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002321 int err = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002322
2323 priv->cmd.context = kmalloc(priv->cmd.max_cmds *
2324 sizeof (struct mlx4_cmd_context),
2325 GFP_KERNEL);
2326 if (!priv->cmd.context)
2327 return -ENOMEM;
2328
2329 for (i = 0; i < priv->cmd.max_cmds; ++i) {
2330 priv->cmd.context[i].token = i;
2331 priv->cmd.context[i].next = i + 1;
Yishai Hadasf5aef5a2015-01-25 16:59:39 +02002332 /* To support fatal error flow, initialize all
2333 * cmd contexts to allow simulating completions
2334 * with complete() at any time.
2335 */
2336 init_completion(&priv->cmd.context[i].done);
Roland Dreier225c7b12007-05-08 18:00:38 -07002337 }
2338
2339 priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2340 priv->cmd.free_head = 0;
2341
2342 sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2343 spin_lock_init(&priv->cmd.context_lock);
2344
2345 for (priv->cmd.token_mask = 1;
2346 priv->cmd.token_mask < priv->cmd.max_cmds;
2347 priv->cmd.token_mask <<= 1)
2348 ; /* nothing */
2349 --priv->cmd.token_mask;
2350
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002351 down(&priv->cmd.poll_sem);
Roland Dreier225c7b12007-05-08 18:00:38 -07002352 priv->cmd.use_events = 1;
2353
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002354 return err;
Roland Dreier225c7b12007-05-08 18:00:38 -07002355}
2356
2357/*
2358 * Switch back to polling (used when shutting down the device)
2359 */
2360void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2361{
2362 struct mlx4_priv *priv = mlx4_priv(dev);
2363 int i;
2364
2365 priv->cmd.use_events = 0;
2366
2367 for (i = 0; i < priv->cmd.max_cmds; ++i)
2368 down(&priv->cmd.event_sem);
2369
2370 kfree(priv->cmd.context);
2371
2372 up(&priv->cmd.poll_sem);
2373}
2374
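/* Allocate a DMA-coherent command mailbox from the command pool.  Returns
 * an ERR_PTR() on failure, so callers must test with IS_ERR() rather than
 * for NULL.  Typical usage (illustrative sketch only):
 *
 *	struct mlx4_cmd_mailbox *mailbox = mlx4_alloc_cmd_mailbox(dev);
 *
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *	... fill mailbox->buf, pass mailbox->dma as the command's in_param ...
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 */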
2375struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2376{
2377 struct mlx4_cmd_mailbox *mailbox;
2378
2379 mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2380 if (!mailbox)
2381 return ERR_PTR(-ENOMEM);
2382
2383 mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2384 &mailbox->dma);
2385 if (!mailbox->buf) {
2386 kfree(mailbox);
2387 return ERR_PTR(-ENOMEM);
2388 }
2389
Jack Morgenstein571b8b92013-11-07 12:19:50 +02002390 memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
2391
Roland Dreier225c7b12007-05-08 18:00:38 -07002392 return mailbox;
2393}
2394EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2395
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002396void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2397 struct mlx4_cmd_mailbox *mailbox)
Roland Dreier225c7b12007-05-08 18:00:38 -07002398{
2399 if (!mailbox)
2400 return;
2401
2402 pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2403 kfree(mailbox);
2404}
2405EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002406
2407u32 mlx4_comm_get_version(void)
2408{
2409 return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2410}
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002411
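/* Slave/VF numbering: slave 0 is the PF itself, so VF n maps to slave n + 1
 * and back.  The two helpers below convert between the user-visible VF
 * number and the internal slave index, validating the range.
 */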
2412static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2413{
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002414 if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
2415 mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
2416 vf, dev->persist->num_vfs);
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002417 return -EINVAL;
2418 }
2419
2420 return vf+1;
2421}
2422
Matan Barakf74462a2014-03-19 18:11:51 +02002423int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2424{
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002425 if (slave < 1 || slave > dev->persist->num_vfs) {
Matan Barakf74462a2014-03-19 18:11:51 +02002426 mlx4_err(dev,
2427 "Bad slave number:%d (number of activated slaves: %lu)\n",
2428 slave, dev->num_slaves);
2429 return -EINVAL;
2430 }
2431 return slave - 1;
2432}
2433
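/* On a fatal/internal error, complete every outstanding event-mode command
 * with CMD_STAT_INTERNAL_ERR so that blocked callers can return.
 */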
Yishai Hadasf5aef5a2015-01-25 16:59:39 +02002434void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
2435{
2436 struct mlx4_priv *priv = mlx4_priv(dev);
2437 struct mlx4_cmd_context *context;
2438 int i;
2439
2440 spin_lock(&priv->cmd.context_lock);
2441 if (priv->cmd.context) {
2442 for (i = 0; i < priv->cmd.max_cmds; ++i) {
2443 context = &priv->cmd.context[i];
2444 context->fw_status = CMD_STAT_INTERNAL_ERR;
2445 context->result =
2446 mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
2447 complete(&context->done);
2448 }
2449 }
2450 spin_unlock(&priv->cmd.context_lock);
2451}
2452
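/* Return the bitmap of physical ports assigned to the given slave.  The PF
 * (slave 0) sees every port; a VF sees the contiguous range recorded for it
 * in dev_vfs[].
 */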
Matan Barakf74462a2014-03-19 18:11:51 +02002453struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2454{
2455 struct mlx4_active_ports actv_ports;
2456 int vf;
2457
2458 bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2459
2460 if (slave == 0) {
2461 bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2462 return actv_ports;
2463 }
2464
2465 vf = mlx4_get_vf_indx(dev, slave);
2466 if (vf < 0)
2467 return actv_ports;
2468
2469 bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2470 min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
2471 dev->caps.num_ports));
2472
2473 return actv_ports;
2474}
2475EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2476
2477int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2478{
2479 unsigned n;
2480 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2481 unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2482
2483 if (port <= 0 || port > m)
2484 return -EINVAL;
2485
2486 n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2487 if (port <= n)
2488 port = n + 1;
2489
2490 return port;
2491}
2492EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2493
2494int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2495{
2496 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2497 if (test_bit(port - 1, actv_ports.ports))
2498 return port -
2499 find_first_bit(actv_ports.ports, dev->caps.num_ports);
2500
2501 return -1;
2502}
2503EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
2504
2505struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2506 int port)
2507{
2508 unsigned i;
2509 struct mlx4_slaves_pport slaves_pport;
2510
2511 bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2512
2513 if (port <= 0 || port > dev->caps.num_ports)
2514 return slaves_pport;
2515
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002516 for (i = 0; i < dev->persist->num_vfs + 1; i++) {
Matan Barakf74462a2014-03-19 18:11:51 +02002517 struct mlx4_active_ports actv_ports =
2518 mlx4_get_active_ports(dev, i);
2519 if (test_bit(port - 1, actv_ports.ports))
2520 set_bit(i, slaves_pport.slaves);
2521 }
2522
2523 return slaves_pport;
2524}
2525EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2526
2527struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2528 struct mlx4_dev *dev,
2529 const struct mlx4_active_ports *crit_ports)
2530{
2531 unsigned i;
2532 struct mlx4_slaves_pport slaves_pport;
2533
2534 bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2535
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002536 for (i = 0; i < dev->persist->num_vfs + 1; i++) {
Matan Barakf74462a2014-03-19 18:11:51 +02002537 struct mlx4_active_ports actv_ports =
2538 mlx4_get_active_ports(dev, i);
2539 if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2540 dev->caps.num_ports))
2541 set_bit(i, slaves_pport.slaves);
2542 }
2543
2544 return slaves_pport;
2545}
2546EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2547
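/* Clamp a port number into the range of ports that are actually visible to
 * the given slave, so that admin requests aimed at a port the slave does
 * not own are redirected to its nearest valid port.
 */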
Matan Baraka91c7722014-09-10 16:41:53 +03002548static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
2549{
2550 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2551 int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
2552 + 1;
2553 int max_port = min_port +
2554 bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2555
2556 if (port < min_port)
2557 port = min_port;
2558 else if (port >= max_port)
2559 port = max_port - 1;
2560
2561 return port;
2562}
2563
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002564int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2565{
2566 struct mlx4_priv *priv = mlx4_priv(dev);
2567 struct mlx4_vport_state *s_info;
2568 int slave;
2569
2570 if (!mlx4_is_master(dev))
2571 return -EPROTONOSUPPORT;
2572
2573 slave = mlx4_get_slave_indx(dev, vf);
2574 if (slave < 0)
2575 return -EINVAL;
2576
Matan Baraka91c7722014-09-10 16:41:53 +03002577 port = mlx4_slaves_closest_port(dev, slave, port);
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002578 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2579 s_info->mac = mac;
2580 mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
2581 vf, port, s_info->mac);
2582 return 0;
2583}
2584EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
Rony Efraim3f7fb022013-04-25 05:22:28 +00002585
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002586
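/* Set the VST vlan/qos for a VF.  vlan == 0 together with qos == 0 means
 * VGT (guest-controlled tagging); anything else switches the VF to VST with
 * the given vlan and priority.  The change is pushed to a running VF when
 * possible, otherwise it takes effect on the next VF restart.
 */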
Rony Efraim3f7fb022013-04-25 05:22:28 +00002587int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2588{
2589 struct mlx4_priv *priv = mlx4_priv(dev);
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002590 struct mlx4_vport_state *vf_admin;
Rony Efraim3f7fb022013-04-25 05:22:28 +00002591 int slave;
2592
2593 if ((!mlx4_is_master(dev)) ||
2594 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
2595 return -EPROTONOSUPPORT;
2596
2597 if ((vlan > 4095) || (qos > 7))
2598 return -EINVAL;
2599
2600 slave = mlx4_get_slave_indx(dev, vf);
2601 if (slave < 0)
2602 return -EINVAL;
2603
Matan Baraka91c7722014-09-10 16:41:53 +03002604 port = mlx4_slaves_closest_port(dev, slave, port);
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002605 vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002606
Rony Efraim3f7fb022013-04-25 05:22:28 +00002607 if ((0 == vlan) && (0 == qos))
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002608 vf_admin->default_vlan = MLX4_VGT;
Rony Efraim3f7fb022013-04-25 05:22:28 +00002609 else
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002610 vf_admin->default_vlan = vlan;
2611 vf_admin->default_qos = qos;
2612
Rony Efraim0a6eac22013-06-27 19:05:22 +03002613 if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
2614 mlx4_info(dev,
2615 "updating vf %d port %d config will take effect on next VF restart\n",
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002616 vf, port);
Rony Efraim3f7fb022013-04-25 05:22:28 +00002617 return 0;
2618}
2619EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
Rony Efraime6b6a232013-04-25 05:22:29 +00002620
Jack Morgenstein5ea8bbf2014-03-12 12:00:41 +02002621 /* mlx4_get_slave_default_vlan -
 2622 * return true if the slave is in VST mode (i.e. has a default vlan);
 2623 * if so, also return the vlan and qos through the optional out pointers.
 2624 */
2625bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
2626 u16 *vlan, u8 *qos)
2627{
2628 struct mlx4_vport_oper_state *vp_oper;
2629 struct mlx4_priv *priv;
2630
2631 priv = mlx4_priv(dev);
Matan Baraka91c7722014-09-10 16:41:53 +03002632 port = mlx4_slaves_closest_port(dev, slave, port);
Jack Morgenstein5ea8bbf2014-03-12 12:00:41 +02002633 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2634
2635 if (MLX4_VGT != vp_oper->state.default_vlan) {
2636 if (vlan)
2637 *vlan = vp_oper->state.default_vlan;
2638 if (qos)
2639 *qos = vp_oper->state.default_qos;
2640 return true;
2641 }
2642 return false;
2643}
2644EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
2645
Rony Efraime6b6a232013-04-25 05:22:29 +00002646int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
2647{
2648 struct mlx4_priv *priv = mlx4_priv(dev);
2649 struct mlx4_vport_state *s_info;
2650 int slave;
2651
2652 if ((!mlx4_is_master(dev)) ||
2653 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
2654 return -EPROTONOSUPPORT;
2655
2656 slave = mlx4_get_slave_indx(dev, vf);
2657 if (slave < 0)
2658 return -EINVAL;
2659
Matan Baraka91c7722014-09-10 16:41:53 +03002660 port = mlx4_slaves_closest_port(dev, slave, port);
Rony Efraime6b6a232013-04-25 05:22:29 +00002661 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2662 s_info->spoofchk = setting;
2663
2664 return 0;
2665}
2666EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
Rony Efraim2cccb9e2013-04-25 05:22:30 +00002667
2668int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
2669{
2670 struct mlx4_priv *priv = mlx4_priv(dev);
2671 struct mlx4_vport_state *s_info;
2672 int slave;
2673
2674 if (!mlx4_is_master(dev))
2675 return -EPROTONOSUPPORT;
2676
2677 slave = mlx4_get_slave_indx(dev, vf);
2678 if (slave < 0)
2679 return -EINVAL;
2680
2681 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2682 ivf->vf = vf;
2683
 2684 /* unpack the administered 48-bit MAC into the byte array, most significant byte first */
2685 ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
2686 ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
2687 ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
2688 ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
2689 ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
2690 ivf->mac[5] = ((s_info->mac) & 0xff);
2691
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04002692 ivf->vlan = s_info->default_vlan;
2693 ivf->qos = s_info->default_qos;
2694 ivf->max_tx_rate = s_info->tx_rate;
2695 ivf->min_tx_rate = 0;
2696 ivf->spoofchk = s_info->spoofchk;
2697 ivf->linkstate = s_info->link_state;
Rony Efraim2cccb9e2013-04-25 05:22:30 +00002698
2699 return 0;
2700}
2701EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
Rony Efraim948e3062013-06-13 13:19:11 +03002702
2703int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
2704{
2705 struct mlx4_priv *priv = mlx4_priv(dev);
2706 struct mlx4_vport_state *s_info;
Rony Efraim948e3062013-06-13 13:19:11 +03002707 int slave;
2708 u8 link_stat_event;
2709
2710 slave = mlx4_get_slave_indx(dev, vf);
2711 if (slave < 0)
2712 return -EINVAL;
2713
Matan Baraka91c7722014-09-10 16:41:53 +03002714 port = mlx4_slaves_closest_port(dev, slave, port);
Rony Efraim948e3062013-06-13 13:19:11 +03002715 switch (link_state) {
2716 case IFLA_VF_LINK_STATE_AUTO:
2717 /* get current link state */
2718 if (!priv->sense.do_sense_port[port])
2719 link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2720 else
2721 link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2722 break;
2723
2724 case IFLA_VF_LINK_STATE_ENABLE:
2725 link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2726 break;
2727
2728 case IFLA_VF_LINK_STATE_DISABLE:
2729 link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2730 break;
2731
2732 default:
2733 mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
2734 link_state, slave, port);
2735 return -EINVAL;
 2736 }
Rony Efraim948e3062013-06-13 13:19:11 +03002737 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
Rony Efraim948e3062013-06-13 13:19:11 +03002738 s_info->link_state = link_state;
Rony Efraim948e3062013-06-13 13:19:11 +03002739
2740 /* send event */
2741 mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
Rony Efraim0a6eac22013-06-27 19:05:22 +03002742
2743 if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
2744 mlx4_dbg(dev,
2745 "updating vf %d port %d no link state HW enforcment\n",
2746 vf, port);
Rony Efraim948e3062013-06-13 13:19:11 +03002747 return 0;
2748}
2749EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
Jack Morgenstein97982f52014-05-29 16:31:02 +03002750
2751int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
2752{
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03002753 struct mlx4_priv *priv = mlx4_priv(dev);
2754
2755 if (slave < 1 || slave >= dev->num_slaves ||
2756 port < 1 || port > MLX4_MAX_PORTS)
2757 return 0;
2758
2759 return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
2760 MLX4_VF_SMI_ENABLED;
Jack Morgenstein97982f52014-05-29 16:31:02 +03002761}
2762EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
Jack Morgenstein65fed8a2014-05-29 16:31:04 +03002763
2764int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
2765{
2766 struct mlx4_priv *priv = mlx4_priv(dev);
2767
2768 if (slave == mlx4_master_func_num(dev))
2769 return 1;
2770
2771 if (slave < 1 || slave >= dev->num_slaves ||
2772 port < 1 || port > MLX4_MAX_PORTS)
2773 return 0;
2774
2775 return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
2776 MLX4_VF_SMI_ENABLED;
2777}
2778EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
2779
2780int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
2781 int enabled)
2782{
2783 struct mlx4_priv *priv = mlx4_priv(dev);
2784
2785 if (slave == mlx4_master_func_num(dev))
2786 return 0;
2787
2788 if (slave < 1 || slave >= dev->num_slaves ||
2789 port < 1 || port > MLX4_MAX_PORTS ||
2790 enabled < 0 || enabled > 1)
2791 return -EINVAL;
2792
2793 priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
2794 return 0;
2795}
2796EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);