/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/device.h>
#include <linux/semaphore.h>
#include <rdma/ib_smi.h>
#include <linux/delay.h>

#include <asm/io.h>

#include "mlx4.h"
#include "fw.h"

#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK 0xffffffffffffff00ULL

#define CMD_CHAN_VER 1
#define CMD_CHAN_IF_REV 1

enum {
	/* command completed successfully: */
	CMD_STAT_OK = 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR = 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP = 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM = 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE = 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE = 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY = 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM = 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE = 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX = 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM = 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR = 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE = 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM = 0x20,
	/* Memory Region has Memory Windows bound to it: */
	CMD_STAT_REG_BOUND = 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE = 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT = 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE = 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ = 0x50,
};

enum {
	HCR_IN_PARAM_OFFSET = 0x00,
	HCR_IN_MODIFIER_OFFSET = 0x08,
	HCR_OUT_PARAM_OFFSET = 0x0c,
	HCR_TOKEN_OFFSET = 0x14,
	HCR_STATUS_OFFSET = 0x18,

	HCR_OPMOD_SHIFT = 12,
	HCR_T_BIT = 21,
	HCR_E_BIT = 22,
	HCR_GO_BIT = 23
};

enum {
	GO_BIT_TIMEOUT_MSECS = 10000
};

enum mlx4_vlan_transition {
	MLX4_VLAN_TRANSITION_VST_VST = 0,
	MLX4_VLAN_TRANSITION_VST_VGT = 1,
	MLX4_VLAN_TRANSITION_VGT_VST = 2,
	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
};


struct mlx4_cmd_context {
	struct completion done;
	int result;
	int next;
	u64 out_param;
	u16 token;
	u8 fw_status;
};

static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr);

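/*
 * Translate a firmware command status code into a negative errno value.
 * Unknown or unexpected status codes fall back to -EIO.
 */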
static int mlx4_status_to_errno(u8 status)
{
	static const int trans_table[] = {
		[CMD_STAT_INTERNAL_ERR] = -EIO,
		[CMD_STAT_BAD_OP] = -EPERM,
		[CMD_STAT_BAD_PARAM] = -EINVAL,
		[CMD_STAT_BAD_SYS_STATE] = -ENXIO,
		[CMD_STAT_BAD_RESOURCE] = -EBADF,
		[CMD_STAT_RESOURCE_BUSY] = -EBUSY,
		[CMD_STAT_EXCEED_LIM] = -ENOMEM,
		[CMD_STAT_BAD_RES_STATE] = -EBADF,
		[CMD_STAT_BAD_INDEX] = -EBADF,
		[CMD_STAT_BAD_NVMEM] = -EFAULT,
		[CMD_STAT_ICM_ERROR] = -ENFILE,
		[CMD_STAT_BAD_QP_STATE] = -EINVAL,
		[CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
		[CMD_STAT_REG_BOUND] = -EBUSY,
		[CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
		[CMD_STAT_BAD_PKT] = -EINVAL,
		[CMD_STAT_BAD_SIZE] = -ENOMEM,
		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
	};

	if (status >= ARRAY_SIZE(trans_table) ||
	    (status != CMD_STAT_OK && trans_table[status] == 0))
		return -EIO;

	return trans_table[status];
}

static u8 mlx4_errno_to_status(int errno)
{
	switch (errno) {
	case -EPERM:
		return CMD_STAT_BAD_OP;
	case -EINVAL:
		return CMD_STAT_BAD_PARAM;
	case -ENXIO:
		return CMD_STAT_BAD_SYS_STATE;
	case -EBUSY:
		return CMD_STAT_RESOURCE_BUSY;
	case -ENOMEM:
		return CMD_STAT_EXCEED_LIM;
	case -ENFILE:
		return CMD_STAT_ICM_ERROR;
	default:
		return CMD_STAT_INTERNAL_ERR;
	}
}

static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
				       u8 op_modifier)
{
	switch (op) {
	case MLX4_CMD_UNMAP_ICM:
	case MLX4_CMD_UNMAP_ICM_AUX:
	case MLX4_CMD_UNMAP_FA:
	case MLX4_CMD_2RST_QP:
	case MLX4_CMD_HW2SW_EQ:
	case MLX4_CMD_HW2SW_CQ:
	case MLX4_CMD_HW2SW_SRQ:
	case MLX4_CMD_HW2SW_MPT:
	case MLX4_CMD_CLOSE_HCA:
	case MLX4_QP_FLOW_STEERING_DETACH:
	case MLX4_CMD_FREE_RES:
	case MLX4_CMD_CLOSE_PORT:
		return CMD_STAT_OK;

	case MLX4_CMD_QP_ATTACH:
		/* On the Detach case, return success */
		if (op_modifier == 0)
			return CMD_STAT_OK;
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	default:
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	}
}

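/*
 * Decide whether an error returned by one of the closing/teardown
 * commands must be treated as fatal for the device.
 */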
static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
{
	/* Any error during the closing commands below is considered fatal */
	if (op == MLX4_CMD_CLOSE_HCA ||
	    op == MLX4_CMD_HW2SW_EQ ||
	    op == MLX4_CMD_HW2SW_CQ ||
	    op == MLX4_CMD_2RST_QP ||
	    op == MLX4_CMD_HW2SW_SRQ ||
	    op == MLX4_CMD_SYNC_TPT ||
	    op == MLX4_CMD_UNMAP_ICM ||
	    op == MLX4_CMD_UNMAP_ICM_AUX ||
	    op == MLX4_CMD_UNMAP_FA)
		return 1;
	/* An error on MLX4_CMD_HW2SW_MPT is fatal except when the fw status
	 * equals CMD_STAT_REG_BOUND.
	 * That status indicates the memory region still has memory windows
	 * bound to it, which may result from invalid user-space usage and
	 * is not fatal.
	 */
	if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
		return 1;
	return 0;
}

static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
			       int err)
{
	/* Only if the reset flow is really active is the return code based
	 * on the command; otherwise the current error code is returned.
	 */
	if (mlx4_internal_err_reset) {
		mlx4_enter_error_state(dev->persist);
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
	}

	return err;
}

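/*
 * A command is still pending on the comm channel as long as the toggle
 * bit reported by the master (slave_read) differs from our own toggle.
 */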
static int comm_pending(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 status = readl(&priv->mfunc.comm->slave_read);

	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}

static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 val;

	priv->cmd.comm_toggle ^= 1;
	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
	__raw_writel((__force u32) cpu_to_be32(val),
		     &priv->mfunc.comm->slave_write);
	mmiowb();
}

static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
			      unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long end;
	int err = 0;
	int ret_from_pending = 0;

	/* First, verify that the master reports correct status */
	if (comm_pending(dev)) {
		mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
			  priv->cmd.comm_toggle, cmd);
		return -EAGAIN;
	}

	/* Write command */
	down(&priv->cmd.poll_sem);
	mlx4_comm_cmd_post(dev, cmd, param);

	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();
	ret_from_pending = comm_pending(dev);
	if (ret_from_pending) {
		/* Check if the slave is trying to boot in the middle of the
		 * FLR process. The only non-zero result of the RESET command
		 * is MLX4_DELAY_RESET_SLAVE.
		 */
		if (MLX4_COMM_CMD_RESET == cmd) {
			err = MLX4_DELAY_RESET_SLAVE;
		} else {
			mlx4_warn(dev, "Communication channel timed out\n");
			err = -ETIMEDOUT;
		}
	}

	up(&priv->cmd.poll_sem);
	return err;
}


static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
			      u16 param, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	unsigned long end;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	reinit_completion(&context->done);

	mlx4_comm_cmd_post(dev, op, param);

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		mlx4_warn(dev, "communication channel command 0x%x timed out\n",
			  op);
		err = -EBUSY;
		goto out;
	}

	err = context->result;
	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, context->fw_status);
		goto out;
	}

out:
	/* Wait for the comm channel to become ready; this is necessary to
	 * prevent a race when switching between event and polling mode.
	 */
	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();

	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}

int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
		  unsigned long timeout)
{
	if (mlx4_priv(dev)->cmd.use_events)
		return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
}

static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status;

	if (pci_channel_offline(dev->persist->pdev))
		return -EIO;

	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}

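/*
 * Write a command to the HCR. The caller provides the token used to
 * match the eventual completion; 'event' selects EQE-based completion
 * rather than polling on the go bit.
 */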
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EIO;
	unsigned long end;

	mutex_lock(&dev->persist->device_state_mutex);
	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the chip was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	if (pci_channel_offline(dev->persist->pdev) ||
	    (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		goto out;
	}

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			goto out;
		}

		if (time_after_eq(jiffies, end)) {
			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
			goto out;
		}
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
					       (cmd->toggle << HCR_T_BIT) |
					       (event ? (1 << HCR_E_BIT) : 0) |
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	if (ret)
		mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
			  op, ret, in_param, in_modifier, op_modifier);
	mutex_unlock(&dev->persist->device_state_mutex);

	return ret;
}

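/*
 * Issue a command through the virtual HCR: the master processes its own
 * vHCR directly, while a slave fills the vHCR and kicks the master over
 * the comm channel with VHCR_POST.
 */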
static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			  int out_is_imm, u32 in_modifier, u8 op_modifier,
			  u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
	int ret;

	mutex_lock(&priv->cmd.slave_cmd_mutex);

	vhcr->in_param = cpu_to_be64(in_param);
	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
	vhcr->in_modifier = cpu_to_be32(in_modifier);
	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
	vhcr->status = 0;
	vhcr->flags = !!(priv->cmd.use_events) << 6;

	if (mlx4_is_master(dev)) {
		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
						 op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		}
	} else {
		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
				    MLX4_COMM_TIME + timeout);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
						 op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		} else
			mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
				 op);
	}

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return ret;
}

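/*
 * Execute a command in polling mode: post it to the HCR and busy-wait
 * (with cond_resched()) until the go bit clears or the timeout expires.
 */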
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;
	u32 stat;

	down(&priv->cmd.poll_sem);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		goto out;
	}

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out_reset;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			err = -EIO;
			goto out_reset;
		}

		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
			goto out;
		}

		cond_resched();
	}

	if (cmd_pending(dev)) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		err = -EIO;
		goto out_reset;
	}

	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
	stat = be32_to_cpu((__force __be32)
			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
	err = mlx4_status_to_errno(stat);
	if (err) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, stat);
		if (mlx4_closing_cmd_fatal_error(op, stat))
			goto out_reset;
		goto out;
	}

out_reset:
	if (err)
		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	up(&priv->cmd.poll_sem);
	return err;
}

void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context =
		&priv->cmd.context[token & priv->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->fw_status = status;
	context->result = mlx4_status_to_errno(status);
	context->out_param = out_param;

	complete(&context->done);
}

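/*
 * Execute a command in event mode: post it with a per-context token and
 * sleep until mlx4_cmd_event() completes the context from the EQ handler.
 */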
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	reinit_completion(&context->done);

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, context->token, 1);
	if (err)
		goto out_reset;

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		err = -EIO;
		goto out_reset;
	}

	err = context->result;
	if (err) {
		/* Since we do not want this error message always displayed
		 * at driver start when there are ConnectX2 HCAs on the host,
		 * we demote the message for this specific
		 * command/input_mod/opcode_mod/fw-status combination to
		 * debug level.
		 */
		if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
		    op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
			mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		else
			mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
			goto out_reset;

		goto out;
	}

	if (out_is_imm)
		*out_param = context->out_param;

out_reset:
	if (err)
		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}

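/*
 * Main command dispatcher: native commands (or any command on a
 * single-function device) go straight to the HCR in event or polling
 * mode; wrapped commands on a multi-function device go through the
 * virtual HCR path.
 */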
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
	       int out_is_imm, u32 in_modifier, u8 op_modifier,
	       u16 op, unsigned long timeout, int native)
{
	if (pci_channel_offline(dev->persist->pdev))
		return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);

	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			return mlx4_internal_err_ret_value(dev, op,
							   op_modifier);
		if (mlx4_priv(dev)->cmd.use_events)
			return mlx4_cmd_wait(dev, in_param, out_param,
					     out_is_imm, in_modifier,
					     op_modifier, op, timeout);
		else
			return mlx4_cmd_poll(dev, in_param, out_param,
					     out_is_imm, in_modifier,
					     op_modifier, op, timeout);
	}
	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
			      in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);


int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

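/*
 * DMA a buffer between master and slave memory using the ACCESS_MEM
 * firmware command. The parameter check below requires 4K-aligned
 * addresses, a slave id below 128 and a size that is a multiple of 256.
 */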
static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
			   int slave, u64 slave_addr,
			   int size, int is_read)
{
	u64 in_param;
	u64 out_param;

	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
	    (slave & ~0x7f) | (size & 0xff)) {
		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
			 slave_addr, master_addr, slave, size);
		return -EINVAL;
	}

	if (is_read) {
		in_param = (u64) slave | slave_addr;
		out_param = (u64) dev->caps.function | master_addr;
	} else {
		in_param = (u64) dev->caps.function | master_addr;
		out_param = (u64) slave | slave_addr;
	}

	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
			    MLX4_CMD_ACCESS_MEM,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

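/*
 * Read one 32-entry block of the physical pkey table for 'port' via a
 * MAD_IFC PKEY_TABLE query; 'index' must be a multiple of 32.
 */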
static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox)
{
	struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
	int err;
	int i;

	if (index & 0x1f)
		return -EINVAL;

	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;

	for (i = 0; i < 32; ++i)
		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);

	return err;
}

static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox)
{
	int i;
	int err;

	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
		if (err)
			return err;
	}

	return 0;
}
#define PORT_CAPABILITY_LOCATION_IN_SMP 20
#define PORT_STATE_OFFSET 32

static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
{
	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
		return IB_PORT_ACTIVE;
	else
		return IB_PORT_DOWN;
}

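/*
 * Paravirtualize MAD_IFC for slaves: pkey table and GUID queries are
 * remapped to the slave's view, PortInfo is adjusted to the VF port
 * state and capabilities, and unprivileged VFs are restricted to
 * host-view LID-routed Get MADs.
 */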
static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct ib_smp *smp = inbox->buf;
	u32 index;
	u8 port;
	u8 opcode_modifier;
	u16 *table;
	int err;
	int vidx, pidx;
	int network_view;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct ib_smp *outsmp = outbox->buf;
	__be16 *outtab = (__be16 *)(outsmp->data);
	__be32 slave_cap_mask;
	__be64 slave_node_guid;

	port = vhcr->in_modifier;

	/* network-view bit is for driver use only, and should not be passed to FW */
	opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
	network_view = !!(vhcr->op_modifier & 0x8);

	if (smp->base_version == 1 &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
	    smp->class_version == 1) {
		/* host view is paravirtualized */
		if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
				index = be32_to_cpu(smp->attr_mod);
				if (port < 1 || port > dev->caps.num_ports)
					return -EINVAL;
				table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
				if (!table)
					return -ENOMEM;
				/* need to get the full pkey table because the paravirtualized
				 * pkeys may be scattered among several pkey blocks.
				 */
				err = get_full_pkey_table(dev, port, table, inbox, outbox);
				if (!err) {
					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
					}
				}
				kfree(table);
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
				/* get the slave specific caps: */
				/* do the command */
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
						   vhcr->in_modifier, opcode_modifier,
						   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				/* modify the response for slaves */
				if (!err && slave != mlx4_master_func_num(dev)) {
					u8 *state = outsmp->data + PORT_STATE_OFFSET;

					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
				}
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
				/* compute slave's gid block */
				smp->attr_mod = cpu_to_be32(slave / 8);
				/* execute cmd */
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
						   vhcr->in_modifier, opcode_modifier,
						   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				if (!err) {
					/* if needed, move slave gid to index 0 */
					if (slave % 8)
						memcpy(outsmp->data,
						       outsmp->data + (slave % 8) * 8, 8);
					/* delete all other gids */
					memset(outsmp->data + 8, 0, 56);
				}
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
						   vhcr->in_modifier, opcode_modifier,
						   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				if (!err) {
					slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
					memcpy(outsmp->data + 12, &slave_node_guid, 8);
				}
				return err;
			}
		}
	}

	/* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
	 * These are the MADs used by ib verbs (such as ib_query_gids).
	 */
	if (slave != mlx4_master_func_num(dev) &&
	    !mlx4_vf_smi_enabled(dev, slave, port)) {
		if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
		      smp->method == IB_MGMT_METHOD_GET) || network_view) {
			mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
				 slave, smp->method, smp->mgmt_class,
				 network_view ? "Network" : "Host",
				 be16_to_cpu(smp->attr_id));
			return -EPERM;
		}
	}

	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
			    vhcr->in_modifier, opcode_modifier,
			    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}

static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
				  struct mlx4_vhcr *vhcr,
				  struct mlx4_cmd_mailbox *inbox,
				  struct mlx4_cmd_mailbox *outbox,
				  struct mlx4_cmd_info *cmd)
{
	return -EPERM;
}

int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd)
{
	u64 in_param;
	u64 out_param;
	int err;

	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
	if (cmd->encode_slave_id) {
		in_param &= 0xffffffffffffff00ll;
		in_param |= slave;
	}

	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	if (cmd->out_is_imm)
		vhcr->out_param = out_param;

	return err;
}

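/*
 * Dispatch table for commands arriving through the virtual HCR: each
 * entry describes the mailboxes a command uses and the wrapper (if any)
 * that validates and executes it on behalf of a slave.
 */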
static struct mlx4_cmd_info cmd_info[] = {
	{
		.opcode = MLX4_CMD_QUERY_FW,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_FW_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_HCA,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_QUERY_DEV_CAP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_ADAPTER,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_INIT_PORT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_CLOSE_PORT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CLOSE_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_PORT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_PORT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_MAP_EQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MAP_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_EQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_NOP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_CONFIG_DEV,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CONFIG_DEV_wrapper
	},
	{
		.opcode = MLX4_CMD_ALLOC_RES,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ALLOC_RES_wrapper
	},
	{
		.opcode = MLX4_CMD_FREE_RES,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_FREE_RES_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_MPT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_MPT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_MPT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_READ_MTT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_WRITE_MTT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_WRITE_MTT_wrapper
	},
	{
		.opcode = MLX4_CMD_SYNC_TPT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_HW2SW_EQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_EQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_QUERY_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_CQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_CQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_CQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_MODIFY_CQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MODIFY_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_SRQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_SRQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_SRQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_ARM_SRQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ARM_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_RST2INIT_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_RST2INIT_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_INIT2INIT_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT2INIT_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_INIT2RTR_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT2RTR_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTR2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_RTR2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_RTS2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQERR2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQERR2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2ERR_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2SQD_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2SQD_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQD2SQD_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQD2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2RST_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_2RST_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_QP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SUSPEND_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UNSUSPEND_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UPDATE_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_UPDATE_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_GET_OP_REQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper,
	},
	{
		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL, /* XXX verify: only demux can do this */
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_MAD_IFC,
		.has_inbox = true,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MAD_IFC_wrapper
	},
	{
		.opcode = MLX4_CMD_MAD_DEMUX,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_IF_STAT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_IF_STAT_wrapper
	},
	{
		.opcode = MLX4_CMD_ACCESS_REG,
		.has_inbox = true,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ACCESS_REG_wrapper,
	},
	/* Native multicast commands are not available for guests */
	{
		.opcode = MLX4_CMD_QP_ATTACH,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_ATTACH_wrapper
	},
	{
		.opcode = MLX4_CMD_PROMISC,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_PROMISC_wrapper
	},
	/* Ethernet specific commands */
	{
		.opcode = MLX4_CMD_SET_VLAN_FLTR,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_MCAST_FLTR,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_DUMP_ETH_STATS,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
	},
	{
		.opcode = MLX4_CMD_INFORM_FLR_DONE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	/* flow steering commands */
	{
		.opcode = MLX4_QP_FLOW_STEERING_ATTACH,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
	},
	{
		.opcode = MLX4_QP_FLOW_STEERING_DETACH,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
	},
	{
		.opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
};

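/*
 * Master-side processing of a slave's virtual HCR: DMA the vHCR in,
 * look the opcode up in cmd_info[], copy in/out mailboxes as needed,
 * run the wrapper (or the real command), and DMA the status back.
 */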
1535static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1536 struct mlx4_vhcr_cmd *in_vhcr)
1537{
1538 struct mlx4_priv *priv = mlx4_priv(dev);
1539 struct mlx4_cmd_info *cmd = NULL;
1540 struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1541 struct mlx4_vhcr *vhcr;
1542 struct mlx4_cmd_mailbox *inbox = NULL;
1543 struct mlx4_cmd_mailbox *outbox = NULL;
1544 u64 in_param;
1545 u64 out_param;
1546 int ret = 0;
1547 int i;
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001548 int err = 0;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001549
1550 /* Create sw representation of Virtual HCR */
1551 vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
1552 if (!vhcr)
1553 return -ENOMEM;
1554
1555 /* DMA in the vHCR */
1556 if (!in_vhcr) {
1557 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1558 priv->mfunc.master.slave_state[slave].vhcr_dma,
1559 ALIGN(sizeof(struct mlx4_vhcr_cmd),
1560 MLX4_ACCESS_MEM_ALIGN), 1);
1561 if (ret) {
Joe Perches1a91de22014-05-07 12:52:57 -07001562 mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1563 __func__, ret);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001564 kfree(vhcr);
1565 return ret;
1566 }
1567 }
1568
1569 /* Fill SW VHCR fields */
1570 vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1571 vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1572 vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1573 vhcr->token = be16_to_cpu(vhcr_cmd->token);
1574 vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1575 vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1576 vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1577
1578 /* Lookup command */
1579 for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
1580 if (vhcr->op == cmd_info[i].opcode) {
1581 cmd = &cmd_info[i];
1582 break;
1583 }
1584 }
1585 if (!cmd) {
1586 mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
1587 vhcr->op, slave);
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001588 vhcr_cmd->status = CMD_STAT_BAD_PARAM;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001589 goto out_status;
1590 }
1591
1592 /* Read inbox */
1593 if (cmd->has_inbox) {
1594 vhcr->in_param &= INBOX_MASK;
1595 inbox = mlx4_alloc_cmd_mailbox(dev);
1596 if (IS_ERR(inbox)) {
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001597 vhcr_cmd->status = CMD_STAT_BAD_SIZE;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001598 inbox = NULL;
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001599 goto out_status;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001600 }
1601
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001602 if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1603 vhcr->in_param,
1604 MLX4_MAILBOX_SIZE, 1)) {
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001605 mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1606 __func__, cmd->opcode);
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001607 vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1608 goto out_status;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001609 }
1610 }
1611
1612 /* Apply permission and bound checks if applicable */
1613 if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
Joe Perches1a91de22014-05-07 12:52:57 -07001614 mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
1615 vhcr->op, slave, vhcr->in_modifier);
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001616 vhcr_cmd->status = CMD_STAT_BAD_OP;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001617 goto out_status;
1618 }
1619
1620 /* Allocate outbox */
1621 if (cmd->has_outbox) {
1622 outbox = mlx4_alloc_cmd_mailbox(dev);
1623 if (IS_ERR(outbox)) {
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001624 vhcr_cmd->status = CMD_STAT_BAD_SIZE;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001625 outbox = NULL;
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001626 goto out_status;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001627 }
1628 }
1629
1630 /* Execute the command! */
1631 if (cmd->wrapper) {
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001632 err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
1633 cmd);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001634 if (cmd->out_is_imm)
1635 vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1636 } else {
1637 in_param = cmd->has_inbox ? (u64) inbox->dma :
1638 vhcr->in_param;
1639 out_param = cmd->has_outbox ? (u64) outbox->dma :
1640 vhcr->out_param;
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001641 err = __mlx4_cmd(dev, in_param, &out_param,
1642 cmd->out_is_imm, vhcr->in_modifier,
1643 vhcr->op_modifier, vhcr->op,
1644 MLX4_CMD_TIME_CLASS_A,
1645 MLX4_CMD_NATIVE);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001646
1647 if (cmd->out_is_imm) {
1648 vhcr->out_param = out_param;
1649 vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1650 }
1651 }
1652
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001653 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001654 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001655 vhcr->op, slave, vhcr->errno, err);
1656 vhcr_cmd->status = mlx4_errno_to_status(err);
1657 goto out_status;
1658 }
1659
1660
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001661 /* Write outbox if command completed successfully */
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001662 if (cmd->has_outbox && !vhcr_cmd->status) {
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001663 ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
1664 vhcr->out_param,
1665 MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
1666 if (ret) {
Yevgeny Petrilin72be84f2011-12-19 04:03:53 +00001667 /* If we failed to write back the outbox after the
 1668 * command was successfully executed, we must fail this
 1669 * slave, as it is now in an undefined state */
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001670 mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
1671 goto out;
1672 }
1673 }
1674
1675out_status:
1676 /* DMA back vhcr result */
1677 if (!in_vhcr) {
1678 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1679 priv->mfunc.master.slave_state[slave].vhcr_dma,
1680 ALIGN(sizeof(struct mlx4_vhcr),
1681 MLX4_ACCESS_MEM_ALIGN),
1682 MLX4_CMD_WRAPPED);
1683 if (ret)
1684 mlx4_err(dev, "%s:Failed writing vhcr result\n",
1685 __func__);
1686 else if (vhcr->e_bit &&
1687 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
Joe Perches1a91de22014-05-07 12:52:57 -07001688 mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
1689 slave);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001690 }
1691
1692out:
1693 kfree(vhcr);
1694 mlx4_free_cmd_mailbox(dev, inbox);
1695 mlx4_free_cmd_mailbox(dev, outbox);
1696 return ret;
1697}
1698
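/*
 * mlx4_master_immediate_activate_vlan_qos - for an active VF on a device
 * that supports UPDATE_QP, push the admin VLAN/QoS/link-state settings into
 * the operational state without waiting for a VF restart. The per-QP work
 * is deferred to mlx4_vf_immed_vlan_work_handler on the comm workqueue.
 */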
Jingoo Hanf0946682013-08-05 18:04:51 +09001699static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001700 int slave, int port)
1701{
1702 struct mlx4_vport_oper_state *vp_oper;
1703 struct mlx4_vport_state *vp_admin;
1704 struct mlx4_vf_immed_vlan_work *work;
Rony Efraim0a6eac22013-06-27 19:05:22 +03001705 struct mlx4_dev *dev = &(priv->dev);
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001706 int err;
1707 int admin_vlan_ix = NO_INDX;
1708
1709 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1710 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1711
1712 if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
Rony Efraim0a6eac22013-06-27 19:05:22 +03001713 vp_oper->state.default_qos == vp_admin->default_qos &&
1714 vp_oper->state.link_state == vp_admin->link_state)
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001715 return 0;
1716
Rony Efraim0a6eac22013-06-27 19:05:22 +03001717 if (!(priv->mfunc.master.slave_state[slave].active &&
Rony Efraimf0f829b2013-11-07 12:19:51 +02001718 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
Rony Efraim0a6eac22013-06-27 19:05:22 +03001719 /* even if the UPDATE_QP command isn't supported, we still want
1720 * to set this VF link according to the admin directive
1721 */
1722 vp_oper->state.link_state = vp_admin->link_state;
1723 return -1;
1724 }
1725
 1726 mlx4_dbg(dev, "immediately updating admin params for slave %d port %d\n",
1727 slave, port);
Joe Perches1a91de22014-05-07 12:52:57 -07001728 mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
1729 vp_admin->default_vlan, vp_admin->default_qos,
1730 vp_admin->link_state);
Rony Efraim0a6eac22013-06-27 19:05:22 +03001731
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001732 work = kzalloc(sizeof(*work), GFP_KERNEL);
1733 if (!work)
1734 return -ENOMEM;
1735
1736 if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
Rony Efraimf0f829b2013-11-07 12:19:51 +02001737 if (MLX4_VGT != vp_admin->default_vlan) {
1738 err = __mlx4_register_vlan(&priv->dev, port,
1739 vp_admin->default_vlan,
1740 &admin_vlan_ix);
1741 if (err) {
1742 kfree(work);
Joe Perches1a91de22014-05-07 12:52:57 -07001743 mlx4_warn(&priv->dev,
Rony Efraimf0f829b2013-11-07 12:19:51 +02001744 "No vlan resources slave %d, port %d\n",
1745 slave, port);
1746 return err;
1747 }
1748 } else {
1749 admin_vlan_ix = NO_INDX;
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001750 }
1751 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
Joe Perches1a91de22014-05-07 12:52:57 -07001752 mlx4_dbg(&priv->dev,
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001753 "alloc vlan %d idx %d slave %d port %d\n",
1754 (int)(vp_admin->default_vlan),
1755 admin_vlan_ix, slave, port);
1756 }
1757
1758 /* save original vlan ix and vlan id */
1759 work->orig_vlan_id = vp_oper->state.default_vlan;
1760 work->orig_vlan_ix = vp_oper->vlan_idx;
1761
1762 /* handle new qos */
1763 if (vp_oper->state.default_qos != vp_admin->default_qos)
1764 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1765
1766 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1767 vp_oper->vlan_idx = admin_vlan_ix;
1768
1769 vp_oper->state.default_vlan = vp_admin->default_vlan;
1770 vp_oper->state.default_qos = vp_admin->default_qos;
Rony Efraim0a6eac22013-06-27 19:05:22 +03001771 vp_oper->state.link_state = vp_admin->link_state;
1772
1773 if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
1774 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
Jack Morgensteinb01978c2013-06-27 19:05:21 +03001775
1776 /* iterate over QPs owned by this slave, using UPDATE_QP */
1777 work->port = port;
1778 work->slave = slave;
1779 work->qos = vp_oper->state.default_qos;
1780 work->vlan_id = vp_oper->state.default_vlan;
1781 work->vlan_ix = vp_oper->vlan_idx;
1782 work->priv = priv;
1783 INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1784 queue_work(priv->mfunc.master.comm_wq, &work->work);
1785
1786 return 0;
1787}
1788
1789
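/*
 * mlx4_master_activate_admin_state - on slave start-up, copy the per-port
 * admin settings (SMI enablement, VST vlan, spoof-check MAC) into the
 * slave's operational state, registering the vlan and MAC resources with
 * the device as needed.
 */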
Rony Efraim0eb62b92013-04-25 05:22:26 +00001790static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1791{
Rony Efraim3f7fb022013-04-25 05:22:28 +00001792 int port, err;
1793 struct mlx4_vport_state *vp_admin;
1794 struct mlx4_vport_oper_state *vp_oper;
Matan Barak449fc482014-03-19 18:11:52 +02001795 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1796 &priv->dev, slave);
1797 int min_port = find_first_bit(actv_ports.ports,
1798 priv->dev.caps.num_ports) + 1;
1799 int max_port = min_port - 1 +
1800 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
Rony Efraim3f7fb022013-04-25 05:22:28 +00001801
Matan Barak449fc482014-03-19 18:11:52 +02001802 for (port = min_port; port <= max_port; port++) {
1803 if (!test_bit(port - 1, actv_ports.ports))
1804 continue;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03001805 priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
1806 priv->mfunc.master.vf_admin[slave].enable_smi[port];
Rony Efraim3f7fb022013-04-25 05:22:28 +00001807 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1808 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1809 vp_oper->state = *vp_admin;
1810 if (MLX4_VGT != vp_admin->default_vlan) {
1811 err = __mlx4_register_vlan(&priv->dev, port,
1812 vp_admin->default_vlan, &(vp_oper->vlan_idx));
1813 if (err) {
1814 vp_oper->vlan_idx = NO_INDX;
Joe Perches1a91de22014-05-07 12:52:57 -07001815 mlx4_warn(&priv->dev,
Masanari Iida1a84db52014-08-29 23:37:33 +09001816 "No vlan resources slave %d, port %d\n",
Rony Efraim3f7fb022013-04-25 05:22:28 +00001817 slave, port);
1818 return err;
1819 }
Joe Perches1a91de22014-05-07 12:52:57 -07001820 mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
Rony Efraim3f7fb022013-04-25 05:22:28 +00001821 (int)(vp_oper->state.default_vlan),
1822 vp_oper->vlan_idx, slave, port);
1823 }
Rony Efraime6b6a232013-04-25 05:22:29 +00001824 if (vp_admin->spoofchk) {
1825 vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
1826 port,
1827 vp_admin->mac);
1828 if (0 > vp_oper->mac_idx) {
1829 err = vp_oper->mac_idx;
1830 vp_oper->mac_idx = NO_INDX;
Joe Perches1a91de22014-05-07 12:52:57 -07001831 mlx4_warn(&priv->dev,
Masanari Iida1a84db52014-08-29 23:37:33 +09001832 "No mac resources slave %d, port %d\n",
Rony Efraime6b6a232013-04-25 05:22:29 +00001833 slave, port);
1834 return err;
1835 }
Joe Perches1a91de22014-05-07 12:52:57 -07001836 mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
Rony Efraime6b6a232013-04-25 05:22:29 +00001837 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
1838 }
Rony Efraim0eb62b92013-04-25 05:22:26 +00001839 }
1840 return 0;
1841}
1842
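/*
 * mlx4_master_deactivate_admin_state - undo the above on slave shutdown:
 * disable SMI for the slave's ports and unregister any vlan/MAC that the
 * master registered on its behalf.
 */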
Rony Efraim3f7fb022013-04-25 05:22:28 +00001843static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
1844{
1845 int port;
1846 struct mlx4_vport_oper_state *vp_oper;
Matan Barak449fc482014-03-19 18:11:52 +02001847 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1848 &priv->dev, slave);
1849 int min_port = find_first_bit(actv_ports.ports,
1850 priv->dev.caps.num_ports) + 1;
1851 int max_port = min_port - 1 +
1852 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
Rony Efraim3f7fb022013-04-25 05:22:28 +00001853
Matan Barak449fc482014-03-19 18:11:52 +02001854
1855 for (port = min_port; port <= max_port; port++) {
1856 if (!test_bit(port - 1, actv_ports.ports))
1857 continue;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03001858 priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
1859 MLX4_VF_SMI_DISABLED;
Rony Efraim3f7fb022013-04-25 05:22:28 +00001860 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1861 if (NO_INDX != vp_oper->vlan_idx) {
1862 __mlx4_unregister_vlan(&priv->dev,
Jack Morgenstein2009d002013-11-03 10:03:19 +02001863 port, vp_oper->state.default_vlan);
Rony Efraim3f7fb022013-04-25 05:22:28 +00001864 vp_oper->vlan_idx = NO_INDX;
1865 }
Rony Efraime6b6a232013-04-25 05:22:29 +00001866 if (NO_INDX != vp_oper->mac_idx) {
Jack Morgensteinc32b7df2013-11-03 10:04:07 +02001867 __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
Rony Efraime6b6a232013-04-25 05:22:29 +00001868 vp_oper->mac_idx = NO_INDX;
1869 }
Rony Efraim3f7fb022013-04-25 05:22:28 +00001870 }
1871 return;
1872}
1873
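/*
 * mlx4_master_do_cmd - handle one command posted by a slave on the comm
 * channel: check the toggle bit, deal with the RESET and FLR special cases,
 * assemble the slave's vHCR DMA address from the VHCR0..VHCR_EN sequence,
 * and process the vHCR itself on VHCR_POST. Protocol violations reset the
 * slave and clean up its resources.
 */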
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001874static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1875 u16 param, u8 toggle)
1876{
1877 struct mlx4_priv *priv = mlx4_priv(dev);
1878 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1879 u32 reply;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001880 u8 is_going_down = 0;
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00001881 int i;
Jack Morgenstein311f8132012-11-27 16:24:30 +00001882 unsigned long flags;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001883
1884 slave_state[slave].comm_toggle ^= 1;
1885 reply = (u32) slave_state[slave].comm_toggle << 31;
1886 if (toggle != slave_state[slave].comm_toggle) {
Joe Perches1a91de22014-05-07 12:52:57 -07001887 mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
1888 toggle, slave);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001889 goto reset_slave;
1890 }
1891 if (cmd == MLX4_COMM_CMD_RESET) {
1892 mlx4_warn(dev, "Received reset from slave:%d\n", slave);
1893 slave_state[slave].active = false;
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02001894 slave_state[slave].old_vlan_api = false;
Rony Efraim3f7fb022013-04-25 05:22:28 +00001895 mlx4_master_deactivate_admin_state(priv, slave);
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00001896 for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
1897 slave_state[slave].event_eq[i].eqn = -1;
1898 slave_state[slave].event_eq[i].token = 0;
1899 }
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001900 /* Check if we are in the middle of the FLR process;
 1901 * if so, return "retry" status to the slave */
Or Gerlitz162344e2012-05-15 10:34:57 +00001902 if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001903 goto inform_slave_state;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001904
Jack Morgensteinfc065732012-08-03 08:40:42 +00001905 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
1906
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001907 /* write the version in the event field */
1908 reply |= mlx4_comm_get_version();
1909
1910 goto reset_slave;
1911 }
 1912 /* command from slave in the middle of FLR */
1913 if (cmd != MLX4_COMM_CMD_RESET &&
1914 MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
Joe Perches1a91de22014-05-07 12:52:57 -07001915 mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
1916 slave, cmd);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001917 return;
1918 }
1919
1920 switch (cmd) {
1921 case MLX4_COMM_CMD_VHCR0:
1922 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
1923 goto reset_slave;
1924 slave_state[slave].vhcr_dma = ((u64) param) << 48;
1925 priv->mfunc.master.slave_state[slave].cookie = 0;
1926 mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
1927 break;
1928 case MLX4_COMM_CMD_VHCR1:
1929 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
1930 goto reset_slave;
1931 slave_state[slave].vhcr_dma |= ((u64) param) << 32;
1932 break;
1933 case MLX4_COMM_CMD_VHCR2:
1934 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
1935 goto reset_slave;
1936 slave_state[slave].vhcr_dma |= ((u64) param) << 16;
1937 break;
1938 case MLX4_COMM_CMD_VHCR_EN:
1939 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
1940 goto reset_slave;
1941 slave_state[slave].vhcr_dma |= param;
Rony Efraim3f7fb022013-04-25 05:22:28 +00001942 if (mlx4_master_activate_admin_state(priv, slave))
1943 goto reset_slave;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001944 slave_state[slave].active = true;
Jack Morgensteinfc065732012-08-03 08:40:42 +00001945 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001946 break;
1947 case MLX4_COMM_CMD_VHCR_POST:
1948 if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
Yishai Hadas55ad3592015-01-25 16:59:42 +02001949 (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
1950 mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
1951 slave, cmd, slave_state[slave].last_cmd);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001952 goto reset_slave;
Yishai Hadas55ad3592015-01-25 16:59:42 +02001953 }
Roland Dreierf3d4c892012-09-25 21:24:07 -07001954
1955 mutex_lock(&priv->cmd.slave_cmd_mutex);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001956 if (mlx4_master_process_vhcr(dev, slave, NULL)) {
Joe Perches1a91de22014-05-07 12:52:57 -07001957 mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
1958 slave);
Roland Dreierf3d4c892012-09-25 21:24:07 -07001959 mutex_unlock(&priv->cmd.slave_cmd_mutex);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001960 goto reset_slave;
1961 }
Roland Dreierf3d4c892012-09-25 21:24:07 -07001962 mutex_unlock(&priv->cmd.slave_cmd_mutex);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001963 break;
1964 default:
1965 mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
1966 goto reset_slave;
1967 }
Jack Morgenstein311f8132012-11-27 16:24:30 +00001968 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001969 if (!slave_state[slave].is_slave_going_down)
1970 slave_state[slave].last_cmd = cmd;
1971 else
1972 is_going_down = 1;
Jack Morgenstein311f8132012-11-27 16:24:30 +00001973 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001974 if (is_going_down) {
Joe Perches1a91de22014-05-07 12:52:57 -07001975 mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00001976 cmd, slave);
1977 return;
1978 }
1979 __raw_writel((__force u32) cpu_to_be32(reply),
1980 &priv->mfunc.comm[slave].slave_read);
1981 mmiowb();
1982
1983 return;
1984
1985reset_slave:
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001986 /* cleanup any slave resources */
Yishai Hadas55ad3592015-01-25 16:59:42 +02001987 if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
1988 mlx4_delete_all_resources_for_slave(dev, slave);
1989
1990 if (cmd != MLX4_COMM_CMD_RESET) {
1991 mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
1992 slave, cmd);
 1993 /* Turn on internal error, letting the slave reset itself immediately;
 1994 * otherwise it might take until the command timeout is reached
1995 */
1996 reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
1997 }
1998
Jack Morgenstein311f8132012-11-27 16:24:30 +00001999 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002000 if (!slave_state[slave].is_slave_going_down)
2001 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
Jack Morgenstein311f8132012-11-27 16:24:30 +00002002 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002003 /* with the slave in the middle of FLR, no need to clean resources again */
2004inform_slave_state:
2005 memset(&slave_state[slave].event_eq, 0,
2006 sizeof(struct mlx4_slave_event_eq_info));
2007 __raw_writel((__force u32) cpu_to_be32(reply),
2008 &priv->mfunc.comm[slave].slave_read);
2009 wmb();
2010}
2011
2012/* master command processing */
2013void mlx4_master_comm_channel(struct work_struct *work)
2014{
2015 struct mlx4_mfunc_master_ctx *master =
2016 container_of(work,
2017 struct mlx4_mfunc_master_ctx,
2018 comm_work);
2019 struct mlx4_mfunc *mfunc =
2020 container_of(master, struct mlx4_mfunc, master);
2021 struct mlx4_priv *priv =
2022 container_of(mfunc, struct mlx4_priv, mfunc);
2023 struct mlx4_dev *dev = &priv->dev;
2024 __be32 *bit_vec;
2025 u32 comm_cmd;
2026 u32 vec;
2027 int i, j, slave;
2028 int toggle;
2029 int served = 0;
2030 int reported = 0;
2031 u32 slt;
2032
2033 bit_vec = master->comm_arm_bit_vector;
2034 for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
2035 vec = be32_to_cpu(bit_vec[i]);
2036 for (j = 0; j < 32; j++) {
2037 if (!(vec & (1 << j)))
2038 continue;
2039 ++reported;
2040 slave = (i * 32) + j;
2041 comm_cmd = swab32(readl(
2042 &mfunc->comm[slave].slave_write));
2043 slt = swab32(readl(&mfunc->comm[slave].slave_read))
2044 >> 31;
2045 toggle = comm_cmd >> 31;
2046 if (toggle != slt) {
2047 if (master->slave_state[slave].comm_toggle
2048 != slt) {
Amir Vadaic20862c2014-05-22 15:55:40 +03002049 pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
2050 slave, slt,
2051 master->slave_state[slave].comm_toggle);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002052 master->slave_state[slave].comm_toggle =
2053 slt;
2054 }
2055 mlx4_master_do_cmd(dev, slave,
2056 comm_cmd >> 16 & 0xff,
2057 comm_cmd & 0xffff, toggle);
2058 ++served;
2059 }
2060 }
2061 }
2062
2063 if (reported && reported != served)
Joe Perches1a91de22014-05-07 12:52:57 -07002064 mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002065 reported, served);
2066
2067 if (mlx4_ARM_COMM_CHANNEL(dev))
2068 mlx4_warn(dev, "Failed to arm comm channel events\n");
2069}
2070
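/*
 * sync_toggles - slave side: align the driver's comm-channel toggle with
 * what the device reports in slave_write/slave_read. If a previous VM left
 * the channel unsynced, both counters are zeroed so this VM starts from a
 * clean toggle.
 */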
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002071static int sync_toggles(struct mlx4_dev *dev)
2072{
2073 struct mlx4_priv *priv = mlx4_priv(dev);
Yishai Hadas55ad3592015-01-25 16:59:42 +02002074 u32 wr_toggle;
2075 u32 rd_toggle;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002076 unsigned long end;
2077
Yishai Hadas55ad3592015-01-25 16:59:42 +02002078 wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
2079 if (wr_toggle == 0xffffffff)
2080 end = jiffies + msecs_to_jiffies(30000);
2081 else
2082 end = jiffies + msecs_to_jiffies(5000);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002083
2084 while (time_before(jiffies, end)) {
Yishai Hadas55ad3592015-01-25 16:59:42 +02002085 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
2086 if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
2087 /* PCI might be offline */
2088 msleep(100);
2089 wr_toggle = swab32(readl(&priv->mfunc.comm->
2090 slave_write));
2091 continue;
2092 }
2093
2094 if (rd_toggle >> 31 == wr_toggle >> 31) {
2095 priv->cmd.comm_toggle = rd_toggle >> 31;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002096 return 0;
2097 }
2098
2099 cond_resched();
2100 }
2101
2102 /*
2103 * we could reach here if for example the previous VM using this
2104 * function misbehaved and left the channel with unsynced state. We
2105 * should fix this here and give this VM a chance to use a properly
2106 * synced channel
2107 */
2108 mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
2109 __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
2110 __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
2111 priv->cmd.comm_toggle = 0;
2112
2113 return 0;
2114}
2115
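/*
 * mlx4_multi_func_init - map the comm channel and, on the master, allocate
 * the per-slave state arrays, set up the comm/slave-event/FLR work items,
 * the comm workqueue and the resource tracker; on a slave, just synchronize
 * the comm-channel toggle.
 */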
2116int mlx4_multi_func_init(struct mlx4_dev *dev)
2117{
2118 struct mlx4_priv *priv = mlx4_priv(dev);
2119 struct mlx4_slave_state *s_state;
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00002120 int i, j, err, port;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002121
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002122 if (mlx4_is_master(dev))
2123 priv->mfunc.comm =
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002124 ioremap(pci_resource_start(dev->persist->pdev,
2125 priv->fw.comm_bar) +
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002126 priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2127 else
2128 priv->mfunc.comm =
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002129 ioremap(pci_resource_start(dev->persist->pdev, 2) +
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002130 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2131 if (!priv->mfunc.comm) {
Joe Perches1a91de22014-05-07 12:52:57 -07002132 mlx4_err(dev, "Couldn't map communication vector\n");
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002133 goto err_vhcr;
2134 }
2135
2136 if (mlx4_is_master(dev)) {
2137 priv->mfunc.master.slave_state =
2138 kzalloc(dev->num_slaves *
2139 sizeof(struct mlx4_slave_state), GFP_KERNEL);
2140 if (!priv->mfunc.master.slave_state)
2141 goto err_comm;
2142
Rony Efraim0eb62b92013-04-25 05:22:26 +00002143 priv->mfunc.master.vf_admin =
2144 kzalloc(dev->num_slaves *
2145 sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
2146 if (!priv->mfunc.master.vf_admin)
2147 goto err_comm_admin;
2148
2149 priv->mfunc.master.vf_oper =
2150 kzalloc(dev->num_slaves *
2151 sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
2152 if (!priv->mfunc.master.vf_oper)
2153 goto err_comm_oper;
2154
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002155 for (i = 0; i < dev->num_slaves; ++i) {
2156 s_state = &priv->mfunc.master.slave_state[i];
2157 s_state->last_cmd = MLX4_COMM_CMD_RESET;
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00002158 for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2159 s_state->event_eq[j].eqn = -1;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002160 __raw_writel((__force u32) 0,
2161 &priv->mfunc.comm[i].slave_write);
2162 __raw_writel((__force u32) 0,
2163 &priv->mfunc.comm[i].slave_read);
2164 mmiowb();
2165 for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2166 s_state->vlan_filter[port] =
2167 kzalloc(sizeof(struct mlx4_vlan_fltr),
2168 GFP_KERNEL);
2169 if (!s_state->vlan_filter[port]) {
2170 if (--port)
2171 kfree(s_state->vlan_filter[port]);
2172 goto err_slaves;
2173 }
2174 INIT_LIST_HEAD(&s_state->mcast_filters[port]);
Rony Efraim0eb62b92013-04-25 05:22:26 +00002175 priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
Rony Efraim3f7fb022013-04-25 05:22:28 +00002176 priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
Rony Efraim0eb62b92013-04-25 05:22:26 +00002177 priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
2178 priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002179 }
2180 spin_lock_init(&s_state->lock);
2181 }
2182
Or Gerlitz08ff3232012-10-21 14:59:24 +00002183 memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002184 priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2185 INIT_WORK(&priv->mfunc.master.comm_work,
2186 mlx4_master_comm_channel);
2187 INIT_WORK(&priv->mfunc.master.slave_event_work,
2188 mlx4_gen_slave_eqe);
2189 INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2190 mlx4_master_handle_slave_flr);
2191 spin_lock_init(&priv->mfunc.master.slave_state_lock);
Jack Morgenstein992e8e6e2012-08-03 08:40:54 +00002192 spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002193 priv->mfunc.master.comm_wq =
2194 create_singlethread_workqueue("mlx4_comm");
2195 if (!priv->mfunc.master.comm_wq)
2196 goto err_slaves;
2197
2198 if (mlx4_init_resource_tracker(dev))
2199 goto err_thread;
2200
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002201 } else {
2202 err = sync_toggles(dev);
2203 if (err) {
2204 mlx4_err(dev, "Couldn't sync toggles\n");
2205 goto err_comm;
2206 }
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002207 }
2208 return 0;
2209
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002210err_thread:
2211 flush_workqueue(priv->mfunc.master.comm_wq);
2212 destroy_workqueue(priv->mfunc.master.comm_wq);
2213err_slaves:
2214 while (--i) {
2215 for (port = 1; port <= MLX4_MAX_PORTS; port++)
2216 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2217 }
Rony Efraim0eb62b92013-04-25 05:22:26 +00002218 kfree(priv->mfunc.master.vf_oper);
2219err_comm_oper:
2220 kfree(priv->mfunc.master.vf_admin);
2221err_comm_admin:
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002222 kfree(priv->mfunc.master.slave_state);
2223err_comm:
2224 iounmap(priv->mfunc.comm);
2225err_vhcr:
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002226 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2227 priv->mfunc.vhcr,
2228 priv->mfunc.vhcr_dma);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002229 priv->mfunc.vhcr = NULL;
2230 return -ENOMEM;
2231}
2232
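/*
 * mlx4_cmd_init - bring up the command interface in stages (command struct,
 * HCR mapping, vHCR page, mailbox pci_pool). Each completed stage is
 * recorded in a MLX4_CMD_CLEANUP_* flag so a failure hands exactly those
 * stages to mlx4_cmd_cleanup(), and the guards let the function be called
 * again after a partial teardown.
 */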
Roland Dreier225c7b12007-05-08 18:00:38 -07002233int mlx4_cmd_init(struct mlx4_dev *dev)
2234{
2235 struct mlx4_priv *priv = mlx4_priv(dev);
Matan Barakffc39f62014-11-13 14:45:29 +02002236 int flags = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002237
Matan Barakffc39f62014-11-13 14:45:29 +02002238 if (!priv->cmd.initialized) {
Matan Barakffc39f62014-11-13 14:45:29 +02002239 mutex_init(&priv->cmd.slave_cmd_mutex);
2240 sema_init(&priv->cmd.poll_sem, 1);
2241 priv->cmd.use_events = 0;
2242 priv->cmd.toggle = 1;
2243 priv->cmd.initialized = 1;
2244 flags |= MLX4_CMD_CLEANUP_STRUCT;
2245 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002246
Matan Barakffc39f62014-11-13 14:45:29 +02002247 if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002248 priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
2249 0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002250 if (!priv->cmd.hcr) {
Joe Perches1a91de22014-05-07 12:52:57 -07002251 mlx4_err(dev, "Couldn't map command register\n");
Matan Barakffc39f62014-11-13 14:45:29 +02002252 goto err;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002253 }
Matan Barakffc39f62014-11-13 14:45:29 +02002254 flags |= MLX4_CMD_CLEANUP_HCR;
Roland Dreier225c7b12007-05-08 18:00:38 -07002255 }
2256
Matan Barakffc39f62014-11-13 14:45:29 +02002257 if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002258 priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
2259 PAGE_SIZE,
Roland Dreierf3d4c892012-09-25 21:24:07 -07002260 &priv->mfunc.vhcr_dma,
2261 GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00002262 if (!priv->mfunc.vhcr)
Matan Barakffc39f62014-11-13 14:45:29 +02002263 goto err;
2264
2265 flags |= MLX4_CMD_CLEANUP_VHCR;
Roland Dreierf3d4c892012-09-25 21:24:07 -07002266 }
2267
Matan Barakffc39f62014-11-13 14:45:29 +02002268 if (!priv->cmd.pool) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002269 priv->cmd.pool = pci_pool_create("mlx4_cmd",
2270 dev->persist->pdev,
Matan Barakffc39f62014-11-13 14:45:29 +02002271 MLX4_MAILBOX_SIZE,
2272 MLX4_MAILBOX_SIZE, 0);
2273 if (!priv->cmd.pool)
2274 goto err;
2275
2276 flags |= MLX4_CMD_CLEANUP_POOL;
2277 }
Roland Dreier225c7b12007-05-08 18:00:38 -07002278
2279 return 0;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002280
Matan Barakffc39f62014-11-13 14:45:29 +02002281err:
2282 mlx4_cmd_cleanup(dev, flags);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002283 return -ENOMEM;
Roland Dreier225c7b12007-05-08 18:00:38 -07002284}
2285
Yishai Hadas55ad3592015-01-25 16:59:42 +02002286void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
2287{
2288 struct mlx4_priv *priv = mlx4_priv(dev);
2289 int slave;
2290 u32 slave_read;
2291
2292 /* Report an internal error event to all
2293 * communication channels.
2294 */
2295 for (slave = 0; slave < dev->num_slaves; slave++) {
2296 slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
2297 slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
2298 __raw_writel((__force u32)cpu_to_be32(slave_read),
2299 &priv->mfunc.comm[slave].slave_read);
2300 /* Make sure that our comm channel write doesn't
2301 * get mixed in with writes from another CPU.
2302 */
2303 mmiowb();
2304 }
2305}
2306
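/*
 * mlx4_multi_func_cleanup - tear down what mlx4_multi_func_init() set up:
 * the master's comm workqueue and per-slave state, and the comm channel
 * mapping.
 */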
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002307void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2308{
2309 struct mlx4_priv *priv = mlx4_priv(dev);
2310 int i, port;
2311
2312 if (mlx4_is_master(dev)) {
2313 flush_workqueue(priv->mfunc.master.comm_wq);
2314 destroy_workqueue(priv->mfunc.master.comm_wq);
2315 for (i = 0; i < dev->num_slaves; i++) {
2316 for (port = 1; port <= MLX4_MAX_PORTS; port++)
2317 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2318 }
2319 kfree(priv->mfunc.master.slave_state);
Rony Efraim0eb62b92013-04-25 05:22:26 +00002320 kfree(priv->mfunc.master.vf_admin);
2321 kfree(priv->mfunc.master.vf_oper);
Yishai Hadas55ad3592015-01-25 16:59:42 +02002322 dev->num_slaves = 0;
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002323 }
Eugenia Emantayevf08ad062012-02-06 06:26:17 +00002324
2325 iounmap(priv->mfunc.comm);
Jack Morgensteinab9c17a2011-12-13 04:18:30 +00002326}
2327
Matan Barakffc39f62014-11-13 14:45:29 +02002328void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
Roland Dreier225c7b12007-05-08 18:00:38 -07002329{
2330 struct mlx4_priv *priv = mlx4_priv(dev);
2331
Matan Barakffc39f62014-11-13 14:45:29 +02002332 if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
2333 pci_pool_destroy(priv->cmd.pool);
2334 priv->cmd.pool = NULL;
2335 }
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002336
Matan Barakffc39f62014-11-13 14:45:29 +02002337 if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
2338 (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002339 iounmap(priv->cmd.hcr);
Matan Barakffc39f62014-11-13 14:45:29 +02002340 priv->cmd.hcr = NULL;
2341 }
2342 if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
2343 (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002344 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
Roland Dreierf3d4c892012-09-25 21:24:07 -07002345 priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
Matan Barakffc39f62014-11-13 14:45:29 +02002346 priv->mfunc.vhcr = NULL;
2347 }
2348 if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
2349 priv->cmd.initialized = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002350}
2351
2352/*
2353 * Switch to using events to issue FW commands (can only be called
2354 * after event queue for command events has been initialized).
2355 */
2356int mlx4_cmd_use_events(struct mlx4_dev *dev)
2357{
2358 struct mlx4_priv *priv = mlx4_priv(dev);
2359 int i;
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002360 int err = 0;
Roland Dreier225c7b12007-05-08 18:00:38 -07002361
2362 priv->cmd.context = kmalloc(priv->cmd.max_cmds *
2363 sizeof (struct mlx4_cmd_context),
2364 GFP_KERNEL);
2365 if (!priv->cmd.context)
2366 return -ENOMEM;
2367
2368 for (i = 0; i < priv->cmd.max_cmds; ++i) {
2369 priv->cmd.context[i].token = i;
2370 priv->cmd.context[i].next = i + 1;
Yishai Hadasf5aef5a2015-01-25 16:59:39 +02002371 /* To support fatal error flow, initialize all
2372 * cmd contexts to allow simulating completions
2373 * with complete() at any time.
2374 */
2375 init_completion(&priv->cmd.context[i].done);
Roland Dreier225c7b12007-05-08 18:00:38 -07002376 }
2377
2378 priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2379 priv->cmd.free_head = 0;
2380
2381 sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2382 spin_lock_init(&priv->cmd.context_lock);
2383
2384 for (priv->cmd.token_mask = 1;
2385 priv->cmd.token_mask < priv->cmd.max_cmds;
2386 priv->cmd.token_mask <<= 1)
2387 ; /* nothing */
2388 --priv->cmd.token_mask;
2389
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002390 down(&priv->cmd.poll_sem);
Roland Dreier225c7b12007-05-08 18:00:38 -07002391 priv->cmd.use_events = 1;
2392
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002393 return err;
Roland Dreier225c7b12007-05-08 18:00:38 -07002394}
2395
2396/*
2397 * Switch back to polling (used when shutting down the device)
2398 */
2399void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2400{
2401 struct mlx4_priv *priv = mlx4_priv(dev);
2402 int i;
2403
2404 priv->cmd.use_events = 0;
2405
2406 for (i = 0; i < priv->cmd.max_cmds; ++i)
2407 down(&priv->cmd.event_sem);
2408
2409 kfree(priv->cmd.context);
2410
2411 up(&priv->cmd.poll_sem);
2412}
2413
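/*
 * Command mailbox helpers. A typical caller brackets a firmware command
 * like this (sketch only; MLX4_CMD_MY_OP stands in for a real opcode):
 *
 *	mailbox = mlx4_alloc_cmd_mailbox(dev);
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MY_OP,
 *		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 */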
2414struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2415{
2416 struct mlx4_cmd_mailbox *mailbox;
2417
2418 mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2419 if (!mailbox)
2420 return ERR_PTR(-ENOMEM);
2421
2422 mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2423 &mailbox->dma);
2424 if (!mailbox->buf) {
2425 kfree(mailbox);
2426 return ERR_PTR(-ENOMEM);
2427 }
2428
Jack Morgenstein571b8b92013-11-07 12:19:50 +02002429 memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
2430
Roland Dreier225c7b12007-05-08 18:00:38 -07002431 return mailbox;
2432}
2433EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2434
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002435void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2436 struct mlx4_cmd_mailbox *mailbox)
Roland Dreier225c7b12007-05-08 18:00:38 -07002437{
2438 if (!mailbox)
2439 return;
2440
2441 pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2442 kfree(mailbox);
2443}
2444EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
Yevgeny Petriline8f081a2011-12-13 04:12:25 +00002445
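/*
 * mlx4_comm_get_version - report the comm-channel interface revision in
 * bits 15:8 and the channel version in bits 7:0.
 */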
2446u32 mlx4_comm_get_version(void)
2447{
2448 return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2449}
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002450
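/*
 * VF<->slave numbering: the PF is slave 0, so VF n maps to slave n + 1 and
 * back. Both helpers below validate the index against the number of
 * activated VFs before converting.
 */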
2451static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2452{
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002453 if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
2454 mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
2455 vf, dev->persist->num_vfs);
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002456 return -EINVAL;
2457 }
2458
2459 return vf+1;
2460}
2461
Matan Barakf74462a2014-03-19 18:11:51 +02002462int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2463{
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002464 if (slave < 1 || slave > dev->persist->num_vfs) {
Matan Barakf74462a2014-03-19 18:11:51 +02002465 mlx4_err(dev,
2466 "Bad slave number:%d (number of activated slaves: %lu)\n",
2467 slave, dev->num_slaves);
2468 return -EINVAL;
2469 }
2470 return slave - 1;
2471}
2472
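/*
 * mlx4_cmd_wake_completions - on a fatal device error, complete every
 * outstanding event-mode command with CMD_STAT_INTERNAL_ERR so that no
 * caller is left waiting for a completion the hardware will never deliver.
 */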
Yishai Hadasf5aef5a2015-01-25 16:59:39 +02002473void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
2474{
2475 struct mlx4_priv *priv = mlx4_priv(dev);
2476 struct mlx4_cmd_context *context;
2477 int i;
2478
2479 spin_lock(&priv->cmd.context_lock);
2480 if (priv->cmd.context) {
2481 for (i = 0; i < priv->cmd.max_cmds; ++i) {
2482 context = &priv->cmd.context[i];
2483 context->fw_status = CMD_STAT_INTERNAL_ERR;
2484 context->result =
2485 mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
2486 complete(&context->done);
2487 }
2488 }
2489 spin_unlock(&priv->cmd.context_lock);
2490}
2491
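/*
 * mlx4_get_active_ports - bitmap of the physical ports assigned to a slave.
 * Slave 0 (the PF) sees every port; a VF sees the contiguous range assigned
 * to it at configuration time.
 */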
Matan Barakf74462a2014-03-19 18:11:51 +02002492struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2493{
2494 struct mlx4_active_ports actv_ports;
2495 int vf;
2496
2497 bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2498
2499 if (slave == 0) {
2500 bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2501 return actv_ports;
2502 }
2503
2504 vf = mlx4_get_vf_indx(dev, slave);
2505 if (vf < 0)
2506 return actv_ports;
2507
2508 bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2509 min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
2510 dev->caps.num_ports));
2511
2512 return actv_ports;
2513}
2514EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2515
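/*
 * mlx4_slave_convert_port - translate a slave-relative port number (1..N)
 * into the corresponding physical port, or -EINVAL if it is out of range.
 */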
2516int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2517{
2518 unsigned n;
2519 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2520 unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2521
2522 if (port <= 0 || port > m)
2523 return -EINVAL;
2524
2525 n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2526 if (port <= n)
2527 port = n + 1;
2528
2529 return port;
2530}
2531EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2532
2533int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2534{
2535 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2536 if (test_bit(port - 1, actv_ports.ports))
2537 return port -
2538 find_first_bit(actv_ports.ports, dev->caps.num_ports);
2539
2540 return -1;
2541}
2542EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
2543
2544struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2545 int port)
2546{
2547 unsigned i;
2548 struct mlx4_slaves_pport slaves_pport;
2549
2550 bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2551
2552 if (port <= 0 || port > dev->caps.num_ports)
2553 return slaves_pport;
2554
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002555 for (i = 0; i < dev->persist->num_vfs + 1; i++) {
Matan Barakf74462a2014-03-19 18:11:51 +02002556 struct mlx4_active_ports actv_ports =
2557 mlx4_get_active_ports(dev, i);
2558 if (test_bit(port - 1, actv_ports.ports))
2559 set_bit(i, slaves_pport.slaves);
2560 }
2561
2562 return slaves_pport;
2563}
2564EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2565
2566struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2567 struct mlx4_dev *dev,
2568 const struct mlx4_active_ports *crit_ports)
2569{
2570 unsigned i;
2571 struct mlx4_slaves_pport slaves_pport;
2572
2573 bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2574
Yishai Hadas872bf2f2015-01-25 16:59:35 +02002575 for (i = 0; i < dev->persist->num_vfs + 1; i++) {
Matan Barakf74462a2014-03-19 18:11:51 +02002576 struct mlx4_active_ports actv_ports =
2577 mlx4_get_active_ports(dev, i);
2578 if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2579 dev->caps.num_ports))
2580 set_bit(i, slaves_pport.slaves);
2581 }
2582
2583 return slaves_pport;
2584}
2585EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2586
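/*
 * mlx4_slaves_closest_port - clamp a port number into the range of ports
 * that the given slave can actually see.
 */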
Matan Baraka91c7722014-09-10 16:41:53 +03002587static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
2588{
2589 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2590 int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
2591 + 1;
2592 int max_port = min_port +
2593 bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2594
2595 if (port < min_port)
2596 port = min_port;
2597 else if (port >= max_port)
2598 port = max_port - 1;
2599
2600 return port;
2601}
2602
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002603int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2604{
2605 struct mlx4_priv *priv = mlx4_priv(dev);
2606 struct mlx4_vport_state *s_info;
2607 int slave;
2608
2609 if (!mlx4_is_master(dev))
2610 return -EPROTONOSUPPORT;
2611
2612 slave = mlx4_get_slave_indx(dev, vf);
2613 if (slave < 0)
2614 return -EINVAL;
2615
Matan Baraka91c7722014-09-10 16:41:53 +03002616 port = mlx4_slaves_closest_port(dev, slave, port);
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002617 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2618 s_info->mac = mac;
 2619 mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
2620 vf, port, s_info->mac);
2621 return 0;
2622}
2623EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
Rony Efraim3f7fb022013-04-25 05:22:28 +00002624
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002625
Rony Efraim3f7fb022013-04-25 05:22:28 +00002626int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2627{
2628 struct mlx4_priv *priv = mlx4_priv(dev);
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002629 struct mlx4_vport_state *vf_admin;
Rony Efraim3f7fb022013-04-25 05:22:28 +00002630 int slave;
2631
2632 if ((!mlx4_is_master(dev)) ||
2633 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
2634 return -EPROTONOSUPPORT;
2635
2636 if ((vlan > 4095) || (qos > 7))
2637 return -EINVAL;
2638
2639 slave = mlx4_get_slave_indx(dev, vf);
2640 if (slave < 0)
2641 return -EINVAL;
2642
Matan Baraka91c7722014-09-10 16:41:53 +03002643 port = mlx4_slaves_closest_port(dev, slave, port);
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002644 vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002645
Rony Efraim3f7fb022013-04-25 05:22:28 +00002646 if ((0 == vlan) && (0 == qos))
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002647 vf_admin->default_vlan = MLX4_VGT;
Rony Efraim3f7fb022013-04-25 05:22:28 +00002648 else
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002649 vf_admin->default_vlan = vlan;
2650 vf_admin->default_qos = qos;
2651
Rony Efraim0a6eac22013-06-27 19:05:22 +03002652 if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
2653 mlx4_info(dev,
2654 "updating vf %d port %d config will take effect on next VF restart\n",
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002655 vf, port);
Rony Efraim3f7fb022013-04-25 05:22:28 +00002656 return 0;
2657}
2658EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
Rony Efraime6b6a232013-04-25 05:22:29 +00002659
Jack Morgenstein5ea8bbf2014-03-12 12:00:41 +02002660 /* mlx4_get_slave_default_vlan -
 2661 * return true if the port is in VST mode (i.e. has a default vlan);
 2662 * if so, also return the vlan and qos values (when non-NULL)
2663 */
2664bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
2665 u16 *vlan, u8 *qos)
2666{
2667 struct mlx4_vport_oper_state *vp_oper;
2668 struct mlx4_priv *priv;
2669
2670 priv = mlx4_priv(dev);
Matan Baraka91c7722014-09-10 16:41:53 +03002671 port = mlx4_slaves_closest_port(dev, slave, port);
Jack Morgenstein5ea8bbf2014-03-12 12:00:41 +02002672 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2673
2674 if (MLX4_VGT != vp_oper->state.default_vlan) {
2675 if (vlan)
2676 *vlan = vp_oper->state.default_vlan;
2677 if (qos)
2678 *qos = vp_oper->state.default_qos;
2679 return true;
2680 }
2681 return false;
2682}
2683EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
2684
Rony Efraime6b6a232013-04-25 05:22:29 +00002685int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
2686{
2687 struct mlx4_priv *priv = mlx4_priv(dev);
2688 struct mlx4_vport_state *s_info;
2689 int slave;
2690
2691 if ((!mlx4_is_master(dev)) ||
2692 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
2693 return -EPROTONOSUPPORT;
2694
2695 slave = mlx4_get_slave_indx(dev, vf);
2696 if (slave < 0)
2697 return -EINVAL;
2698
Matan Baraka91c7722014-09-10 16:41:53 +03002699 port = mlx4_slaves_closest_port(dev, slave, port);
Rony Efraime6b6a232013-04-25 05:22:29 +00002700 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2701 s_info->spoofchk = setting;
2702
2703 return 0;
2704}
2705EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
Rony Efraim2cccb9e2013-04-25 05:22:30 +00002706
2707int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
2708{
2709 struct mlx4_priv *priv = mlx4_priv(dev);
2710 struct mlx4_vport_state *s_info;
2711 int slave;
2712
2713 if (!mlx4_is_master(dev))
2714 return -EPROTONOSUPPORT;
2715
2716 slave = mlx4_get_slave_indx(dev, vf);
2717 if (slave < 0)
2718 return -EINVAL;
2719
2720 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2721 ivf->vf = vf;
2722
 2723 /* TODO: factor this u64-to-byte-array MAC conversion into a helper */
2724 ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
2725 ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
2726 ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
2727 ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
2728 ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
2729 ivf->mac[5] = ((s_info->mac) & 0xff);
2730
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04002731 ivf->vlan = s_info->default_vlan;
2732 ivf->qos = s_info->default_qos;
2733 ivf->max_tx_rate = s_info->tx_rate;
2734 ivf->min_tx_rate = 0;
2735 ivf->spoofchk = s_info->spoofchk;
2736 ivf->linkstate = s_info->link_state;
Rony Efraim2cccb9e2013-04-25 05:22:30 +00002737
2738 return 0;
2739}
2740EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
Rony Efraim948e3062013-06-13 13:19:11 +03002741
2742int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
2743{
2744 struct mlx4_priv *priv = mlx4_priv(dev);
2745 struct mlx4_vport_state *s_info;
Rony Efraim948e3062013-06-13 13:19:11 +03002746 int slave;
2747 u8 link_stat_event;
2748
2749 slave = mlx4_get_slave_indx(dev, vf);
2750 if (slave < 0)
2751 return -EINVAL;
2752
Matan Baraka91c7722014-09-10 16:41:53 +03002753 port = mlx4_slaves_closest_port(dev, slave, port);
Rony Efraim948e3062013-06-13 13:19:11 +03002754 switch (link_state) {
2755 case IFLA_VF_LINK_STATE_AUTO:
2756 /* get current link state */
2757 if (!priv->sense.do_sense_port[port])
2758 link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2759 else
2760 link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2761 break;
2762
2763 case IFLA_VF_LINK_STATE_ENABLE:
2764 link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2765 break;
2766
2767 case IFLA_VF_LINK_STATE_DISABLE:
2768 link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2769 break;
2770
2771 default:
2772 mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
2773 link_state, slave, port);
2774 return -EINVAL;
 2775 }
Rony Efraim948e3062013-06-13 13:19:11 +03002776 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
Rony Efraim948e3062013-06-13 13:19:11 +03002777 s_info->link_state = link_state;
Rony Efraim948e3062013-06-13 13:19:11 +03002778
2779 /* send event */
2780 mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
Rony Efraim0a6eac22013-06-27 19:05:22 +03002781
2782 if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
2783 mlx4_dbg(dev,
2784 "updating vf %d port %d no link state HW enforcment\n",
2785 vf, port);
Rony Efraim948e3062013-06-13 13:19:11 +03002786 return 0;
2787}
2788EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
Jack Morgenstein97982f52014-05-29 16:31:02 +03002789
2790int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
2791{
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03002792 struct mlx4_priv *priv = mlx4_priv(dev);
2793
2794 if (slave < 1 || slave >= dev->num_slaves ||
2795 port < 1 || port > MLX4_MAX_PORTS)
2796 return 0;
2797
2798 return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
2799 MLX4_VF_SMI_ENABLED;
Jack Morgenstein97982f52014-05-29 16:31:02 +03002800}
2801EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
Jack Morgenstein65fed8a2014-05-29 16:31:04 +03002802
2803int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
2804{
2805 struct mlx4_priv *priv = mlx4_priv(dev);
2806
2807 if (slave == mlx4_master_func_num(dev))
2808 return 1;
2809
2810 if (slave < 1 || slave >= dev->num_slaves ||
2811 port < 1 || port > MLX4_MAX_PORTS)
2812 return 0;
2813
2814 return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
2815 MLX4_VF_SMI_ENABLED;
2816}
2817EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
2818
2819int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
2820 int enabled)
2821{
2822 struct mlx4_priv *priv = mlx4_priv(dev);
2823
2824 if (slave == mlx4_master_func_num(dev))
2825 return 0;
2826
2827 if (slave < 1 || slave >= dev->num_slaves ||
2828 port < 1 || port > MLX4_MAX_PORTS ||
2829 enabled < 0 || enabled > 1)
2830 return -EINVAL;
2831
2832 priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
2833 return 0;
2834}
2835EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);