/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"

#define MLX4_MAC_VALID			(1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT	2
#define MLX4_VF_COUNTERS_PER_PORT	1

struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head list;
	struct rb_node node;
	u64 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
	const char *func_name;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
	u64 reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};
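
/* Each tracked resource type follows the same pattern as the QP states
 * above: one or more stable states plus a transient BUSY value (the shared
 * RES_ANY_BUSY) that is held while a state transition is validated and
 * applied.  For a QP: RESERVED (number allocated) -> MAPPED (ICM backing
 * mapped) -> HW (owned by firmware); see qp_res_start_move_to() further
 * down for the transition rules.
 */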

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
	atomic_t ref_count;
	u32 qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8 sched_queue;
	__be32 param3;
	u8 vlan_control;
	u8 fvl_rx;
	u8 pri_path_fl;
	u8 vlan_index;
	u8 feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
	int port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common com;
	int port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common com;
	int qpn;
	/* VF DMFS mbox with port flipped */
	void *mirr_mbox;
	/* > 0 --> apply mirror when getting into HA mode */
	/* = 0 --> un-apply mirror when getting out of HA mode */
	u32 mirr_mbox_size;
	struct list_head mirr_list;
	u64 mirr_rule_id;
};

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = rb_entry(node, struct res_common,
						  node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = rb_entry(*new, struct res_common,
						   node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
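
/* Illustrative sketch (not part of the original source): resources are
 * keyed by res_id, one rb-tree per resource type, so typical callers do:
 *
 *	struct res_common *r;
 *
 *	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
 *	if (!r)
 *		err = res_tracker_insert(&tracker->res_tree[RES_QP],
 *					 &newly_allocated_qp->com);
 *
 * where -EEXIST from res_tracker_insert() means the id is already tracked.
 */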

enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EDQUOT;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
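
/* Worked example with made-up numbers: quota = 10, guaranteed = 4,
 * allocated = 3, request count = 4.  One unit (guaranteed - allocated)
 * comes out of this function's reserved share, so from_free = 3 and
 * from_rsvd = 1; the grant succeeds only if the shared pool still covers
 * the other functions' guarantees afterwards, i.e.
 * free - from_free >= reserved.
 */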

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->persist->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
}

static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances /
				    (2 * (dev->persist->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
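
/* Example with assumed inputs: num_vfs = 2 and num_instances = 100 give
 * every function guaranteed = 100 / (2 * 3) = 16 and
 * quota = 100 / 2 + 16 = 66; for RES_MTT the PF additionally absorbs
 * dev->caps.reserved_mtts into its free pool, guarantee and quota.
 */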

void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
{
	/* reduce the sink counter */
	return (dev->caps.max_counters - 1 -
		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
		/ MLX4_MAX_PORTS;
}
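
/* Example with assumed caps (MLX4_MAX_PORTS = 2, max_counters = 128):
 * one counter is reserved as the sink, 2 * 2 = 4 are guaranteed to the
 * PF, and (128 - 1 - 4) / 2 = 61 VFs can each be guaranteed
 * MLX4_VF_COUNTERS_PER_PORT counters per port.
 */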

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;
	int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];

		res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
					   sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
						sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated =
				kzalloc(MLX4_MAX_PORTS *
					(dev->persist->num_vfs + 1) *
					sizeof(int), GFP_KERNEL);
		else
			res_alloc->allocated =
				kzalloc((dev->persist->num_vfs + 1) *
					sizeof(int), GFP_KERNEL);
		/* Reduce the sink counter */
		if (i == RES_COUNTER)
			res_alloc->res_free = dev->caps.max_counters - 1;

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);

			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;

					/* Calculate the max vfs per port for
					 * both ports.
					 */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				if (t == mlx4_master_func_num(dev))
					res_alloc->guaranteed[t] =
						MLX4_PF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else if (t <= max_vfs_guarantee_counter)
					res_alloc->guaranteed[t] =
						MLX4_VF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else
					res_alloc->guaranteed[t] = 0;
				res_alloc->res_free -= res_alloc->guaranteed[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}
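
/* Layout note: for the per-port resource types (RES_MAC and RES_VLAN) the
 * allocated[] array set up above is a flattened
 * [MLX4_MAX_PORTS][num_vfs + 1] matrix, which is why mlx4_grant_resource()
 * and mlx4_release_resource() index it as
 * (port - 1) * (dev->persist->num_vfs + 1) + slave; every other resource
 * type keeps a single counter per function.
 */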

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port);

static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port, err = 0;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

	err = handle_counter(dev, qpc, slave, port);
	if (err)
		goto out;

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
		if (qp_type == MLX4_QP_ST_UD ||
		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
				*(__be32 *)inbox->buf =
					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
					MLX4_QP_OPTPAR_VLAN_STRIPPING);
				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
			} else {
				struct mlx4_update_qp_params params = {.flags = 0};

				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
				if (err)
					goto out;
			}
		}

		/* preserve IF_COUNTER flag */
		qpc->pri_path.vlan_control &=
			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
				/* vst QinQ should block untagged on TX,
				 * but cvlan is in payload and phv is set so
				 * hw see it as untagged. Block tagged instead.
				 */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			} else { /* vst 802.1Q */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			}
		} else { /* priority tagged */
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
			qpc->pri_path.fl |= MLX4_FL_SV;
		else
			qpc->pri_path.fl |= MLX4_FL_CV;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		qpc->qos_vport = vp_oper->state.qos_vport;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
out:
	return err;
}
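
/* Summary of the function above: when the administrator pins a VF to a
 * VLAN (VST mode, default_vlan != MLX4_VGT), the PF rewrites the QP
 * context the VF submitted before it reaches the firmware -- VLAN index,
 * insert/strip flags (MLX4_FL_CV, or MLX4_FL_SV for 802.1ad QinQ),
 * vlan_control filtering, scheduling-queue priority and, with spoof
 * checking on, the forced MAC index -- so the VF cannot escape its vport
 * policy from inside the guest.
 */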

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
{
	switch (t) {
	case RES_QP:
		return "QP";
	case RES_CQ:
		return "CQ";
	case RES_SRQ:
		return "SRQ";
	case RES_XRCD:
		return "XRCD";
	case RES_MPT:
		return "MPT";
	case RES_MTT:
		return "MTT";
	case RES_MAC:
		return "MAC";
	case RES_VLAN:
		return "VLAN";
	case RES_COUNTER:
		return "COUNTER";
	case RES_FS_RULE:
		return "FS_RULE";
	case RES_EQ:
		return "EQ";
	default:
		return "INVALID RESOURCE";
	}
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type,
		    void *res, const char *func_name)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		mlx4_warn(dev,
			  "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
			  func_name, slave, res_id, mlx4_resource_type_to_str(type),
			  r->func_name);
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	r->func_name = func_name;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

#define get_res(dev, slave, res_id, type, res) \
	_get_res((dev), (slave), (res_id), (type), (res), __func__)
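
/* Illustrative usage (not from the original source): callers bracket work
 * on a tracked resource with get_res()/put_res().  The macro records
 * __func__ so a later contender's -EBUSY warning can name the current
 * holder:
 *
 *	struct res_mpt *mpt;
 *	int err = get_res(dev, slave, id, RES_MPT, &mpt);
 *
 *	if (err)
 *		return err;
 *	...use mpt while it is parked in RES_ANY_BUSY...
 *	put_res(dev, slave, id, RES_MPT);
 */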

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r) {
		r->state = r->from_state;
		r->func_name = "";
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port);

static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
				   int counter_index)
{
	struct res_common *r;
	struct res_counter *counter;
	int ret = 0;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return ret;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, counter_index, RES_COUNTER);
	if (!r || r->owner != slave) {
		ret = -EINVAL;
	} else {
		counter = container_of(r, struct res_counter, com);
		if (!counter->port)
			counter->port = port;
	}

	spin_unlock_irq(mlx4_tlock(dev));
	return ret;
}

static int handle_unexisting_counter(struct mlx4_dev *dev,
				     struct mlx4_qp_context *qpc, u8 slave,
				     int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (port == counter->port) {
			qpc->pri_path.counter_index = counter->com.res_id;
			spin_unlock_irq(mlx4_tlock(dev));
			return 0;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	/* No existing counter, need to allocate a new counter */
	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
				port);
	if (err == -ENOENT) {
		err = 0;
	} else if (err && err != -ENOSPC) {
		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
			 __func__, slave, err);
	} else {
		qpc->pri_path.counter_index = counter_idx;
		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
			 __func__, slave, qpc->pri_path.counter_index);
		err = 0;
	}

	return err;
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port)
{
	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
		return handle_existing_counter(dev, slave, port,
					       qpc->pri_path.counter_index);

	return handle_unexisting_counter(dev, qpc, slave, port);
}
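
/* Taken together: handle_counter() validates a counter index the slave
 * assigned explicitly (handle_existing_counter), and otherwise reuses the
 * slave's existing counter for that port or allocates a fresh one
 * (handle_unexisting_counter); if the counter pool is exhausted (-ENOSPC),
 * the QP silently falls back to the sink counter index.
 */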

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;
	ret->port = port;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;
	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id, extra);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
			  struct mlx4_counter *data)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	int *counters_arr;
	int i = 0, err = 0;

	memset(data, 0, sizeof(*data));

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return -ENOMEM;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (counter->port == port) {
			counters_arr[i] = (int)tmp->res_id;
			i++;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
	counters_arr[i] = -1;

	i = 0;

	while (counters_arr[i] != -1) {
		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
					     0);
		if (err) {
			memset(data, 0, sizeof(*data));
			goto table_changed;
		}
		i++;
	}

table_changed:
	kfree(counters_arr);
	return 0;
}

static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del_init(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
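
/* Note on semantics: add_res_range() is all-or-nothing.  Every id in
 * [base, base + count) is allocated up front and inserted under a single
 * hold of the tracker lock, and any collision unwinds the entries already
 * inserted, so a range can never be left half-registered.
 */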

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		pr_devel("%s-%d: state %s, ref_count %d\n",
			 __func__, __LINE__,
			 mtt_states_str(res->com.state),
			 atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}
static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -EOPNOTSUPP;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

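/* The *_res_start_move_to() helpers validate a requested transition and
 * park the resource in its BUSY state while the corresponding firmware
 * command runs; the caller then commits with res_end_move() or rolls back
 * with res_abort_move() (both defined later in this file), restoring a
 * stable state.
 */
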
1558static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1559 enum res_mpt_states state, struct res_mpt **mpt)
1560{
1561 struct mlx4_priv *priv = mlx4_priv(dev);
1562 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1563 struct res_mpt *r;
1564 int err = 0;
1565
1566 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00001567 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001568 if (!r)
1569 err = -ENOENT;
1570 else if (r->com.owner != slave)
1571 err = -EPERM;
1572 else {
1573 switch (state) {
1574 case RES_MPT_BUSY:
1575 err = -EINVAL;
1576 break;
1577
1578 case RES_MPT_RESERVED:
1579 if (r->com.state != RES_MPT_MAPPED)
1580 err = -EINVAL;
1581 break;
1582
1583 case RES_MPT_MAPPED:
1584 if (r->com.state != RES_MPT_RESERVED &&
1585 r->com.state != RES_MPT_HW)
1586 err = -EINVAL;
1587 break;
1588
1589 case RES_MPT_HW:
1590 if (r->com.state != RES_MPT_MAPPED)
1591 err = -EINVAL;
1592 break;
1593 default:
1594 err = -EINVAL;
1595 }
1596
1597 if (!err) {
1598 r->com.from_state = r->com.state;
1599 r->com.to_state = state;
1600 r->com.state = RES_MPT_BUSY;
1601 if (mpt)
Joe Perches64699332012-06-04 12:44:16 +00001602 *mpt = r;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001603 }
1604 }
1605
1606 spin_unlock_irq(mlx4_tlock(dev));
1607
1608 return err;
1609}
1610
1611static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1612 enum res_eq_states state, struct res_eq **eq)
1613{
1614 struct mlx4_priv *priv = mlx4_priv(dev);
1615 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1616 struct res_eq *r;
1617 int err = 0;
1618
1619 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00001620 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001621 if (!r)
1622 err = -ENOENT;
1623 else if (r->com.owner != slave)
1624 err = -EPERM;
1625 else {
1626 switch (state) {
1627 case RES_EQ_BUSY:
1628 err = -EINVAL;
1629 break;
1630
1631 case RES_EQ_RESERVED:
1632 if (r->com.state != RES_EQ_HW)
1633 err = -EINVAL;
1634 break;
1635
1636 case RES_EQ_HW:
1637 if (r->com.state != RES_EQ_RESERVED)
1638 err = -EINVAL;
1639 break;
1640
1641 default:
1642 err = -EINVAL;
1643 }
1644
1645 if (!err) {
1646 r->com.from_state = r->com.state;
1647 r->com.to_state = state;
1648 r->com.state = RES_EQ_BUSY;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001649 }
1650 }
1651
1652 spin_unlock_irq(mlx4_tlock(dev));
1653
Arnd Bergmanna4256bc2016-10-25 18:16:20 +02001654 if (!err && eq)
1655 *eq = r;
1656
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001657 return err;
1658}
1659
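/*
 * Begin moving a CQ between ALLOCATED and HW.  A CQ may only leave HW
 * once nothing references it any more, hence the ref_count check.
 */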
1660static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1661 enum res_cq_states state, struct res_cq **cq)
1662{
1663 struct mlx4_priv *priv = mlx4_priv(dev);
1664 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1665 struct res_cq *r;
1666 int err;
1667
1668 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00001669 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
Paul Bollec9218a92014-01-14 20:45:36 +01001670 if (!r) {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001671 err = -ENOENT;
Paul Bollec9218a92014-01-14 20:45:36 +01001672 } else if (r->com.owner != slave) {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001673 err = -EPERM;
Paul Bollec9218a92014-01-14 20:45:36 +01001674 } else if (state == RES_CQ_ALLOCATED) {
1675 if (r->com.state != RES_CQ_HW)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001676 err = -EINVAL;
Paul Bollec9218a92014-01-14 20:45:36 +01001677 else if (atomic_read(&r->ref_count))
1678 err = -EBUSY;
1679 else
1680 err = 0;
1681 } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1682 err = -EINVAL;
1683 } else {
1684 err = 0;
1685 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001686
Paul Bollec9218a92014-01-14 20:45:36 +01001687 if (!err) {
1688 r->com.from_state = r->com.state;
1689 r->com.to_state = state;
1690 r->com.state = RES_CQ_BUSY;
1691 if (cq)
1692 *cq = r;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001693 }
1694
1695 spin_unlock_irq(mlx4_tlock(dev));
1696
1697 return err;
1698}
1699
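/* SRQ analogue of the CQ move above: ALLOCATED <-> HW, guarded by ref_count. */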
1700static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
Paul Bollef088cbb2014-01-14 20:46:52 +01001701 enum res_srq_states state, struct res_srq **srq)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001702{
1703 struct mlx4_priv *priv = mlx4_priv(dev);
1704 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1705 struct res_srq *r;
1706 int err = 0;
1707
1708 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00001709 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
Paul Bollef088cbb2014-01-14 20:46:52 +01001710 if (!r) {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001711 err = -ENOENT;
Paul Bollef088cbb2014-01-14 20:46:52 +01001712 } else if (r->com.owner != slave) {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001713 err = -EPERM;
Paul Bollef088cbb2014-01-14 20:46:52 +01001714 } else if (state == RES_SRQ_ALLOCATED) {
1715 if (r->com.state != RES_SRQ_HW)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001716 err = -EINVAL;
Paul Bollef088cbb2014-01-14 20:46:52 +01001717 else if (atomic_read(&r->ref_count))
1718 err = -EBUSY;
1719 } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1720 err = -EINVAL;
1721 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001722
Paul Bollef088cbb2014-01-14 20:46:52 +01001723 if (!err) {
1724 r->com.from_state = r->com.state;
1725 r->com.to_state = state;
1726 r->com.state = RES_SRQ_BUSY;
1727 if (srq)
1728 *srq = r;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001729 }
1730
1731 spin_unlock_irq(mlx4_tlock(dev));
1732
1733 return err;
1734}
1735
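/*
 * Complete a two-phase transition started by a *_res_start_move_to()
 * helper: res_abort_move() restores the state saved in from_state,
 * res_end_move() commits to_state.
 */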
1736static void res_abort_move(struct mlx4_dev *dev, int slave,
1737 enum mlx4_resource type, int id)
1738{
1739 struct mlx4_priv *priv = mlx4_priv(dev);
1740 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1741 struct res_common *r;
1742
1743 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00001744 r = res_tracker_lookup(&tracker->res_tree[type], id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001745 if (r && (r->owner == slave))
1746 r->state = r->from_state;
1747 spin_unlock_irq(mlx4_tlock(dev));
1748}
1749
1750static void res_end_move(struct mlx4_dev *dev, int slave,
1751 enum mlx4_resource type, int id)
1752{
1753 struct mlx4_priv *priv = mlx4_priv(dev);
1754 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1755 struct res_common *r;
1756
1757 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00001758 r = res_tracker_lookup(&tracker->res_tree[type], id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001759 if (r && (r->owner == slave))
1760 r->state = r->to_state;
1761 spin_unlock_irq(mlx4_tlock(dev));
1762}
1763
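/*
 * valid_reserved() - may this slave touch a reserved QPN?  Only the
 * master, or the guest that owns the proxy/tunnel QPN, may.
 * fw_reserved() below flags QPNs whose ICM backing is handled by
 * firmware, so the wrappers skip ICM alloc/free for them.
 */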
1764static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1765{
Jack Morgensteine2c76822012-08-03 08:40:41 +00001766 return mlx4_is_qp_reserved(dev, qpn) &&
1767 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001768}
1769
Jack Morgenstein54679e12012-08-03 08:40:43 +00001770static int fw_reserved(struct mlx4_dev *dev, int qpn)
1771{
1772 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001773}
1774
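/*
 * QP allocation is two-staged: RES_OP_RESERVE reserves a QPN range
 * against the slave's quota, RES_OP_MAP_ICM then backs an individual
 * QPN with ICM and moves it to RES_QP_MAPPED.  Allocation flags the
 * device does not support are masked out rather than rejected.
 */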
1775static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1776 u64 in_param, u64 *out_param)
1777{
1778 int err;
1779 int count;
1780 int align;
1781 int base;
1782 int qpn;
Eugenia Emantayevddae0342014-12-11 10:57:54 +02001783 u8 flags;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001784
1785 switch (op) {
1786 case RES_OP_RESERVE:
Jack Morgenstein2d5c57d2014-11-25 11:54:31 +02001787 count = get_param_l(&in_param) & 0xffffff;
Eugenia Emantayevddae0342014-12-11 10:57:54 +02001788 /* Turn off all unsupported QP allocation flags that the
1789 * slave tries to set.
1790 */
1791 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001792 align = get_param_h(&in_param);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001793 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001794 if (err)
1795 return err;
1796
Eugenia Emantayevddae0342014-12-11 10:57:54 +02001797 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001798 if (err) {
1799 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1800 return err;
1801 }
1802
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001803 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1804 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001805 mlx4_release_resource(dev, slave, RES_QP, count, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001806 __mlx4_qp_release_range(dev, base, count);
1807 return err;
1808 }
1809 set_param_l(out_param, base);
1810 break;
1811 case RES_OP_MAP_ICM:
1812 qpn = get_param_l(&in_param) & 0x7fffff;
1813 if (valid_reserved(dev, slave, qpn)) {
1814 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1815 if (err)
1816 return err;
1817 }
1818
1819 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1820 NULL, 1);
1821 if (err)
1822 return err;
1823
Jack Morgenstein54679e12012-08-03 08:40:43 +00001824 if (!fw_reserved(dev, qpn)) {
Jiri Kosina40f22872014-05-11 15:15:12 +03001825 err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001826 if (err) {
1827 res_abort_move(dev, slave, RES_QP, qpn);
1828 return err;
1829 }
1830 }
1831
1832 res_end_move(dev, slave, RES_QP, qpn);
1833 break;
1834
1835 default:
1836 err = -EINVAL;
1837 break;
1838 }
1839 return err;
1840}
1841
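/*
 * MTT ranges are reserved and mapped in a single step; the low dword of
 * in_param carries the order (log2 of the range size).
 */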
1842static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1843 u64 in_param, u64 *out_param)
1844{
1845 int err = -EINVAL;
1846 int base;
1847 int order;
1848
1849 if (op != RES_OP_RESERVE_AND_MAP)
1850 return err;
1851
1852 order = get_param_l(&in_param);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001853
1854 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1855 if (err)
1856 return err;
1857
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001858 base = __mlx4_alloc_mtt_range(dev, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001859 if (base == -1) {
1860 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001861 return -ENOMEM;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001862 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001863
1864 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001865 if (err) {
1866 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001867 __mlx4_free_mtt_range(dev, base, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001868 } else {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001869 set_param_l(out_param, base);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001870 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001871
1872 return err;
1873}
1874
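/*
 * MPT allocation mirrors QP allocation: RES_OP_RESERVE grabs a
 * quota-checked index, RES_OP_MAP_ICM backs the key with ICM.  The
 * tracker id is the index masked by mpt_mask(), not the raw index.
 */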
1875static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1876 u64 in_param, u64 *out_param)
1877{
1878 int err = -EINVAL;
1879 int index;
1880 int id;
1881 struct res_mpt *mpt;
1882
1883 switch (op) {
1884 case RES_OP_RESERVE:
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001885 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1886 if (err)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001887 break;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001888
1889 index = __mlx4_mpt_reserve(dev);
1890 if (index == -1) {
1891 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1892 break;
1893 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001894 id = index & mpt_mask(dev);
1895
1896 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1897 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001898 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
Shani Michaelib20e5192013-02-06 16:19:08 +00001899 __mlx4_mpt_release(dev, index);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001900 break;
1901 }
1902 set_param_l(out_param, index);
1903 break;
1904 case RES_OP_MAP_ICM:
1905 index = get_param_l(&in_param);
1906 id = index & mpt_mask(dev);
1907 err = mr_res_start_move_to(dev, slave, id,
1908 RES_MPT_MAPPED, &mpt);
1909 if (err)
1910 return err;
1911
Jiri Kosina40f22872014-05-11 15:15:12 +03001912 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001913 if (err) {
1914 res_abort_move(dev, slave, RES_MPT, id);
1915 return err;
1916 }
1917
1918 res_end_move(dev, slave, RES_MPT, id);
1919 break;
1920 }
1921 return err;
1922}
1923
1924static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1925 u64 in_param, u64 *out_param)
1926{
1927 int cqn;
1928 int err;
1929
1930 switch (op) {
1931 case RES_OP_RESERVE_AND_MAP:
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001932 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001933 if (err)
1934 break;
1935
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001936 err = __mlx4_cq_alloc_icm(dev, &cqn);
1937 if (err) {
1938 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1939 break;
1940 }
1941
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001942 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1943 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001944 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001945 __mlx4_cq_free_icm(dev, cqn);
1946 break;
1947 }
1948
1949 set_param_l(out_param, cqn);
1950 break;
1951
1952 default:
1953 err = -EINVAL;
1954 }
1955
1956 return err;
1957}
1958
1959static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1960 u64 in_param, u64 *out_param)
1961{
1962 int srqn;
1963 int err;
1964
1965 switch (op) {
1966 case RES_OP_RESERVE_AND_MAP:
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001967 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001968 if (err)
1969 break;
1970
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001971 err = __mlx4_srq_alloc_icm(dev, &srqn);
1972 if (err) {
1973 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1974 break;
1975 }
1976
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001977 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1978 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001979 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001980 __mlx4_srq_free_icm(dev, srqn);
1981 break;
1982 }
1983
1984 set_param_l(out_param, srqn);
1985 break;
1986
1987 default:
1988 err = -EINVAL;
1989 }
1990
1991 return err;
1992}
1993
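/*
 * Per-slave MAC bookkeeping.  Each registered MAC sits on the slave's
 * RES_MAC list with a reference count, so re-registering the same MAC
 * on the same port only bumps the count and quota is charged once;
 * mac_find_smac_ix_in_slave() resolves an smac_index back to its MAC.
 */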
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001994static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1995 u8 smac_index, u64 *mac)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001996{
1997 struct mlx4_priv *priv = mlx4_priv(dev);
1998 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001999 struct list_head *mac_list =
2000 &tracker->slave_list[slave].res_list[RES_MAC];
2001 struct mac_res *res, *tmp;
2002
2003 list_for_each_entry_safe(res, tmp, mac_list, list) {
2004 if (res->smac_index == smac_index && res->port == (u8) port) {
2005 *mac = res->mac;
2006 return 0;
2007 }
2008 }
2009 return -ENOENT;
2010}
2011
2012static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
2013{
2014 struct mlx4_priv *priv = mlx4_priv(dev);
2015 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2016 struct list_head *mac_list =
2017 &tracker->slave_list[slave].res_list[RES_MAC];
2018 struct mac_res *res, *tmp;
2019
2020 list_for_each_entry_safe(res, tmp, mac_list, list) {
2021 if (res->mac == mac && res->port == (u8) port) {
2022 /* mac found. update ref count */
2023 ++res->ref_count;
2024 return 0;
2025 }
2026 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002027
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002028 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
2029 return -EINVAL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002030	res = kzalloc(sizeof(*res), GFP_KERNEL);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002031 if (!res) {
2032 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002033 return -ENOMEM;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002034 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002035 res->mac = mac;
2036 res->port = (u8) port;
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02002037 res->smac_index = smac_index;
2038 res->ref_count = 1;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002039 list_add_tail(&res->list,
2040 &tracker->slave_list[slave].res_list[RES_MAC]);
2041 return 0;
2042}
2043
2044static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2045 int port)
2046{
2047 struct mlx4_priv *priv = mlx4_priv(dev);
2048 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2049 struct list_head *mac_list =
2050 &tracker->slave_list[slave].res_list[RES_MAC];
2051 struct mac_res *res, *tmp;
2052
2053 list_for_each_entry_safe(res, tmp, mac_list, list) {
2054 if (res->mac == mac && res->port == (u8) port) {
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02002055 if (!--res->ref_count) {
2056 list_del(&res->list);
2057 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2058 kfree(res);
2059 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002060 break;
2061 }
2062 }
2063}
2064
2065static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2066{
2067 struct mlx4_priv *priv = mlx4_priv(dev);
2068 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2069 struct list_head *mac_list =
2070 &tracker->slave_list[slave].res_list[RES_MAC];
2071 struct mac_res *res, *tmp;
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02002072 int i;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002073
2074 list_for_each_entry_safe(res, tmp, mac_list, list) {
2075 list_del(&res->list);
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02002076		/* unregister the MAC once for each reference the slave took */
2077 for (i = 0; i < res->ref_count; i++)
2078 __mlx4_unregister_mac(dev, res->port, res->mac);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002079 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002080 kfree(res);
2081 }
2082}
2083
2084static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002085 u64 in_param, u64 *out_param, int in_port)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002086{
2087 int err = -EINVAL;
2088 int port;
2089 u64 mac;
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02002090 u8 smac_index;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002091
2092 if (op != RES_OP_RESERVE_AND_MAP)
2093 return err;
2094
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002095 port = !in_port ? get_param_l(out_param) : in_port;
Matan Barak449fc482014-03-19 18:11:52 +02002096 port = mlx4_slave_convert_port(
2097 dev, slave, port);
2098
2099 if (port < 0)
2100 return -EINVAL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002101 mac = in_param;
2102
2103 err = __mlx4_register_mac(dev, port, mac);
2104 if (err >= 0) {
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02002105 smac_index = err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002106 set_param_l(out_param, err);
2107 err = 0;
2108 }
2109
2110 if (!err) {
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02002111 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002112 if (err)
2113 __mlx4_unregister_mac(dev, port, mac);
2114 }
2115 return err;
2116}
2117
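/* Per-slave VLAN bookkeeping, using the same ref-counted scheme as MACs. */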
Jack Morgenstein48740802013-11-03 10:03:20 +02002118static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2119 int port, int vlan_index)
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002120{
Jack Morgenstein48740802013-11-03 10:03:20 +02002121 struct mlx4_priv *priv = mlx4_priv(dev);
2122 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2123 struct list_head *vlan_list =
2124 &tracker->slave_list[slave].res_list[RES_VLAN];
2125 struct vlan_res *res, *tmp;
2126
2127 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2128 if (res->vlan == vlan && res->port == (u8) port) {
2129 /* vlan found. update ref count */
2130 ++res->ref_count;
2131 return 0;
2132 }
2133 }
2134
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002135 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2136 return -EINVAL;
Jack Morgenstein48740802013-11-03 10:03:20 +02002137 res = kzalloc(sizeof(*res), GFP_KERNEL);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002138 if (!res) {
2139 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
Jack Morgenstein48740802013-11-03 10:03:20 +02002140 return -ENOMEM;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002141 }
Jack Morgenstein48740802013-11-03 10:03:20 +02002142 res->vlan = vlan;
2143 res->port = (u8) port;
2144 res->vlan_index = vlan_index;
2145 res->ref_count = 1;
2146 list_add_tail(&res->list,
2147 &tracker->slave_list[slave].res_list[RES_VLAN]);
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002148 return 0;
2149}
2150
Jack Morgenstein48740802013-11-03 10:03:20 +02002151
2152static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2153 int port)
2154{
2155 struct mlx4_priv *priv = mlx4_priv(dev);
2156 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2157 struct list_head *vlan_list =
2158 &tracker->slave_list[slave].res_list[RES_VLAN];
2159 struct vlan_res *res, *tmp;
2160
2161 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2162 if (res->vlan == vlan && res->port == (u8) port) {
2163 if (!--res->ref_count) {
2164 list_del(&res->list);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002165 mlx4_release_resource(dev, slave, RES_VLAN,
2166 1, port);
Jack Morgenstein48740802013-11-03 10:03:20 +02002167 kfree(res);
2168 }
2169 break;
2170 }
2171 }
2172}
2173
2174static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2175{
2176 struct mlx4_priv *priv = mlx4_priv(dev);
2177 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2178 struct list_head *vlan_list =
2179 &tracker->slave_list[slave].res_list[RES_VLAN];
2180 struct vlan_res *res, *tmp;
2181 int i;
2182
2183 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2184 list_del(&res->list);
2185		/* unregister the VLAN once for each reference the slave took */
2186 for (i = 0; i < res->ref_count; i++)
2187 __mlx4_unregister_vlan(dev, res->port, res->vlan);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002188 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
Jack Morgenstein48740802013-11-03 10:03:20 +02002189 kfree(res);
2190 }
2191}
2192
2193static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02002194 u64 in_param, u64 *out_param, int in_port)
Jack Morgenstein48740802013-11-03 10:03:20 +02002195{
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02002196 struct mlx4_priv *priv = mlx4_priv(dev);
2197 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
Jack Morgenstein48740802013-11-03 10:03:20 +02002198 int err;
2199 u16 vlan;
2200 int vlan_index;
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02002201 int port;
2202
2203 port = !in_port ? get_param_l(out_param) : in_port;
Jack Morgenstein48740802013-11-03 10:03:20 +02002204
2205 if (!port || op != RES_OP_RESERVE_AND_MAP)
2206 return -EINVAL;
2207
Matan Barak449fc482014-03-19 18:11:52 +02002208 port = mlx4_slave_convert_port(
2209 dev, slave, port);
2210
2211 if (port < 0)
2212 return -EINVAL;
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02002213	/* older upstream kernels treated vlan reg/unreg as a NOP; keep that behavior */
2214 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2215 slave_state[slave].old_vlan_api = true;
2216 return 0;
2217 }
2218
Jack Morgenstein48740802013-11-03 10:03:20 +02002219 vlan = (u16) in_param;
2220
2221 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2222 if (!err) {
2223 set_param_l(out_param, (u32) vlan_index);
2224 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2225 if (err)
2226 __mlx4_unregister_vlan(dev, port, vlan);
2227 }
2228 return err;
2229}
2230
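/*
 * Counters are reserve-only (there is no ICM mapping stage); the port
 * is stored with the tracker entry so the counter can later be
 * attributed to a port.
 */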
Jack Morgensteinba062d52012-05-15 10:35:03 +00002231static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
Eran Ben Elisha68230242015-06-15 17:59:01 +03002232 u64 in_param, u64 *out_param, int port)
Jack Morgensteinba062d52012-05-15 10:35:03 +00002233{
2234 u32 index;
2235 int err;
2236
2237 if (op != RES_OP_RESERVE)
2238 return -EINVAL;
2239
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002240 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
Jack Morgensteinba062d52012-05-15 10:35:03 +00002241 if (err)
2242 return err;
2243
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002244 err = __mlx4_counter_alloc(dev, &index);
2245 if (err) {
2246 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2247 return err;
2248 }
2249
Eran Ben Elisha68230242015-06-15 17:59:01 +03002250 err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002251 if (err) {
Jack Morgensteinba062d52012-05-15 10:35:03 +00002252 __mlx4_counter_free(dev, index);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002253 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2254 } else {
Jack Morgensteinba062d52012-05-15 10:35:03 +00002255 set_param_l(out_param, index);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002256 }
Jack Morgensteinba062d52012-05-15 10:35:03 +00002257
2258 return err;
2259}
2260
2261static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2262 u64 in_param, u64 *out_param)
2263{
2264 u32 xrcdn;
2265 int err;
2266
2267 if (op != RES_OP_RESERVE)
2268 return -EINVAL;
2269
2270 err = __mlx4_xrcd_alloc(dev, &xrcdn);
2271 if (err)
2272 return err;
2273
2274 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2275 if (err)
2276 __mlx4_xrcd_free(dev, xrcdn);
2277 else
2278 set_param_l(out_param, xrcdn);
2279
2280 return err;
2281}
2282
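/*
 * Dispatcher for allocation requests arriving from slaves.  The low
 * byte of in_modifier selects the resource type; per-port resources
 * (MAC, VLAN) carry the port in bits 8-15, i.e. a slave encodes
 * roughly:
 *
 *	in_modifier = RES_MAC | (port << 8);
 */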
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002283int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2284 struct mlx4_vhcr *vhcr,
2285 struct mlx4_cmd_mailbox *inbox,
2286 struct mlx4_cmd_mailbox *outbox,
2287 struct mlx4_cmd_info *cmd)
2288{
2289 int err;
2290 int alop = vhcr->op_modifier;
2291
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002292 switch (vhcr->in_modifier & 0xFF) {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002293 case RES_QP:
2294 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2295 vhcr->in_param, &vhcr->out_param);
2296 break;
2297
2298 case RES_MTT:
2299 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2300 vhcr->in_param, &vhcr->out_param);
2301 break;
2302
2303 case RES_MPT:
2304 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2305 vhcr->in_param, &vhcr->out_param);
2306 break;
2307
2308 case RES_CQ:
2309 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2310 vhcr->in_param, &vhcr->out_param);
2311 break;
2312
2313 case RES_SRQ:
2314 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2315 vhcr->in_param, &vhcr->out_param);
2316 break;
2317
2318 case RES_MAC:
2319 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002320 vhcr->in_param, &vhcr->out_param,
2321 (vhcr->in_modifier >> 8) & 0xFF);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002322 break;
2323
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002324 case RES_VLAN:
2325 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002326 vhcr->in_param, &vhcr->out_param,
2327 (vhcr->in_modifier >> 8) & 0xFF);
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002328 break;
2329
Jack Morgensteinba062d52012-05-15 10:35:03 +00002330 case RES_COUNTER:
2331 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
Eran Ben Elisha68230242015-06-15 17:59:01 +03002332 vhcr->in_param, &vhcr->out_param, 0);
Jack Morgensteinba062d52012-05-15 10:35:03 +00002333 break;
2334
2335 case RES_XRCD:
2336 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2337 vhcr->in_param, &vhcr->out_param);
2338 break;
2339
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002340 default:
2341 err = -EINVAL;
2342 break;
2343 }
2344
2345 return err;
2346}
2347
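/*
 * Mirror image of qp_alloc_res(): RES_OP_MAP_ICM releases the ICM
 * backing (unless firmware owns the QPN), RES_OP_RESERVE returns the
 * whole QPN range to the allocator and to the slave's quota.
 */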
2348static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2349 u64 in_param)
2350{
2351 int err;
2352 int count;
2353 int base;
2354 int qpn;
2355
2356 switch (op) {
2357 case RES_OP_RESERVE:
2358 base = get_param_l(&in_param) & 0x7fffff;
2359 count = get_param_h(&in_param);
2360 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2361 if (err)
2362 break;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002363 mlx4_release_resource(dev, slave, RES_QP, count, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002364 __mlx4_qp_release_range(dev, base, count);
2365 break;
2366 case RES_OP_MAP_ICM:
2367 qpn = get_param_l(&in_param) & 0x7fffff;
2368 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2369 NULL, 0);
2370 if (err)
2371 return err;
2372
Jack Morgenstein54679e12012-08-03 08:40:43 +00002373 if (!fw_reserved(dev, qpn))
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002374 __mlx4_qp_free_icm(dev, qpn);
2375
2376 res_end_move(dev, slave, RES_QP, qpn);
2377
2378 if (valid_reserved(dev, slave, qpn))
2379 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2380 break;
2381 default:
2382 err = -EINVAL;
2383 break;
2384 }
2385 return err;
2386}
2387
2388static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2389 u64 in_param, u64 *out_param)
2390{
2391 int err = -EINVAL;
2392 int base;
2393 int order;
2394
2395 if (op != RES_OP_RESERVE_AND_MAP)
2396 return err;
2397
2398 base = get_param_l(&in_param);
2399 order = get_param_h(&in_param);
2400 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002401 if (!err) {
2402 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002403 __mlx4_free_mtt_range(dev, base, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002404 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002405 return err;
2406}
2407
2408static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2409 u64 in_param)
2410{
2411 int err = -EINVAL;
2412 int index;
2413 int id;
2414 struct res_mpt *mpt;
2415
2416 switch (op) {
2417 case RES_OP_RESERVE:
2418 index = get_param_l(&in_param);
2419 id = index & mpt_mask(dev);
2420 err = get_res(dev, slave, id, RES_MPT, &mpt);
2421 if (err)
2422 break;
2423 index = mpt->key;
2424 put_res(dev, slave, id, RES_MPT);
2425
2426 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2427 if (err)
2428 break;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002429 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
Shani Michaelib20e5192013-02-06 16:19:08 +00002430 __mlx4_mpt_release(dev, index);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002431 break;
2432 case RES_OP_MAP_ICM:
Christophe Jaillet5d4de162016-07-02 14:31:05 +02002433 index = get_param_l(&in_param);
2434 id = index & mpt_mask(dev);
2435 err = mr_res_start_move_to(dev, slave, id,
2436 RES_MPT_RESERVED, &mpt);
2437 if (err)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002438 return err;
Christophe Jaillet5d4de162016-07-02 14:31:05 +02002439
2440 __mlx4_mpt_free_icm(dev, mpt->key);
2441 res_end_move(dev, slave, RES_MPT, id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002442 break;
2443 default:
2444 err = -EINVAL;
2445 break;
2446 }
2447 return err;
2448}
2449
2450static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2451 u64 in_param, u64 *out_param)
2452{
2453 int cqn;
2454 int err;
2455
2456 switch (op) {
2457 case RES_OP_RESERVE_AND_MAP:
2458 cqn = get_param_l(&in_param);
2459 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2460 if (err)
2461 break;
2462
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002463 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002464 __mlx4_cq_free_icm(dev, cqn);
2465 break;
2466
2467 default:
2468 err = -EINVAL;
2469 break;
2470 }
2471
2472 return err;
2473}
2474
2475static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2476 u64 in_param, u64 *out_param)
2477{
2478 int srqn;
2479 int err;
2480
2481 switch (op) {
2482 case RES_OP_RESERVE_AND_MAP:
2483 srqn = get_param_l(&in_param);
2484 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2485 if (err)
2486 break;
2487
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002488 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002489 __mlx4_srq_free_icm(dev, srqn);
2490 break;
2491
2492 default:
2493 err = -EINVAL;
2494 break;
2495 }
2496
2497 return err;
2498}
2499
2500static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002501 u64 in_param, u64 *out_param, int in_port)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002502{
2503 int port;
2504 int err = 0;
2505
2506 switch (op) {
2507 case RES_OP_RESERVE_AND_MAP:
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002508 port = !in_port ? get_param_l(out_param) : in_port;
Matan Barak449fc482014-03-19 18:11:52 +02002509 port = mlx4_slave_convert_port(
2510 dev, slave, port);
2511
2512 if (port < 0)
2513 return -EINVAL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002514 mac_del_from_slave(dev, slave, in_param, port);
2515 __mlx4_unregister_mac(dev, port, in_param);
2516 break;
2517 default:
2518 err = -EINVAL;
2519 break;
2520 }
2521
2522 return err;
2523
2524}
2525
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002526static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002527 u64 in_param, u64 *out_param, int port)
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002528{
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02002529 struct mlx4_priv *priv = mlx4_priv(dev);
2530 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
Jack Morgenstein48740802013-11-03 10:03:20 +02002531 int err = 0;
2532
Matan Barak449fc482014-03-19 18:11:52 +02002533 port = mlx4_slave_convert_port(
2534 dev, slave, port);
2535
2536 if (port < 0)
2537 return -EINVAL;
Jack Morgenstein48740802013-11-03 10:03:20 +02002538 switch (op) {
2539 case RES_OP_RESERVE_AND_MAP:
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02002540 if (slave_state[slave].old_vlan_api)
2541 return 0;
Jack Morgenstein48740802013-11-03 10:03:20 +02002542 if (!port)
2543 return -EINVAL;
2544 vlan_del_from_slave(dev, slave, in_param, port);
2545 __mlx4_unregister_vlan(dev, port, in_param);
2546 break;
2547 default:
2548 err = -EINVAL;
2549 break;
2550 }
2551
2552 return err;
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002553}
2554
Jack Morgensteinba062d52012-05-15 10:35:03 +00002555static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2556 u64 in_param, u64 *out_param)
2557{
2558 int index;
2559 int err;
2560
2561 if (op != RES_OP_RESERVE)
2562 return -EINVAL;
2563
2564 index = get_param_l(&in_param);
Eran Ben Elisha9de92c62015-06-15 17:59:00 +03002565 if (index == MLX4_SINK_COUNTER_INDEX(dev))
2566 return 0;
2567
Jack Morgensteinba062d52012-05-15 10:35:03 +00002568 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2569 if (err)
2570 return err;
2571
2572 __mlx4_counter_free(dev, index);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002573 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
Jack Morgensteinba062d52012-05-15 10:35:03 +00002574
2575 return err;
2576}
2577
2578static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2579 u64 in_param, u64 *out_param)
2580{
2581 int xrcdn;
2582 int err;
2583
2584 if (op != RES_OP_RESERVE)
2585 return -EINVAL;
2586
2587 xrcdn = get_param_l(&in_param);
2588 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2589 if (err)
2590 return err;
2591
2592 __mlx4_xrcd_free(dev, xrcdn);
2593
2594 return err;
2595}
2596
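/*
 * Dispatcher for free requests from slaves; the in_modifier encoding
 * matches mlx4_ALLOC_RES_wrapper() above.
 */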
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002597int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2598 struct mlx4_vhcr *vhcr,
2599 struct mlx4_cmd_mailbox *inbox,
2600 struct mlx4_cmd_mailbox *outbox,
2601 struct mlx4_cmd_info *cmd)
2602{
2603 int err = -EINVAL;
2604 int alop = vhcr->op_modifier;
2605
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002606 switch (vhcr->in_modifier & 0xFF) {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002607 case RES_QP:
2608 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2609 vhcr->in_param);
2610 break;
2611
2612 case RES_MTT:
2613 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2614 vhcr->in_param, &vhcr->out_param);
2615 break;
2616
2617 case RES_MPT:
2618 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2619 vhcr->in_param);
2620 break;
2621
2622 case RES_CQ:
2623 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2624 vhcr->in_param, &vhcr->out_param);
2625 break;
2626
2627 case RES_SRQ:
2628 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2629 vhcr->in_param, &vhcr->out_param);
2630 break;
2631
2632 case RES_MAC:
2633 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002634 vhcr->in_param, &vhcr->out_param,
2635 (vhcr->in_modifier >> 8) & 0xFF);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002636 break;
2637
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002638 case RES_VLAN:
2639 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002640 vhcr->in_param, &vhcr->out_param,
2641 (vhcr->in_modifier >> 8) & 0xFF);
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002642 break;
2643
Jack Morgensteinba062d52012-05-15 10:35:03 +00002644 case RES_COUNTER:
2645 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2646 vhcr->in_param, &vhcr->out_param);
2647 break;
2648
2649 case RES_XRCD:
2650 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2651				      vhcr->in_param, &vhcr->out_param);
		break;
2652
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002653 default:
2654 break;
2655 }
2656 return err;
2657}
2658
2659/* ugly but other choices are uglier */
2660static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2661{
2662 return (be32_to_cpu(mpt->flags) >> 9) & 1;
2663}
2664
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002665static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002666{
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002667 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002668}
2669
2670static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2671{
2672 return be32_to_cpu(mpt->mtt_sz);
2673}
2674
Shani Michaelicc1ade92013-02-06 16:19:10 +00002675static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2676{
2677 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2678}
2679
2680static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2681{
2682 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2683}
2684
2685static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2686{
2687 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2688}
2689
2690static int mr_is_region(struct mlx4_mpt_entry *mpt)
2691{
2692 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2693}
2694
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002695static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002696{
2697 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2698}
2699
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002700static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002701{
2702 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2703}
2704
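/*
 * Number of MTT pages a QP consumes.  A queue of 2^log_size WQEs with a
 * stride of 16 << log_stride bytes occupies 1 << (log_size + log_stride
 * + 4) bytes.  For example, log_sq_size = 6 and log_sq_stride = 2 give
 * a 4KB send queue; with 4KB pages (page_shift = 12), no receive queue
 * (SRQ in use) and page_offset = 0, that is exactly one page.
 */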
2705static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2706{
2707 int page_shift = (qpc->log_page_size & 0x3f) + 12;
2708 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2709	int log_sq_stride = qpc->sq_size_stride & 7;
2710 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2711 int log_rq_stride = qpc->rq_size_stride & 7;
2712 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2713 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
Yishai Hadas5c5f3f02013-08-01 18:49:52 +03002714 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2715 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002716 int sq_size;
2717 int rq_size;
2718 int total_pages;
2719 int total_mem;
2720 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2721
2722	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2723 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2724 total_mem = sq_size + rq_size;
2725 total_pages =
2726 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2727 page_shift);
2728
2729 return total_pages;
2730}
2731
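/*
 * Reject MTT references that fall outside the range this slave actually
 * owns, [res_id, res_id + 2^order), with -EPERM.
 */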
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002732static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2733 int size, struct res_mtt *mtt)
2734{
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002735 int res_start = mtt->com.res_id;
2736 int res_size = (1 << mtt->order);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002737
2738 if (start < res_start || start + size > res_start + res_size)
2739 return -EPERM;
2740 return 0;
2741}
2742
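/*
 * SW2HW_MPT from a slave: sanity-check the MPT entry (memory regions
 * only, no FMR/bind for guests, PD slave bits must match the caller)
 * and, for non-physical MRs, pin the referenced MTT range before
 * handing the command to firmware.
 */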
2743int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2744 struct mlx4_vhcr *vhcr,
2745 struct mlx4_cmd_mailbox *inbox,
2746 struct mlx4_cmd_mailbox *outbox,
2747 struct mlx4_cmd_info *cmd)
2748{
2749 int err;
2750 int index = vhcr->in_modifier;
2751 struct res_mtt *mtt;
Greg Thelen8dc7d112017-04-17 23:21:35 -07002752 struct res_mpt *mpt = NULL;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002753 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002754 int phys;
2755 int id;
Shani Michaelicc1ade92013-02-06 16:19:10 +00002756 u32 pd;
2757 int pd_slave;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002758
2759 id = index & mpt_mask(dev);
2760 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2761 if (err)
2762 return err;
2763
Shani Michaelicc1ade92013-02-06 16:19:10 +00002764 /* Disable memory windows for VFs. */
2765 if (!mr_is_region(inbox->buf)) {
2766 err = -EPERM;
2767 goto ex_abort;
2768 }
2769
2770 /* Make sure that the PD bits related to the slave id are zeros. */
2771 pd = mr_get_pd(inbox->buf);
2772 pd_slave = (pd >> 17) & 0x7f;
Maor Gottliebb3320682015-02-03 17:57:15 +02002773 if (pd_slave != 0 && --pd_slave != slave) {
Shani Michaelicc1ade92013-02-06 16:19:10 +00002774 err = -EPERM;
2775 goto ex_abort;
2776 }
2777
2778 if (mr_is_fmr(inbox->buf)) {
2779 /* FMR and Bind Enable are forbidden in slave devices. */
2780 if (mr_is_bind_enabled(inbox->buf)) {
2781 err = -EPERM;
2782 goto ex_abort;
2783 }
2784 /* FMR and Memory Windows are also forbidden. */
2785 if (!mr_is_region(inbox->buf)) {
2786 err = -EPERM;
2787 goto ex_abort;
2788 }
2789 }
2790
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002791 phys = mr_phys_mpt(inbox->buf);
2792 if (!phys) {
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002793 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002794 if (err)
2795 goto ex_abort;
2796
2797 err = check_mtt_range(dev, slave, mtt_base,
2798 mr_get_mtt_size(inbox->buf), mtt);
2799 if (err)
2800 goto ex_put;
2801
2802 mpt->mtt = mtt;
2803 }
2804
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002805 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2806 if (err)
2807 goto ex_put;
2808
2809 if (!phys) {
2810 atomic_inc(&mtt->ref_count);
2811 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2812 }
2813
2814 res_end_move(dev, slave, RES_MPT, id);
2815 return 0;
2816
2817ex_put:
2818 if (!phys)
2819 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2820ex_abort:
2821 res_abort_move(dev, slave, RES_MPT, id);
2822
2823 return err;
2824}
2825
2826int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2827 struct mlx4_vhcr *vhcr,
2828 struct mlx4_cmd_mailbox *inbox,
2829 struct mlx4_cmd_mailbox *outbox,
2830 struct mlx4_cmd_info *cmd)
2831{
2832 int err;
2833 int index = vhcr->in_modifier;
2834 struct res_mpt *mpt;
2835 int id;
2836
2837 id = index & mpt_mask(dev);
2838 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2839 if (err)
2840 return err;
2841
2842 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2843 if (err)
2844 goto ex_abort;
2845
2846 if (mpt->mtt)
2847 atomic_dec(&mpt->mtt->ref_count);
2848
2849 res_end_move(dev, slave, RES_MPT, id);
2850 return 0;
2851
2852ex_abort:
2853 res_abort_move(dev, slave, RES_MPT, id);
2854
2855 return err;
2856}
2857
2858int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2859 struct mlx4_vhcr *vhcr,
2860 struct mlx4_cmd_mailbox *inbox,
2861 struct mlx4_cmd_mailbox *outbox,
2862 struct mlx4_cmd_info *cmd)
2863{
2864 int err;
2865 int index = vhcr->in_modifier;
2866 struct res_mpt *mpt;
2867 int id;
2868
2869 id = index & mpt_mask(dev);
2870 err = get_res(dev, slave, id, RES_MPT, &mpt);
2871 if (err)
2872 return err;
2873
Matan Barake6306642014-07-31 11:01:29 +03002874 if (mpt->com.from_state == RES_MPT_MAPPED) {
2875 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2876 * that, the VF must read the MPT. But since the MPT entry memory is not
2877 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2878 * entry contents. To guarantee that the MPT cannot be changed, the driver
2879 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2880 * ownership fofollowing the change. The change here allows the VF to
2881 * perform QUERY_MPT also when the entry is in SW ownership.
2882 */
2883 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2884 &mlx4_priv(dev)->mr_table.dmpt_table,
2885 mpt->key, NULL);
2886
2887		if (!mpt_entry || !outbox->buf) {
2888 err = -EINVAL;
2889 goto out;
2890 }
2891
2892 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2893
2894 err = 0;
2895 } else if (mpt->com.from_state == RES_MPT_HW) {
2896 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2897 } else {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002898 err = -EBUSY;
2899 goto out;
2900 }
2901
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002902
2903out:
2904 put_res(dev, slave, id, RES_MPT);
2905 return err;
2906}
2907
2908static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2909{
2910 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2911}
2912
2913static int qp_get_scqn(struct mlx4_qp_context *qpc)
2914{
2915 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2916}
2917
2918static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2919{
2920 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2921}
2922
Jack Morgenstein54679e12012-08-03 08:40:43 +00002923static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2924 struct mlx4_qp_context *context)
2925{
2926 u32 qpn = vhcr->in_modifier & 0xffffff;
2927 u32 qkey = 0;
2928
2929 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2930 return;
2931
2932 /* adjust qkey in qp context */
2933 context->qkey = cpu_to_be32(qkey);
2934}
2935
Or Gerlitze5dfbf92015-05-21 15:14:09 +03002936static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2937 struct mlx4_qp_context *qpc,
2938 struct mlx4_cmd_mailbox *inbox);
2939
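/*
 * RST2INIT from a slave: look up and reference every object the QP
 * context points at (MTT range, send/receive CQs, optional SRQ) so none
 * of them can vanish while the QP exists, then let firmware perform the
 * actual transition.
 */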
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002940int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2941 struct mlx4_vhcr *vhcr,
2942 struct mlx4_cmd_mailbox *inbox,
2943 struct mlx4_cmd_mailbox *outbox,
2944 struct mlx4_cmd_info *cmd)
2945{
2946 int err;
2947 int qpn = vhcr->in_modifier & 0x7fffff;
2948 struct res_mtt *mtt;
2949 struct res_qp *qp;
2950 struct mlx4_qp_context *qpc = inbox->buf + 8;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002951 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002952 int mtt_size = qp_get_mtt_size(qpc);
2953 struct res_cq *rcq;
2954 struct res_cq *scq;
2955 int rcqn = qp_get_rcqn(qpc);
2956 int scqn = qp_get_scqn(qpc);
2957 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2958 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2959 struct res_srq *srq;
2960 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2961
Or Gerlitze5dfbf92015-05-21 15:14:09 +03002962 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2963 if (err)
2964 return err;
2965
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002966 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2967 if (err)
2968 return err;
2969 qp->local_qpn = local_qpn;
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002970 qp->sched_queue = 0;
Rony Efraimf0f829b2013-11-07 12:19:51 +02002971 qp->param3 = 0;
2972 qp->vlan_control = 0;
2973 qp->fvl_rx = 0;
2974 qp->pri_path_fl = 0;
2975 qp->vlan_index = 0;
2976 qp->feup = 0;
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002977 qp->qpc_flags = be32_to_cpu(qpc->flags);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002978
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002979 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002980 if (err)
2981 goto ex_abort;
2982
2983 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2984 if (err)
2985 goto ex_put_mtt;
2986
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002987 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2988 if (err)
2989 goto ex_put_mtt;
2990
2991 if (scqn != rcqn) {
2992 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2993 if (err)
2994 goto ex_put_rcq;
2995 } else
2996 scq = rcq;
2997
2998 if (use_srq) {
2999 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3000 if (err)
3001 goto ex_put_scq;
3002 }
3003
Jack Morgenstein54679e12012-08-03 08:40:43 +00003004 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3005 update_pkey_index(dev, slave, inbox);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003006 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3007 if (err)
3008 goto ex_put_srq;
3009 atomic_inc(&mtt->ref_count);
3010 qp->mtt = mtt;
3011 atomic_inc(&rcq->ref_count);
3012 qp->rcq = rcq;
3013 atomic_inc(&scq->ref_count);
3014 qp->scq = scq;
3015
3016 if (scqn != rcqn)
3017 put_res(dev, slave, scqn, RES_CQ);
3018
3019 if (use_srq) {
3020 atomic_inc(&srq->ref_count);
3021 put_res(dev, slave, srqn, RES_SRQ);
3022 qp->srq = srq;
3023 }
Jack Morgenstein7c3945bc2017-01-16 18:31:38 +02003024
3025 /* Save param3 for dynamic changes from VST back to VGT */
3026 qp->param3 = qpc->param3;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003027 put_res(dev, slave, rcqn, RES_CQ);
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003028 put_res(dev, slave, mtt_base, RES_MTT);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003029 res_end_move(dev, slave, RES_QP, qpn);
3030
3031 return 0;
3032
3033ex_put_srq:
3034 if (use_srq)
3035 put_res(dev, slave, srqn, RES_SRQ);
3036ex_put_scq:
3037 if (scqn != rcqn)
3038 put_res(dev, slave, scqn, RES_CQ);
3039ex_put_rcq:
3040 put_res(dev, slave, rcqn, RES_CQ);
3041ex_put_mtt:
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003042 put_res(dev, slave, mtt_base, RES_MTT);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003043ex_abort:
3044 res_abort_move(dev, slave, RES_QP, qpn);
3045
3046 return err;
3047}
3048
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003049static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003050{
3051 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3052}
3053
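/*
 * EQ/CQ MTT sizing; both formulas below assume 2^5 = 32-byte entries,
 * so a queue of 2^log_size entries needs
 * 1 << (log_size + 5 - page_shift) pages, minimum one.
 */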
3054static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3055{
3056 int log_eq_size = eqc->log_eq_size & 0x1f;
3057 int page_shift = (eqc->log_page_size & 0x3f) + 12;
3058
3059 if (log_eq_size + 5 < page_shift)
3060 return 1;
3061
3062 return 1 << (log_eq_size + 5 - page_shift);
3063}
3064
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003065static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003066{
3067 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3068}
3069
3070static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3071{
3072 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3073 int page_shift = (cqc->log_page_size & 0x3f) + 12;
3074
3075 if (log_cq_size + 5 < page_shift)
3076 return 1;
3077
3078 return 1 << (log_cq_size + 5 - page_shift);
3079}
3080
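/*
 * EQ numbers are only unique per slave, so RES_EQ tracker ids embed the
 * slave number: res_id = (slave << 10) | eqn.
 */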
3081int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3082 struct mlx4_vhcr *vhcr,
3083 struct mlx4_cmd_mailbox *inbox,
3084 struct mlx4_cmd_mailbox *outbox,
3085 struct mlx4_cmd_info *cmd)
3086{
3087 int err;
3088 int eqn = vhcr->in_modifier;
Yishai Hadas2d3c7392015-05-05 17:07:12 +03003089 int res_id = (slave << 10) | eqn;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003090 struct mlx4_eq_context *eqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003091 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003092 int mtt_size = eq_get_mtt_size(eqc);
3093 struct res_eq *eq;
3094 struct res_mtt *mtt;
3095
3096 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3097 if (err)
3098 return err;
3099 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3100 if (err)
3101 goto out_add;
3102
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003103 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003104 if (err)
3105 goto out_move;
3106
3107 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3108 if (err)
3109 goto out_put;
3110
3111 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3112 if (err)
3113 goto out_put;
3114
3115 atomic_inc(&mtt->ref_count);
3116 eq->mtt = mtt;
3117 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3118 res_end_move(dev, slave, RES_EQ, res_id);
3119 return 0;
3120
3121out_put:
3122 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3123out_move:
3124 res_abort_move(dev, slave, RES_EQ, res_id);
3125out_add:
3126 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3127 return err;
3128}
3129
Matan Barakd475c952014-11-02 16:26:17 +02003130int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3131 struct mlx4_vhcr *vhcr,
3132 struct mlx4_cmd_mailbox *inbox,
3133 struct mlx4_cmd_mailbox *outbox,
3134 struct mlx4_cmd_info *cmd)
3135{
3136 int err;
3137 u8 get = vhcr->op_modifier;
3138
3139 if (get != 1)
3140 return -EPERM;
3141
3142 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3143
3144 return err;
3145}
3146
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003147static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3148 int len, struct res_mtt **res)
3149{
3150 struct mlx4_priv *priv = mlx4_priv(dev);
3151 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3152 struct res_mtt *mtt;
3153 int err = -EINVAL;
3154
3155 spin_lock_irq(mlx4_tlock(dev));
3156 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3157 com.list) {
3158 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3159 *res = mtt;
3160 mtt->com.from_state = mtt->com.state;
3161 mtt->com.state = RES_MTT_BUSY;
3162 err = 0;
3163 break;
3164 }
3165 }
3166 spin_unlock_irq(mlx4_tlock(dev));
3167
3168 return err;
3169}
3170
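/*
 * Validate a slave's QP context before a state transition: check GID
 * indexes against the GIDs the slave owns, refuse VF rate limiting, and
 * allow MLX proxy special QPs only for VFs with SMI enabled.
 */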
Jack Morgenstein54679e12012-08-03 08:40:43 +00003171static int verify_qp_parameters(struct mlx4_dev *dev,
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003172 struct mlx4_vhcr *vhcr,
Jack Morgenstein54679e12012-08-03 08:40:43 +00003173 struct mlx4_cmd_mailbox *inbox,
3174 enum qp_transition transition, u8 slave)
3175{
3176 u32 qp_type;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003177 u32 qpn;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003178 struct mlx4_qp_context *qp_ctx;
3179 enum mlx4_qp_optpar optpar;
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02003180 int port;
3181 int num_gids;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003182
3183 qp_ctx = inbox->buf + 8;
3184 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3185 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
3186
Or Gerlitzfc31e252015-03-18 14:57:34 +02003187 if (slave != mlx4_master_func_num(dev)) {
Moni Shoua53f33ae2015-02-03 16:48:33 +02003188		qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);	/* params2 is big-endian */
Or Gerlitzfc31e252015-03-18 14:57:34 +02003189 /* setting QP rate-limit is disallowed for VFs */
3190 if (qp_ctx->rate_limit_params)
3191 return -EPERM;
3192 }
Moni Shoua53f33ae2015-02-03 16:48:33 +02003193
Jack Morgenstein54679e12012-08-03 08:40:43 +00003194 switch (qp_type) {
3195 case MLX4_QP_ST_RC:
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02003196 case MLX4_QP_ST_XRC:
Jack Morgenstein54679e12012-08-03 08:40:43 +00003197 case MLX4_QP_ST_UC:
3198 switch (transition) {
3199 case QP_TRANS_INIT2RTR:
3200 case QP_TRANS_RTR2RTS:
3201 case QP_TRANS_RTS2RTS:
3202 case QP_TRANS_SQD2SQD:
3203 case QP_TRANS_SQD2RTS:
Arnd Bergmannbaefd702016-03-14 15:18:34 +01003204 if (slave != mlx4_master_func_num(dev)) {
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02003205 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3206 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3207 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
Matan Barak449fc482014-03-19 18:11:52 +02003208 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02003209 else
3210 num_gids = 1;
3211 if (qp_ctx->pri_path.mgid_index >= num_gids)
Jack Morgenstein54679e12012-08-03 08:40:43 +00003212 return -EINVAL;
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02003213 }
3214 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3215 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3216 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
Matan Barak449fc482014-03-19 18:11:52 +02003217 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02003218 else
3219 num_gids = 1;
3220 if (qp_ctx->alt_path.mgid_index >= num_gids)
Jack Morgenstein54679e12012-08-03 08:40:43 +00003221 return -EINVAL;
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02003222 }
Arnd Bergmannbaefd702016-03-14 15:18:34 +01003223 }
Jack Morgenstein54679e12012-08-03 08:40:43 +00003224 break;
3225 default:
3226 break;
3227 }
Jack Morgenstein54679e12012-08-03 08:40:43 +00003228 break;
Roland Dreier165cb462014-05-30 15:38:58 -07003229
3230 case MLX4_QP_ST_MLX:
3231 qpn = vhcr->in_modifier & 0x7fffff;
3232 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3233 if (transition == QP_TRANS_INIT2RTR &&
3234 slave != mlx4_master_func_num(dev) &&
3235 mlx4_is_qp_reserved(dev, qpn) &&
3236 !mlx4_vf_smi_enabled(dev, slave, port)) {
3237 /* only enabled VFs may create MLX proxy QPs */
3238 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3239 __func__, slave, port);
3240 return -EPERM;
3241 }
3242 break;
3243
Jack Morgenstein54679e12012-08-03 08:40:43 +00003244 default:
3245 break;
3246 }
3247
3248 return 0;
3249}
3250
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003251int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3252 struct mlx4_vhcr *vhcr,
3253 struct mlx4_cmd_mailbox *inbox,
3254 struct mlx4_cmd_mailbox *outbox,
3255 struct mlx4_cmd_info *cmd)
3256{
3257 struct mlx4_mtt mtt;
3258 __be64 *page_list = inbox->buf;
3259 u64 *pg_list = (u64 *)page_list;
3260 int i;
3261 struct res_mtt *rmtt = NULL;
3262 int start = be64_to_cpu(page_list[0]);
3263 int npages = vhcr->in_modifier;
3264 int err;
3265
3266 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3267 if (err)
3268 return err;
3269
3270 /* Call the SW implementation of write_mtt:
3271 * - Prepare a dummy mtt struct
Joe Perchesdbedd442015-03-06 20:49:12 -08003272 * - Translate inbox contents to simple addresses in host endianness */
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003273 mtt.offset = 0; /* TBD: offset handling is broken; it is unused
3274 here, so leave it as zero */
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003275 mtt.order = 0;
3276 mtt.page_shift = 0;
3277 for (i = 0; i < npages; ++i)
3278 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3279
3280 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3281 ((u64 *)page_list + 2));
3282
3283 if (rmtt)
3284 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3285
3286 return err;
3287}
3288
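/* Move an EQ from HW back to reserved state: pass the command to FW,
 * drop the reference held on the EQ's MTT and release the EQ range.
 * Note that the resource id encodes the owning slave in bits 10 and up.
 */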
3289int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3290 struct mlx4_vhcr *vhcr,
3291 struct mlx4_cmd_mailbox *inbox,
3292 struct mlx4_cmd_mailbox *outbox,
3293 struct mlx4_cmd_info *cmd)
3294{
3295 int eqn = vhcr->in_modifier;
Yishai Hadas2d3c7392015-05-05 17:07:12 +03003296 int res_id = eqn | (slave << 10);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003297 struct res_eq *eq;
3298 int err;
3299
3300 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3301 if (err)
3302 return err;
3303
3304 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3305 if (err)
3306 goto ex_abort;
3307
3308 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3309 if (err)
3310 goto ex_put;
3311
3312 atomic_dec(&eq->mtt->ref_count);
3313 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3314 res_end_move(dev, slave, RES_EQ, res_id);
3315 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3316
3317 return 0;
3318
3319ex_put:
3320 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3321ex_abort:
3322 res_abort_move(dev, slave, RES_EQ, res_id);
3323
3324 return err;
3325}
3326
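/* Deliver an async event to a slave by mirroring the EQE into the
 * slave's event EQ via the GEN_EQE command. Silently succeeds if the
 * slave is inactive or did not register an EQ for this event type.
 */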
3327int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3328{
3329 struct mlx4_priv *priv = mlx4_priv(dev);
3330 struct mlx4_slave_event_eq_info *event_eq;
3331 struct mlx4_cmd_mailbox *mailbox;
3332 u32 in_modifier = 0;
3333 int err;
3334 int res_id;
3335 struct res_eq *req;
3336
3337 if (!priv->mfunc.master.slave_state)
3338 return -EINVAL;
3339
Jack Morgensteinbffb0232015-03-24 15:18:39 +02003340 /* check that the slave id is valid, is not the PF, and is active */
3341 if (slave < 0 || slave > dev->persist->num_vfs ||
3342 slave == dev->caps.function ||
3343 !priv->mfunc.master.slave_state[slave].active)
3344 return 0;
3345
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00003346 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003347
3348 /* Create the event only if the slave is registered */
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00003349 if (event_eq->eqn < 0)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003350 return 0;
3351
3352 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
Yishai Hadas2d3c7392015-05-05 17:07:12 +03003353 res_id = (slave << 10) | event_eq->eqn;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003354 err = get_res(dev, slave, res_id, RES_EQ, &req);
3355 if (err)
3356 goto unlock;
3357
3358 if (req->com.from_state != RES_EQ_HW) {
3359 err = -EINVAL;
3360 goto put;
3361 }
3362
3363 mailbox = mlx4_alloc_cmd_mailbox(dev);
3364 if (IS_ERR(mailbox)) {
3365 err = PTR_ERR(mailbox);
3366 goto put;
3367 }
3368
3369 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3370 ++event_eq->token;
3371 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3372 }
3373
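 /* an EQE is 32 bytes; forward all but the last dword, which
 * holds the ownership bit
 */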
3374 memcpy(mailbox->buf, (u8 *) eqe, 28);
3375
Yishai Hadas2d3c7392015-05-05 17:07:12 +03003376 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003377
3378 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3379 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3380 MLX4_CMD_NATIVE);
3381
3382 put_res(dev, slave, res_id, RES_EQ);
3383 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3384 mlx4_free_cmd_mailbox(dev, mailbox);
3385 return err;
3386
3387put:
3388 put_res(dev, slave, res_id, RES_EQ);
3389
3390unlock:
3391 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3392 return err;
3393}
3394
3395int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3396 struct mlx4_vhcr *vhcr,
3397 struct mlx4_cmd_mailbox *inbox,
3398 struct mlx4_cmd_mailbox *outbox,
3399 struct mlx4_cmd_info *cmd)
3400{
3401 int eqn = vhcr->in_modifier;
Yishai Hadas2d3c7392015-05-05 17:07:12 +03003402 int res_id = eqn | (slave << 10);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003403 struct res_eq *eq;
3404 int err;
3405
3406 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3407 if (err)
3408 return err;
3409
3410 if (eq->com.from_state != RES_EQ_HW) {
3411 err = -EINVAL;
3412 goto ex_put;
3413 }
3414
3415 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3416
3417ex_put:
3418 put_res(dev, slave, res_id, RES_EQ);
3419 return err;
3420}
3421
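/* Hand a CQ over to HW: validate that the MTT range described by the
 * CQ context belongs to the slave, take a reference on it and pass the
 * command on to FW.
 */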
3422int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3423 struct mlx4_vhcr *vhcr,
3424 struct mlx4_cmd_mailbox *inbox,
3425 struct mlx4_cmd_mailbox *outbox,
3426 struct mlx4_cmd_info *cmd)
3427{
3428 int err;
3429 int cqn = vhcr->in_modifier;
3430 struct mlx4_cq_context *cqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003431 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
Bjorn Helgaasc1c52db2015-05-14 18:17:08 -05003432 struct res_cq *cq = NULL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003433 struct res_mtt *mtt;
3434
3435 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3436 if (err)
3437 return err;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003438 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003439 if (err)
3440 goto out_move;
3441 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3442 if (err)
3443 goto out_put;
3444 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3445 if (err)
3446 goto out_put;
3447 atomic_inc(&mtt->ref_count);
3448 cq->mtt = mtt;
3449 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3450 res_end_move(dev, slave, RES_CQ, cqn);
3451 return 0;
3452
3453out_put:
3454 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3455out_move:
3456 res_abort_move(dev, slave, RES_CQ, cqn);
3457 return err;
3458}
3459
3460int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3461 struct mlx4_vhcr *vhcr,
3462 struct mlx4_cmd_mailbox *inbox,
3463 struct mlx4_cmd_mailbox *outbox,
3464 struct mlx4_cmd_info *cmd)
3465{
3466 int err;
3467 int cqn = vhcr->in_modifier;
Bjorn Helgaasc1c52db2015-05-14 18:17:08 -05003468 struct res_cq *cq = NULL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003469
3470 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3471 if (err)
3472 return err;
3473 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3474 if (err)
3475 goto out_move;
3476 atomic_dec(&cq->mtt->ref_count);
3477 res_end_move(dev, slave, RES_CQ, cqn);
3478 return 0;
3479
3480out_move:
3481 res_abort_move(dev, slave, RES_CQ, cqn);
3482 return err;
3483}
3484
3485int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3486 struct mlx4_vhcr *vhcr,
3487 struct mlx4_cmd_mailbox *inbox,
3488 struct mlx4_cmd_mailbox *outbox,
3489 struct mlx4_cmd_info *cmd)
3490{
3491 int cqn = vhcr->in_modifier;
3492 struct res_cq *cq;
3493 int err;
3494
3495 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3496 if (err)
3497 return err;
3498
3499 if (cq->com.from_state != RES_CQ_HW) {
 err = -EBUSY;
3500 goto ex_put;
 }
3501
3502 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3503ex_put:
3504 put_res(dev, slave, cqn, RES_CQ);
3505
3506 return err;
3507}
3508
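/* CQ resize: validate the new MTT range named in the CQ context and,
 * on success, move the CQ's MTT reference from the old range to the
 * new one.
 */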
3509static int handle_resize(struct mlx4_dev *dev, int slave,
3510 struct mlx4_vhcr *vhcr,
3511 struct mlx4_cmd_mailbox *inbox,
3512 struct mlx4_cmd_mailbox *outbox,
3513 struct mlx4_cmd_info *cmd,
3514 struct res_cq *cq)
3515{
3516 int err;
3517 struct res_mtt *orig_mtt;
3518 struct res_mtt *mtt;
3519 struct mlx4_cq_context *cqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003520 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003521
3522 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3523 if (err)
3524 return err;
3525
3526 if (orig_mtt != cq->mtt) {
3527 err = -EINVAL;
3528 goto ex_put;
3529 }
3530
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003531 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003532 if (err)
3533 goto ex_put;
3534
3535 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3536 if (err)
3537 goto ex_put1;
3538 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3539 if (err)
3540 goto ex_put1;
3541 atomic_dec(&orig_mtt->ref_count);
3542 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3543 atomic_inc(&mtt->ref_count);
3544 cq->mtt = mtt;
3545 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3546 return 0;
3547
3548ex_put1:
3549 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3550ex_put:
3551 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3552
3553 return err;
3555}
3556
3557int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3558 struct mlx4_vhcr *vhcr,
3559 struct mlx4_cmd_mailbox *inbox,
3560 struct mlx4_cmd_mailbox *outbox,
3561 struct mlx4_cmd_info *cmd)
3562{
3563 int cqn = vhcr->in_modifier;
3564 struct res_cq *cq;
3565 int err;
3566
3567 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3568 if (err)
3569 return err;
3570
3571 if (cq->com.from_state != RES_CQ_HW) {
 err = -EBUSY;
3572 goto ex_put;
 }
3573
3574 if (vhcr->op_modifier == 0) {
3575 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
Jack Morgensteindcf353b2012-03-07 05:56:35 +00003576 goto ex_put;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003577 }
3578
3579 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3580ex_put:
3581 put_res(dev, slave, cqn, RES_CQ);
3582
3583 return err;
3584}
3585
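/* Number of MTT entries spanned by the SRQ buffer, derived from the
 * log sizes in the SRQ context (at least one entry).
 */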
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003586static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3587{
3588 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3589 int log_rq_stride = srqc->logstride & 7;
3590 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3591
3592 if (log_srq_size + log_rq_stride + 4 < page_shift)
3593 return 1;
3594
3595 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3596}
3597
3598int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3599 struct mlx4_vhcr *vhcr,
3600 struct mlx4_cmd_mailbox *inbox,
3601 struct mlx4_cmd_mailbox *outbox,
3602 struct mlx4_cmd_info *cmd)
3603{
3604 int err;
3605 int srqn = vhcr->in_modifier;
3606 struct res_mtt *mtt;
Bjorn Helgaasc1c52db2015-05-14 18:17:08 -05003607 struct res_srq *srq = NULL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003608 struct mlx4_srq_context *srqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003609 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003610
3611 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3612 return -EINVAL;
3613
3614 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3615 if (err)
3616 return err;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003617 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003618 if (err)
3619 goto ex_abort;
3620 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3621 mtt);
3622 if (err)
3623 goto ex_put_mtt;
3624
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003625 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3626 if (err)
3627 goto ex_put_mtt;
3628
3629 atomic_inc(&mtt->ref_count);
3630 srq->mtt = mtt;
3631 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3632 res_end_move(dev, slave, RES_SRQ, srqn);
3633 return 0;
3634
3635ex_put_mtt:
3636 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3637ex_abort:
3638 res_abort_move(dev, slave, RES_SRQ, srqn);
3639
3640 return err;
3641}
3642
3643int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3644 struct mlx4_vhcr *vhcr,
3645 struct mlx4_cmd_mailbox *inbox,
3646 struct mlx4_cmd_mailbox *outbox,
3647 struct mlx4_cmd_info *cmd)
3648{
3649 int err;
3650 int srqn = vhcr->in_modifier;
Bjorn Helgaasc1c52db2015-05-14 18:17:08 -05003651 struct res_srq *srq = NULL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003652
3653 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3654 if (err)
3655 return err;
3656 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3657 if (err)
3658 goto ex_abort;
3659 atomic_dec(&srq->mtt->ref_count);
3660 if (srq->cq)
3661 atomic_dec(&srq->cq->ref_count);
3662 res_end_move(dev, slave, RES_SRQ, srqn);
3663
3664 return 0;
3665
3666ex_abort:
3667 res_abort_move(dev, slave, RES_SRQ, srqn);
3668
3669 return err;
3670}
3671
3672int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3673 struct mlx4_vhcr *vhcr,
3674 struct mlx4_cmd_mailbox *inbox,
3675 struct mlx4_cmd_mailbox *outbox,
3676 struct mlx4_cmd_info *cmd)
3677{
3678 int err;
3679 int srqn = vhcr->in_modifier;
3680 struct res_srq *srq;
3681
3682 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3683 if (err)
3684 return err;
3685 if (srq->com.from_state != RES_SRQ_HW) {
3686 err = -EBUSY;
3687 goto out;
3688 }
3689 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3690out:
3691 put_res(dev, slave, srqn, RES_SRQ);
3692 return err;
3693}
3694
3695int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3696 struct mlx4_vhcr *vhcr,
3697 struct mlx4_cmd_mailbox *inbox,
3698 struct mlx4_cmd_mailbox *outbox,
3699 struct mlx4_cmd_info *cmd)
3700{
3701 int err;
3702 int srqn = vhcr->in_modifier;
3703 struct res_srq *srq;
3704
3705 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3706 if (err)
3707 return err;
3708
3709 if (srq->com.from_state != RES_SRQ_HW) {
3710 err = -EBUSY;
3711 goto out;
3712 }
3713
3714 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3715out:
3716 put_res(dev, slave, srqn, RES_SRQ);
3717 return err;
3718}
3719
3720int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3721 struct mlx4_vhcr *vhcr,
3722 struct mlx4_cmd_mailbox *inbox,
3723 struct mlx4_cmd_mailbox *outbox,
3724 struct mlx4_cmd_info *cmd)
3725{
3726 int err;
3727 int qpn = vhcr->in_modifier & 0x7fffff;
3728 struct res_qp *qp;
3729
3730 err = get_res(dev, slave, qpn, RES_QP, &qp);
3731 if (err)
3732 return err;
3733 if (qp->com.from_state != RES_QP_HW) {
3734 err = -EBUSY;
3735 goto out;
3736 }
3737
3738 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3739out:
3740 put_res(dev, slave, qpn, RES_QP);
3741 return err;
3742}
3743
Jack Morgenstein54679e12012-08-03 08:40:43 +00003744int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3745 struct mlx4_vhcr *vhcr,
3746 struct mlx4_cmd_mailbox *inbox,
3747 struct mlx4_cmd_mailbox *outbox,
3748 struct mlx4_cmd_info *cmd)
3749{
3750 struct mlx4_qp_context *context = inbox->buf + 8;
3751 adjust_proxy_tun_qkey(dev, vhcr, context);
3752 update_pkey_index(dev, slave, inbox);
3753 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3754}
3755
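/* Rewrite the port bit (bit 6) of the sched_queue fields from the
 * slave's virtual port numbering to the real physical port.
 */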
Matan Barak449fc482014-03-19 18:11:52 +02003756static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3757 struct mlx4_qp_context *qpc,
3758 struct mlx4_cmd_mailbox *inbox)
3759{
3760 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3761 u8 pri_sched_queue;
3762 int port = mlx4_slave_convert_port(
3763 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3764
3765 if (port < 0)
3766 return -EINVAL;
3767
3768 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3769 ((port & 1) << 6);
3770
Or Gerlitzf40e99e2015-05-21 15:14:08 +03003771 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3772 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
Matan Barak449fc482014-03-19 18:11:52 +02003773 qpc->pri_path.sched_queue = pri_sched_queue;
3774 }
3775
3776 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3777 port = mlx4_slave_convert_port(
3778 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3779 + 1) - 1;
3780 if (port < 0)
3781 return -EINVAL;
3782 qpc->alt_path.sched_queue =
3783 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3784 (port & 1) << 6;
3785 }
3786 return 0;
3787}
3788
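/* For Ethernet QPs, make sure the smac index in the QP context maps to
 * a MAC that is actually registered to the slave.
 */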
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02003789static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3790 struct mlx4_qp_context *qpc,
3791 struct mlx4_cmd_mailbox *inbox)
3792{
3793 u64 mac;
3794 int port;
3795 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3796 u8 sched = *(u8 *)(inbox->buf + 64);
3797 u8 smac_ix;
3798
3799 port = (sched >> 6 & 1) + 1;
3800 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3801 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3802 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3803 return -ENOENT;
3804 }
3805 return 0;
3806}
3807
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003808int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3809 struct mlx4_vhcr *vhcr,
3810 struct mlx4_cmd_mailbox *inbox,
3811 struct mlx4_cmd_mailbox *outbox,
3812 struct mlx4_cmd_info *cmd)
3813{
Jack Morgenstein54679e12012-08-03 08:40:43 +00003814 int err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003815 struct mlx4_qp_context *qpc = inbox->buf + 8;
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003816 int qpn = vhcr->in_modifier & 0x7fffff;
3817 struct res_qp *qp;
3818 u8 orig_sched_queue;
Rony Efraimf0f829b2013-11-07 12:19:51 +02003819 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3820 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3821 u8 orig_pri_path_fl = qpc->pri_path.fl;
3822 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3823 u8 orig_feup = qpc->pri_path.feup;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003824
Matan Barak449fc482014-03-19 18:11:52 +02003825 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3826 if (err)
3827 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003828 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003829 if (err)
3830 return err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003831
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02003832 if (roce_verify_mac(dev, slave, qpc, inbox))
3833 return -EINVAL;
3834
Jack Morgenstein54679e12012-08-03 08:40:43 +00003835 update_pkey_index(dev, slave, inbox);
3836 update_gid(dev, inbox, (u8)slave);
3837 adjust_proxy_tun_qkey(dev, vhcr, qpc);
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003838 orig_sched_queue = qpc->pri_path.sched_queue;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003839
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003840 err = get_res(dev, slave, qpn, RES_QP, &qp);
3841 if (err)
3842 return err;
3843 if (qp->com.from_state != RES_QP_HW) {
3844 err = -EBUSY;
3845 goto out;
3846 }
3847
Maor Gottlieb9a892832015-10-15 14:44:38 +03003848 err = update_vport_qp_param(dev, inbox, slave, qpn);
3849 if (err)
3850 goto out;
3851
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003852 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3853out:
3854 /* if no error, save sched queue value passed in by VF. This is
3855 * essentially the QOS value provided by the VF. This will be useful
3856 * if we allow dynamic changes from VST back to VGT
3857 */
Rony Efraimf0f829b2013-11-07 12:19:51 +02003858 if (!err) {
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003859 qp->sched_queue = orig_sched_queue;
Rony Efraimf0f829b2013-11-07 12:19:51 +02003860 qp->vlan_control = orig_vlan_control;
3861 qp->fvl_rx = orig_fvl_rx;
3862 qp->pri_path_fl = orig_pri_path_fl;
3863 qp->vlan_index = orig_vlan_index;
3864 qp->feup = orig_feup;
3865 }
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003866 put_res(dev, slave, qpn, RES_QP);
3867 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003868}
3869
3870int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3871 struct mlx4_vhcr *vhcr,
3872 struct mlx4_cmd_mailbox *inbox,
3873 struct mlx4_cmd_mailbox *outbox,
3874 struct mlx4_cmd_info *cmd)
3875{
3876 int err;
3877 struct mlx4_qp_context *context = inbox->buf + 8;
3878
Matan Barak449fc482014-03-19 18:11:52 +02003879 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3880 if (err)
3881 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003882 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003883 if (err)
3884 return err;
3885
3886 update_pkey_index(dev, slave, inbox);
3887 update_gid(dev, inbox, (u8)slave);
3888 adjust_proxy_tun_qkey(dev, vhcr, context);
3889 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3890}
3891
3892int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3893 struct mlx4_vhcr *vhcr,
3894 struct mlx4_cmd_mailbox *inbox,
3895 struct mlx4_cmd_mailbox *outbox,
3896 struct mlx4_cmd_info *cmd)
3897{
3898 int err;
3899 struct mlx4_qp_context *context = inbox->buf + 8;
3900
Matan Barak449fc482014-03-19 18:11:52 +02003901 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3902 if (err)
3903 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003904 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003905 if (err)
3906 return err;
3907
3908 update_pkey_index(dev, slave, inbox);
3909 update_gid(dev, inbox, (u8)slave);
3910 adjust_proxy_tun_qkey(dev, vhcr, context);
3911 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3912}
3913
3915int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3916 struct mlx4_vhcr *vhcr,
3917 struct mlx4_cmd_mailbox *inbox,
3918 struct mlx4_cmd_mailbox *outbox,
3919 struct mlx4_cmd_info *cmd)
3920{
3921 struct mlx4_qp_context *context = inbox->buf + 8;
Matan Barak449fc482014-03-19 18:11:52 +02003922 int err = adjust_qp_sched_queue(dev, slave, context, inbox);

3923 if (err)
3924 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003925 adjust_proxy_tun_qkey(dev, vhcr, context);
3926 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3927}
3928
3929int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3930 struct mlx4_vhcr *vhcr,
3931 struct mlx4_cmd_mailbox *inbox,
3932 struct mlx4_cmd_mailbox *outbox,
3933 struct mlx4_cmd_info *cmd)
3934{
3935 int err;
3936 struct mlx4_qp_context *context = inbox->buf + 8;
3937
Matan Barak449fc482014-03-19 18:11:52 +02003938 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3939 if (err)
3940 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003941 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003942 if (err)
3943 return err;
3944
3945 adjust_proxy_tun_qkey(dev, vhcr, context);
3946 update_gid(dev, inbox, (u8)slave);
3947 update_pkey_index(dev, slave, inbox);
3948 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3949}
3950
3951int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3952 struct mlx4_vhcr *vhcr,
3953 struct mlx4_cmd_mailbox *inbox,
3954 struct mlx4_cmd_mailbox *outbox,
3955 struct mlx4_cmd_info *cmd)
3956{
3957 int err;
3958 struct mlx4_qp_context *context = inbox->buf + 8;
3959
Matan Barak449fc482014-03-19 18:11:52 +02003960 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3961 if (err)
3962 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003963 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003964 if (err)
3965 return err;
3966
3967 adjust_proxy_tun_qkey(dev, vhcr, context);
3968 update_gid(dev, inbox, (u8)slave);
3969 update_pkey_index(dev, slave, inbox);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003970 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3971}
3972
3973int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3974 struct mlx4_vhcr *vhcr,
3975 struct mlx4_cmd_mailbox *inbox,
3976 struct mlx4_cmd_mailbox *outbox,
3977 struct mlx4_cmd_info *cmd)
3978{
3979 int err;
3980 int qpn = vhcr->in_modifier & 0x7fffff;
3981 struct res_qp *qp;
3982
3983 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3984 if (err)
3985 return err;
3986 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3987 if (err)
3988 goto ex_abort;
3989
3990 atomic_dec(&qp->mtt->ref_count);
3991 atomic_dec(&qp->rcq->ref_count);
3992 atomic_dec(&qp->scq->ref_count);
3993 if (qp->srq)
3994 atomic_dec(&qp->srq->ref_count);
3995 res_end_move(dev, slave, RES_QP, qpn);
3996 return 0;
3997
3998ex_abort:
3999 res_abort_move(dev, slave, RES_QP, qpn);
4000
4001 return err;
4002}
4003
4004static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
4005 struct res_qp *rqp, u8 *gid)
4006{
4007 struct res_gid *res;
4008
4009 list_for_each_entry(res, &rqp->mcg_list, list) {
4010 if (!memcmp(res->gid, gid, 16))
4011 return res;
4012 }
4013 return NULL;
4014}
4015
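/* Record a multicast attachment on the QP's mcg_list so that it can be
 * detached again when the QP or the slave is cleaned up.
 */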
4016static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00004017 u8 *gid, enum mlx4_protocol prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004018 enum mlx4_steer_type steer, u64 reg_id)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004019{
4020 struct res_gid *res;
4021 int err;
4022
4023 res = kzalloc(sizeof(*res), GFP_KERNEL);
4024 if (!res)
4025 return -ENOMEM;
4026
4027 spin_lock_irq(&rqp->mcg_spl);
4028 if (find_gid(dev, slave, rqp, gid)) {
4029 kfree(res);
4030 err = -EEXIST;
4031 } else {
4032 memcpy(res->gid, gid, 16);
4033 res->prot = prot;
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00004034 res->steer = steer;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004035 res->reg_id = reg_id;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004036 list_add_tail(&res->list, &rqp->mcg_list);
4037 err = 0;
4038 }
4039 spin_unlock_irq(&rqp->mcg_spl);
4040
4041 return err;
4042}
4043
4044static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00004045 u8 *gid, enum mlx4_protocol prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004046 enum mlx4_steer_type steer, u64 *reg_id)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004047{
4048 struct res_gid *res;
4049 int err;
4050
4051 spin_lock_irq(&rqp->mcg_spl);
4052 res = find_gid(dev, slave, rqp, gid);
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00004053 if (!res || res->prot != prot || res->steer != steer) {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004054 err = -EINVAL;
4055 } else {
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004056 *reg_id = res->reg_id;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004057 list_del(&res->list);
4058 kfree(res);
4059 err = 0;
4060 }
4061 spin_unlock_irq(&rqp->mcg_spl);
4062
4063 return err;
4064}
4065
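/* Attach a QP to a multicast group or steering rule according to the
 * device's steering mode, converting the slave's port number to the
 * real port on the way.
 */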
Matan Barak449fc482014-03-19 18:11:52 +02004066static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4067 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004068 enum mlx4_steer_type type, u64 *reg_id)
4069{
4070 switch (dev->caps.steering_mode) {
Matan Barak449fc482014-03-19 18:11:52 +02004071 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4072 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4073 if (port < 0)
4074 return port;
4075 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004076 block_loopback, prot,
4077 reg_id);
Matan Barak449fc482014-03-19 18:11:52 +02004078 }
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004079 case MLX4_STEERING_MODE_B0:
Matan Barak449fc482014-03-19 18:11:52 +02004080 if (prot == MLX4_PROT_ETH) {
4081 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4082 if (port < 0)
4083 return port;
4084 gid[5] = port;
4085 }
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004086 return mlx4_qp_attach_common(dev, qp, gid,
4087 block_loopback, prot, type);
4088 default:
4089 return -EINVAL;
4090 }
4091}
4092
Matan Barak449fc482014-03-19 18:11:52 +02004093static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4094 u8 gid[16], enum mlx4_protocol prot,
4095 enum mlx4_steer_type type, u64 reg_id)
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004096{
4097 switch (dev->caps.steering_mode) {
4098 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4099 return mlx4_flow_detach(dev, reg_id);
4100 case MLX4_STEERING_MODE_B0:
4101 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4102 default:
4103 return -EINVAL;
4104 }
4105}
4106
Jack Morgenstein531d9012014-05-04 17:07:22 +03004107static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4108 u8 *gid, enum mlx4_protocol prot)
4109{
4110 int real_port;
4111
4112 if (prot != MLX4_PROT_ETH)
4113 return 0;
4114
4115 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4116 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4117 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4118 if (real_port < 0)
4119 return -EINVAL;
4120 gid[5] = real_port;
4121 }
4122
4123 return 0;
4124}
4125
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004126int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4127 struct mlx4_vhcr *vhcr,
4128 struct mlx4_cmd_mailbox *inbox,
4129 struct mlx4_cmd_mailbox *outbox,
4130 struct mlx4_cmd_info *cmd)
4131{
4132 struct mlx4_qp qp; /* dummy for calling attach/detach */
4133 u8 *gid = inbox->buf;
4134 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
Or Gerlitz162344e2012-05-15 10:34:57 +00004135 int err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004136 int qpn;
4137 struct res_qp *rqp;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004138 u64 reg_id = 0;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004139 int attach = vhcr->op_modifier;
4140 int block_loopback = vhcr->in_modifier >> 31;
4141 u8 steer_type_mask = 2;
Eugenia Emantayev75c60622012-02-15 06:22:49 +00004142 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004143
4144 qpn = vhcr->in_modifier & 0xffffff;
4145 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4146 if (err)
4147 return err;
4148
4149 qp.qpn = qpn;
4150 if (attach) {
Matan Barak449fc482014-03-19 18:11:52 +02004151 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004152 type, &reg_id);
4153 if (err) {
4154 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004155 goto ex_put;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004156 }
4157 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004158 if (err)
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004159 goto ex_detach;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004160 } else {
Jack Morgenstein531d9012014-05-04 17:07:22 +03004161 err = mlx4_adjust_port(dev, slave, gid, prot);
4162 if (err)
4163 goto ex_put;
4164
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004165 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004166 if (err)
4167 goto ex_put;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004168
4169 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4170 if (err)
4171 pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
4172 qpn, reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004173 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004174 put_res(dev, slave, qpn, RES_QP);
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004175 return err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004176
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004177ex_detach:
4178 qp_detach(dev, &qp, gid, prot, type, reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004179ex_put:
4180 put_res(dev, slave, qpn, RES_QP);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004181 return err;
4182}
4183
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004184/*
4185 * MAC validation for Flow Steering rules.
4186 * A VF can attach rules only with a MAC address that is assigned to it.
4187 */
4188static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4189 struct list_head *rlist)
4190{
4191 struct mac_res *res, *tmp;
4192 __be64 be_mac;
4193
4194 /* make sure it isn't a multicast or broadcast MAC */
4195 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4196 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4197 list_for_each_entry_safe(res, tmp, rlist, list) {
4198 be_mac = cpu_to_be64(res->mac << 16);
dingtianhongc0623e52013-12-30 15:40:55 +08004199 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004200 return 0;
4201 }
4202 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
4203 eth_header->eth.dst_mac, slave);
4204 return -EINVAL;
4205 }
4206 return 0;
4207}
4208
4209/*
4210 * If the rule is missing its eth header, prepend one carrying a MAC
4211 * address assigned to the VF.
4212 */
4213static int add_eth_header(struct mlx4_dev *dev, int slave,
4214 struct mlx4_cmd_mailbox *inbox,
4215 struct list_head *rlist, int header_id)
4216{
4217 struct mac_res *res, *tmp;
4218 u8 port;
4219 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4220 struct mlx4_net_trans_rule_hw_eth *eth_header;
4221 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4222 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4223 __be64 be_mac = 0;
4224 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4225
4226 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
Hadar Hen Zion015465f2013-01-30 23:07:02 +00004227 port = ctrl->port;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004228 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4229
4230 /* Clear a space in the inbox for eth header */
4231 switch (header_id) {
4232 case MLX4_NET_TRANS_RULE_ID_IPV4:
4233 ip_header =
4234 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4235 memmove(ip_header, eth_header,
4236 sizeof(*ip_header) + sizeof(*l4_header));
4237 break;
4238 case MLX4_NET_TRANS_RULE_ID_TCP:
4239 case MLX4_NET_TRANS_RULE_ID_UDP:
4240 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4241 (eth_header + 1);
4242 memmove(l4_header, eth_header, sizeof(*l4_header));
4243 break;
4244 default:
4245 return -EINVAL;
4246 }
4247 list_for_each_entry_safe(res, tmp, rlist, list) {
4248 if (port == res->port) {
4249 be_mac = cpu_to_be64(res->mac << 16);
4250 break;
4251 }
4252 }
4253 if (!be_mac) {
Joe Perches1a91de22014-05-07 12:52:57 -07004254 pr_err("Failed adding eth header to FS rule: can't find matching MAC for port %d\n",
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004255 port);
4256 return -EINVAL;
4257 }
4258
4259 memset(eth_header, 0, sizeof(*eth_header));
4260 eth_header->size = sizeof(*eth_header) >> 2;
4261 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4262 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4263 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4264
4265 return 0;
4267}
4268
Maor Gottlieb9a892832015-10-15 14:44:38 +03004269#define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
4270 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
4271 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
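/* A slave may update only the smac index and the source-check-LB bit
 * of a QP; any other modification mask is rejected.
 */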
Matan Barakce8d9e02014-05-15 15:29:27 +03004272int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4273 struct mlx4_vhcr *vhcr,
4274 struct mlx4_cmd_mailbox *inbox,
4275 struct mlx4_cmd_mailbox *outbox,
4276 struct mlx4_cmd_info *cmd_info)
4277{
4278 int err;
4279 u32 qpn = vhcr->in_modifier & 0xffffff;
4280 struct res_qp *rqp;
4281 u64 mac;
4282 unsigned port;
4283 u64 pri_addr_path_mask;
4284 struct mlx4_update_qp_context *cmd;
4285 int smac_index;
4286
4287 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4288
4289 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4290 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4291 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4292 return -EPERM;
4293
Maor Gottlieb9a892832015-10-15 14:44:38 +03004294 if ((pri_addr_path_mask &
4295 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4296 !(dev->caps.flags2 &
4297 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
Christophe Jaillet5d4de162016-07-02 14:31:05 +02004298 mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4299 slave);
Or Gerlitz423b3ae2017-02-23 12:02:41 +02004300 return -EOPNOTSUPP;
Maor Gottlieb9a892832015-10-15 14:44:38 +03004301 }
4302
Matan Barakce8d9e02014-05-15 15:29:27 +03004303 /* Just change the smac for the QP */
4304 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4305 if (err) {
4306 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4307 return err;
4308 }
4309
4310 port = (rqp->sched_queue >> 6 & 1) + 1;
Matan Barakb7834752014-09-10 16:41:55 +03004311
4312 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4313 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4314 err = mac_find_smac_ix_in_slave(dev, slave, port,
4315 smac_index, &mac);
4316
4317 if (err) {
4318 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4319 qpn, smac_index);
4320 goto err_mac;
4321 }
Matan Barakce8d9e02014-05-15 15:29:27 +03004322 }
4323
4324 err = mlx4_cmd(dev, inbox->dma,
4325 vhcr->in_modifier, 0,
4326 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4327 MLX4_CMD_NATIVE);
4328 if (err) {
4329 mlx4_err(dev, "Failed to update qp 0x%x, command failed\n", qpn);
4330 goto err_mac;
4331 }
4332
4333err_mac:
4334 put_res(dev, slave, qpn, RES_QP);
4335 return err;
4336}
4337
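/* Size of an attach rule mailbox: the control segment plus the sizes
 * reported by the chain of rule headers that follows it.
 */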
Moni Shoua78efed22015-12-06 18:07:40 +02004338static u32 qp_attach_mbox_size(void *mbox)
4339{
4340 u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4341 struct _rule_hw *rule_header;
4342
4343 rule_header = (struct _rule_hw *)(mbox + size);
4344
4345 while (rule_header->size) {
4346 size += rule_header->size * sizeof(u32);
4347 rule_header += 1;
4348 }
4349 return size;
4350}
4351
4352static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4353
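/* Attach a flow steering rule on behalf of a slave: validate and fix
 * up the rule headers, pass the attach to FW, track the new rule and,
 * if the device is bonded, install a mirror copy on the other port.
 */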
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00004354int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4355 struct mlx4_vhcr *vhcr,
4356 struct mlx4_cmd_mailbox *inbox,
4357 struct mlx4_cmd_mailbox *outbox,
4358 struct mlx4_cmd_info *cmd)
4359{
4361 struct mlx4_priv *priv = mlx4_priv(dev);
4362 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4363 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004364 int err;
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004365 int qpn;
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004366 struct res_qp *rqp;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004367 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4368 struct _rule_hw *rule_header;
4369 int header_id;
Moni Shoua78efed22015-12-06 18:07:40 +02004370 struct res_fs_rule *rrule;
4371 u32 mbox_size;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004372
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00004373 if (dev->caps.steering_mode !=
4374 MLX4_STEERING_MODE_DEVICE_MANAGED)
4375 return -EOPNOTSUPP;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004376
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004377 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
Andrzej Hajda2b2b31c2015-12-14 11:05:58 +01004378 err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4379 if (err <= 0)
Matan Barak449fc482014-03-19 18:11:52 +02004380 return -EINVAL;
Andrzej Hajda2b2b31c2015-12-14 11:05:58 +01004381 ctrl->port = err;
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004382 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004383 err = get_res(dev, slave, qpn, RES_QP, &rqp);
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004384 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07004385 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004386 return err;
4387 }
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004388 rule_header = (struct _rule_hw *)(ctrl + 1);
4389 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4390
Matan Barak48564132015-05-31 09:30:15 +03004391 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
Jack Morgenstein10b1c042016-12-29 18:37:13 +02004392 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
Matan Barak48564132015-05-31 09:30:15 +03004393
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004394 switch (header_id) {
4395 case MLX4_NET_TRANS_RULE_ID_ETH:
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004396 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4397 err = -EINVAL;
Moni Shoua78efed22015-12-06 18:07:40 +02004398 goto err_put_qp;
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004399 }
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004400 break;
Jack Morgenstein60396682012-10-03 15:38:48 +00004401 case MLX4_NET_TRANS_RULE_ID_IB:
4402 break;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004403 case MLX4_NET_TRANS_RULE_ID_IPV4:
4404 case MLX4_NET_TRANS_RULE_ID_TCP:
4405 case MLX4_NET_TRANS_RULE_ID_UDP:
Joe Perches1a91de22014-05-07 12:52:57 -07004406 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004407 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4408 err = -EINVAL;
Moni Shoua78efed22015-12-06 18:07:40 +02004409 goto err_put_qp;
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004410 }
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004411 vhcr->in_modifier +=
4412 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4413 break;
4414 default:
Joe Perches1a91de22014-05-07 12:52:57 -07004415 pr_err("Corrupted mailbox\n");
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004416 err = -EINVAL;
Moni Shoua78efed22015-12-06 18:07:40 +02004417 goto err_put_qp;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004418 }
4419
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004420 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4421 vhcr->in_modifier, 0,
4422 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4423 MLX4_CMD_NATIVE);
4424 if (err)
Moni Shoua78efed22015-12-06 18:07:40 +02004425 goto err_put_qp;
4426
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004428 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004429 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07004430 mlx4_err(dev, "Failed to add flow steering resources\n");
Moni Shoua78efed22015-12-06 18:07:40 +02004431 goto err_detach;
4432 }
4433
4434 err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4435 if (err)
4436 goto err_detach;
4437
4438 mbox_size = qp_attach_mbox_size(inbox->buf);
4439 rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4440 if (!rrule->mirr_mbox) {
4441 err = -ENOMEM;
4442 goto err_put_rule;
4443 }
4444 rrule->mirr_mbox_size = mbox_size;
4445 rrule->mirr_rule_id = 0;
4446 memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4447
4448 /* set different port */
4449 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4450 if (ctrl->port == 1)
4451 ctrl->port = 2;
4452 else
4453 ctrl->port = 1;
4454
4455 if (mlx4_is_bonded(dev))
4456 mlx4_do_mirror_rule(dev, rrule);
4457
4458 atomic_inc(&rqp->ref_count);
4459
4460err_put_rule:
4461 put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4462err_detach:
4463 /* detach rule on error */
4464 if (err)
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004465 mlx4_cmd(dev, vhcr->out_param, 0, 0,
Hadar Hen Zion2065b382012-12-06 17:11:58 +00004466 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004467 MLX4_CMD_NATIVE);
Moni Shoua78efed22015-12-06 18:07:40 +02004468err_put_qp:
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004469 put_res(dev, slave, qpn, RES_QP);
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004470 return err;
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00004471}
4472
Moni Shoua78efed22015-12-06 18:07:40 +02004473static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4474{
4475 int err;
4476
4477 err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4478 if (err) {
4479 mlx4_err(dev, "Failed to remove flow steering resources\n");
4480 return err;
4481 }
4482
4483 mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4484 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4485 return 0;
4486}
4487
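/* Detach a flow steering rule: undo its bonded mirror copy (if any),
 * release the tracked resource and pass the detach on to FW.
 */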
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00004488int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4489 struct mlx4_vhcr *vhcr,
4490 struct mlx4_cmd_mailbox *inbox,
4491 struct mlx4_cmd_mailbox *outbox,
4492 struct mlx4_cmd_info *cmd)
4493{
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004494 int err;
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004495 struct res_qp *rqp;
4496 struct res_fs_rule *rrule;
Moni Shoua78efed22015-12-06 18:07:40 +02004497 u64 mirr_reg_id;
Jack Morgenstein3b01fe72016-12-29 18:37:09 +02004498 int qpn;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004499
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00004500 if (dev->caps.steering_mode !=
4501 MLX4_STEERING_MODE_DEVICE_MANAGED)
4502 return -EOPNOTSUPP;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004503
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004504 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4505 if (err)
4506 return err;
Moni Shoua78efed22015-12-06 18:07:40 +02004507
4508 if (!rrule->mirr_mbox) {
4509 mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4510 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4511 return -EINVAL;
4512 }
4513 mirr_reg_id = rrule->mirr_rule_id;
4514 kfree(rrule->mirr_mbox);
Jack Morgenstein3b01fe72016-12-29 18:37:09 +02004515 qpn = rrule->qpn;
Moni Shoua78efed22015-12-06 18:07:40 +02004516
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004517 /* Release the rule from busy state before removal */
4518 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
Jack Morgenstein3b01fe72016-12-29 18:37:09 +02004519 err = get_res(dev, slave, qpn, RES_QP, &rqp);
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004520 if (err)
4521 return err;
4522
Moni Shoua78efed22015-12-06 18:07:40 +02004523 if (mirr_reg_id && mlx4_is_bonded(dev)) {
4524 err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4525 if (err) {
4526 mlx4_err(dev, "Failed to get resource of mirror rule\n");
4527 } else {
4528 put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4529 mlx4_undo_mirror_rule(dev, rrule);
4530 }
4531 }
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004532 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4533 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07004534 mlx4_err(dev, "Failed to remove flow steering resources\n");
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004535 goto out;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004536 }
4537
4538 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4539 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4540 MLX4_CMD_NATIVE);
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004541 if (!err)
4542 atomic_dec(&rqp->ref_count);
4543out:
Jack Morgenstein3b01fe72016-12-29 18:37:09 +02004544 put_res(dev, slave, qpn, RES_QP);
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004545 return err;
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00004546}
4547
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004548enum {
4549 BUSY_MAX_RETRIES = 10
4550};
4551
4552int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4553 struct mlx4_vhcr *vhcr,
4554 struct mlx4_cmd_mailbox *inbox,
4555 struct mlx4_cmd_mailbox *outbox,
4556 struct mlx4_cmd_info *cmd)
4557{
4558 int err;
4559 int index = vhcr->in_modifier & 0xffff;
4560
4561 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4562 if (err)
4563 return err;
4564
4565 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4566 put_res(dev, slave, index, RES_COUNTER);
4567 return err;
4568}
4569
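/* Detach all multicast groups and steering rules still recorded on the
 * QP, according to the steering mode under which they were attached.
 */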
4570static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4571{
4572 struct res_gid *rgid;
4573 struct res_gid *tmp;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004574 struct mlx4_qp qp; /* dummy for calling attach/detach */
4575
4576 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004577 switch (dev->caps.steering_mode) {
4578 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4579 mlx4_flow_detach(dev, rgid->reg_id);
4580 break;
4581 case MLX4_STEERING_MODE_B0:
4582 qp.qpn = rqp->local_qpn;
4583 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4584 rgid->prot, rgid->steer);
4585 break;
4586 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004587 list_del(&rgid->list);
4588 kfree(rgid);
4589 }
4590}
4591
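/* Mark all of the slave's resources of the given type as busy/removing
 * so that no new references can be taken; returns the number of
 * resources that could not be claimed because they were still busy.
 */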
4592static int _move_all_busy(struct mlx4_dev *dev, int slave,
4593 enum mlx4_resource type, int print)
4594{
4595 struct mlx4_priv *priv = mlx4_priv(dev);
4596 struct mlx4_resource_tracker *tracker =
4597 &priv->mfunc.master.res_tracker;
4598 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4599 struct res_common *r;
4600 struct res_common *tmp;
4601 int busy;
4602
4603 busy = 0;
4604 spin_lock_irq(mlx4_tlock(dev));
4605 list_for_each_entry_safe(r, tmp, rlist, list) {
4606 if (r->owner == slave) {
4607 if (!r->removing) {
4608 if (r->state == RES_ANY_BUSY) {
4609 if (print)
4610 mlx4_dbg(dev,
Hadar Hen Zionaa1ec3d2012-07-05 04:03:42 +00004611 "%s id 0x%llx is busy\n",
Jack Morgenstein956463732014-06-08 13:49:45 +03004612 resource_str(type),
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004613 r->res_id);
4614 ++busy;
4615 } else {
4616 r->from_state = r->state;
4617 r->state = RES_ANY_BUSY;
4618 r->removing = 1;
4619 }
4620 }
4621 }
4622 }
4623 spin_unlock_irq(mlx4_tlock(dev));
4624
4625 return busy;
4626}
4627
4628static int move_all_busy(struct mlx4_dev *dev, int slave,
4629 enum mlx4_resource type)
4630{
4631 unsigned long begin;
4632 int busy;
4633
4634 begin = jiffies;
4635 do {
4636 busy = _move_all_busy(dev, slave, type, 0);
4637 if (time_after(jiffies, begin + 5 * HZ))
4638 break;
4639 if (busy)
4640 cond_resched();
4641 } while (busy);
4642
4643 if (busy)
4644 busy = _move_all_busy(dev, slave, type, 1);
4645
4646 return busy;
4647}
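
/* Destroy all QPs owned by the slave: walk each QP back from its
 * current state to reset, releasing ICM, reference counts and the
 * reserved QPN range along the way.
 */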
4648static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4649{
4650 struct mlx4_priv *priv = mlx4_priv(dev);
4651 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4652 struct list_head *qp_list =
4653 &tracker->slave_list[slave].res_list[RES_QP];
4654 struct res_qp *qp;
4655 struct res_qp *tmp;
4656 int state;
4657 u64 in_param;
4658 int qpn;
4659 int err;
4660
4661 err = move_all_busy(dev, slave, RES_QP);
4662 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004663 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4664 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004665
4666 spin_lock_irq(mlx4_tlock(dev));
4667 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4668 spin_unlock_irq(mlx4_tlock(dev));
4669 if (qp->com.owner == slave) {
4670 qpn = qp->com.res_id;
4671 detach_qp(dev, slave, qp);
4672 state = qp->com.from_state;
4673 while (state != 0) {
4674 switch (state) {
4675 case RES_QP_RESERVED:
4676 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004677 rb_erase(&qp->com.node,
4678 &tracker->res_tree[RES_QP]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004679 list_del(&qp->com.list);
4680 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004681 if (!valid_reserved(dev, slave, qpn)) {
4682 __mlx4_qp_release_range(dev, qpn, 1);
4683 mlx4_release_resource(dev, slave,
4684 RES_QP, 1, 0);
4685 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004686 kfree(qp);
4687 state = 0;
4688 break;
4689 case RES_QP_MAPPED:
4690 if (!valid_reserved(dev, slave, qpn))
4691 __mlx4_qp_free_icm(dev, qpn);
4692 state = RES_QP_RESERVED;
4693 break;
4694 case RES_QP_HW:
4695 in_param = slave;
4696 err = mlx4_cmd(dev, in_param,
4697 qp->local_qpn, 2,
4698 MLX4_CMD_2RST_QP,
4699 MLX4_CMD_TIME_CLASS_A,
4700 MLX4_CMD_NATIVE);
4701 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004702 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4703 slave, qp->local_qpn);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004704 atomic_dec(&qp->rcq->ref_count);
4705 atomic_dec(&qp->scq->ref_count);
4706 atomic_dec(&qp->mtt->ref_count);
4707 if (qp->srq)
4708 atomic_dec(&qp->srq->ref_count);
4709 state = RES_QP_MAPPED;
4710 break;
4711 default:
4712 state = 0;
4713 }
4714 }
4715 }
4716 spin_lock_irq(mlx4_tlock(dev));
4717 }
4718 spin_unlock_irq(mlx4_tlock(dev));
4719}
4720
4721static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4722{
4723 struct mlx4_priv *priv = mlx4_priv(dev);
4724 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4725 struct list_head *srq_list =
4726 &tracker->slave_list[slave].res_list[RES_SRQ];
4727 struct res_srq *srq;
4728 struct res_srq *tmp;
4729 int state;
4730 u64 in_param;
4731 LIST_HEAD(tlist);
4732 int srqn;
4733 int err;
4734
4735 err = move_all_busy(dev, slave, RES_SRQ);
4736 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004737 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4738 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004739
4740 spin_lock_irq(mlx4_tlock(dev));
4741 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4742 spin_unlock_irq(mlx4_tlock(dev));
4743 if (srq->com.owner == slave) {
4744 srqn = srq->com.res_id;
4745 state = srq->com.from_state;
4746 while (state != 0) {
4747 switch (state) {
4748 case RES_SRQ_ALLOCATED:
4749 __mlx4_srq_free_icm(dev, srqn);
4750 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004751 rb_erase(&srq->com.node,
4752 &tracker->res_tree[RES_SRQ]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004753 list_del(&srq->com.list);
4754 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004755 mlx4_release_resource(dev, slave,
4756 RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004757 kfree(srq);
4758 state = 0;
4759 break;
4760
4761 case RES_SRQ_HW:
4762 in_param = slave;
4763 err = mlx4_cmd(dev, in_param, srqn, 1,
4764 MLX4_CMD_HW2SW_SRQ,
4765 MLX4_CMD_TIME_CLASS_A,
4766 MLX4_CMD_NATIVE);
4767 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004768 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004769 slave, srqn);
4770
4771 atomic_dec(&srq->mtt->ref_count);
4772 if (srq->cq)
4773 atomic_dec(&srq->cq->ref_count);
4774 state = RES_SRQ_ALLOCATED;
4775 break;
4776
4777 default:
4778 state = 0;
4779 }
4780 }
4781 }
4782 spin_lock_irq(mlx4_tlock(dev));
4783 }
4784 spin_unlock_irq(mlx4_tlock(dev));
4785}
4786
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

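/* Reclaim all memory regions (MPTs) still owned by @slave, unwinding
 * state by state: HW2SW_MPT out of hardware, free the ICM backing,
 * then release the MPT reservation and its tracker entry.
 */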
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

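/* Reclaim all MTT ranges still owned by @slave; each tracker entry
 * covers (1 << mtt->order) MTT entries starting at its res_id.
 */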
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	LIST_HEAD(tlist);
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

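/* Create the mirror copy of @fs_rule: re-attach the rule from the
 * mirror mailbox saved when it was registered (presumably prepared for
 * the paired port), then track the new rule. The mirror itself carries
 * no mirror mailbox, so it cannot be mirrored again, and the original
 * remembers it through mirr_rule_id.
 */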
static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct res_fs_rule *mirr_rule;
	u64 reg_id;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (!fs_rule->mirr_mbox) {
		mlx4_err(dev, "rule mirroring mailbox is null\n");
		/* don't leak the command mailbox allocated above */
		mlx4_free_cmd_mailbox(dev, mailbox);
		return -EINVAL;
	}
	memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
	err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (err)
		goto err;

	err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
	if (err)
		goto err_detach;

	err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
	if (err)
		goto err_rem;

	fs_rule->mirr_rule_id = reg_id;
	mirr_rule->mirr_rule_id = 0;
	mirr_rule->mirr_mbox_size = 0;
	mirr_rule->mirr_mbox = NULL;
	put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);

	return 0;
err_rem:
	rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
err_detach:
	mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
err:
	return err;
}

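/* Walk the entire RES_FS_RULE tree: on bond, mirror every rule that
 * has a saved mirror mailbox; on unbond, undo the mirror rules (those
 * with no mailbox of their own). Errors are accumulated so one bad
 * rule does not stop the walk.
 */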
static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
	struct rb_node *p;
	struct res_fs_rule *fs_rule;
	int err = 0;
	LIST_HEAD(mirr_list);

	for (p = rb_first(root); p; p = rb_next(p)) {
		fs_rule = rb_entry(p, struct res_fs_rule, com.node);
		if ((bond && fs_rule->mirr_mbox_size) ||
		    (!bond && !fs_rule->mirr_mbox_size))
			list_add_tail(&fs_rule->mirr_list, &mirr_list);
	}

	list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
		if (bond)
			err += mlx4_do_mirror_rule(dev, fs_rule);
		else
			err += mlx4_undo_mirror_rule(dev, fs_rule);
	}
	return err;
}

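/* Entry points called on port bonding transitions. */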
int mlx4_bond_fs_rules(struct mlx4_dev *dev)
{
	return mlx4_mirror_fs_rules(dev, true);
}

int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
{
	return mlx4_mirror_fs_rules(dev, false);
}

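/* Detach from firmware and free every flow steering rule still owned
 * by @slave.
 */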
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					/* the saved mirror mailbox is not
					 * freed anywhere else on this path
					 */
					kfree(fs_rule->mirr_mbox);
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

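/* Reclaim all EQs still owned by @slave; RES_EQ_HW entries are moved
 * back to SW ownership with HW2SW_EQ first. Only the low 10 bits of
 * the res_id encode the EQ number handed to firmware.
 */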
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	LIST_HEAD(tlist);
	int eqn;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					err = mlx4_cmd(dev, slave, eqn & 0x3ff,
						       1, MLX4_CMD_HW2SW_EQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
							 slave, eqn & 0x3ff);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

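/* Reclaim all counters still owned by @slave. Tracker entries are
 * unlinked under the lock while their indices are stashed in a local
 * array; the actual __mlx4_counter_free() calls run after the lock is
 * dropped, and the pass repeats until nothing more is found.
 */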
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int *counters_arr = NULL;
	int i, j;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
			  slave);

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return;

	do {
		i = 0;
		j = 0;
		spin_lock_irq(mlx4_tlock(dev));
		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
			if (counter->com.owner == slave) {
				counters_arr[i++] = counter->com.res_id;
				rb_erase(&counter->com.node,
					 &tracker->res_tree[RES_COUNTER]);
				list_del(&counter->com.list);
				kfree(counter);
			}
		}
		spin_unlock_irq(mlx4_tlock(dev));

		while (j < i) {
			__mlx4_counter_free(dev, counters_arr[j++]);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	} while (i);

	kfree(counters_arr);
}

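/* Reclaim all XRC domains still owned by @slave. */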
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

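/* Reclaim every resource still held by @slave (e.g. when the VF goes
 * away). The rem_slave_* calls run in dependency order (users before
 * the objects they reference, with MTTs near the end), so reference
 * counts have dropped by the time the referenced object is freed.
 */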
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

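/* Deferred work that applies an immediate VST VLAN change to all of a
 * slave's active Ethernet QPs on the given port via UPDATE_QP. QPs not
 * yet in HW, reserved QPs and RSS QPs are left untouched; vlan_id ==
 * MLX4_VGT restores the QP's original, guest-controlled VLAN fields.
 * If every update succeeded, the previously registered VLAN (if any)
 * is unregistered.
 */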
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (work->vlan_proto == htons(ETH_P_8021AD))
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
	else /* vst 802.1Q */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue || /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
				if (work->vlan_proto == htons(ETH_P_8021AD))
					upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
				else
					upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
				upd_context->qp_mask |=
					cpu_to_be64(1ULL <<
						    MLX4_UPD_QP_MASK_QOS_VPP);
				upd_context->qp_context.qos_vport =
					work->qos_vport;
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
}