/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID	(1ull << 63)

struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head list;
	struct rb_node node;
	u64 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
	u64 reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
	atomic_t ref_count;
	u32 qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8 sched_queue;
	__be32 param3;
	u8 vlan_control;
	u8 fvl_rx;
	u8 pri_path_fl;
	u8 vlan_index;
	u8 feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
	int port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common com;
	int port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common com;
	int qpn;
};

static int mlx4_is_eth(struct mlx4_dev *dev, int port)
{
	return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
}

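/* Tracked resources of every type are kept in per-type red-black trees,
 * keyed by res_id.  The two helpers below implement the standard rbtree
 * lookup and insert patterns over struct res_common nodes.
 */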
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	};
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
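/* Charge @count instances of @res_type to @slave (per port when @port > 0).
 * The request is refused if it would exceed the slave's quota, or if the
 * shared free pool cannot cover the portion that is not backed by the
 * slave's guaranteed reservation.
 */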
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}

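/* Undo a previous mlx4_grant_resource() call: return @count instances of
 * @res_type to the free pool and re-earmark as reserved the portion that
 * brings the slave back under its guaranteed amount.
 */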
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
	return;
}

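/* Default quota policy: every function (PF and VFs alike) is guaranteed
 * num_instances / (2 * (num_vfs + 1)) instances and may allocate up to half
 * of the instances plus its guarantee; the PF additionally carries the
 * firmware-reserved MTTs on top of its MTT numbers.
 */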
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
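/* Build the master's resource tracker: per-slave resource lists, per-type
 * rbtrees, and the quota/guarantee tables used by mlx4_grant_resource()
 * and mlx4_release_resource() above.
 */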
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->num_vfs + 1) * sizeof(int),
						       GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for */
					/* both ports. */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] = 0;
				if (t == mlx4_master_func_num(dev))
					res_alloc->res_free = res_alloc->quota[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

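/* Replace the slave's virtual pkey index in the QP context (mailbox byte 35)
 * with the physical index from the master's virt2phys_pkey table.
 */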
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}

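/* Rewrite the GID index (mgid_index) in the QP context so the slave's
 * per-function GID numbering is translated onto the physical GID table,
 * for both the primary and (when modified) the alternate path.
 */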
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}

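/* Enforce the VF vport configuration (VST vlan, QoS, link state, spoof
 * checking) on a QP context written by a slave, skipping the reserved
 * special/proxy/tunnel QPs.
 */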
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	int port;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd */
		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
	return 0;
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{

	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;
	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

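/* Create @count tracker entries of @type starting at @base, owned by @slave,
 * insert them into the per-type rbtree and link them on the slave's resource
 * list; the whole range is rolled back if any id already exists.
 */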
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= base; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		pr_devel("%s-%d: state %s, ref_count %d\n",
			 __func__, __LINE__,
			 mtt_states_str(res->com.state),
			 atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

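/* The *_res_start_move_to() helpers below begin a state transition for a
 * tracked resource: they validate that the requested target state is legal
 * from the current state, mark the resource busy, and return it to the
 * caller.  The transition is later completed with res_end_move() or rolled
 * back with res_abort_move().
 */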
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
		if (r->com.state != RES_SRQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
		err = -EINVAL;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;
		if (srq)
			*srq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}

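/* Per-type allocation handlers invoked on behalf of a slave: each charges
 * the slave's quota via mlx4_grant_resource(), performs the actual
 * allocation with the corresponding __mlx4_* helper, and registers the
 * result in the tracker, rolling back the earlier steps on any failure.
 */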
1524static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1525 u64 in_param, u64 *out_param)
1526{
1527 int err;
1528 int count;
1529 int align;
1530 int base;
1531 int qpn;
1532
1533 switch (op) {
1534 case RES_OP_RESERVE:
1535 count = get_param_l(&in_param);
1536 align = get_param_h(&in_param);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001537 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001538 if (err)
1539 return err;
1540
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001541 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1542 if (err) {
1543 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1544 return err;
1545 }
1546
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001547 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1548 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001549 mlx4_release_resource(dev, slave, RES_QP, count, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001550 __mlx4_qp_release_range(dev, base, count);
1551 return err;
1552 }
1553 set_param_l(out_param, base);
1554 break;
1555 case RES_OP_MAP_ICM:
1556 qpn = get_param_l(&in_param) & 0x7fffff;
1557 if (valid_reserved(dev, slave, qpn)) {
1558 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1559 if (err)
1560 return err;
1561 }
1562
1563 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1564 NULL, 1);
1565 if (err)
1566 return err;
1567
Jack Morgenstein54679e12012-08-03 08:40:43 +00001568 if (!fw_reserved(dev, qpn)) {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001569 err = __mlx4_qp_alloc_icm(dev, qpn);
1570 if (err) {
1571 res_abort_move(dev, slave, RES_QP, qpn);
1572 return err;
1573 }
1574 }
1575
1576 res_end_move(dev, slave, RES_QP, qpn);
1577 break;
1578
1579 default:
1580 err = -EINVAL;
1581 break;
1582 }
1583 return err;
1584}
1585
1586static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1587 u64 in_param, u64 *out_param)
1588{
1589 int err = -EINVAL;
1590 int base;
1591 int order;
1592
1593 if (op != RES_OP_RESERVE_AND_MAP)
1594 return err;
1595
1596 order = get_param_l(&in_param);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001597
1598 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1599 if (err)
1600 return err;
1601
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001602 base = __mlx4_alloc_mtt_range(dev, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001603 if (base == -1) {
1604 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001605 return -ENOMEM;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001606 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001607
1608 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001609 if (err) {
1610 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001611 __mlx4_free_mtt_range(dev, base, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001612 } else {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001613 set_param_l(out_param, base);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001614 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001615
1616 return err;
1617}
1618
1619static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1620 u64 in_param, u64 *out_param)
1621{
1622 int err = -EINVAL;
1623 int index;
1624 int id;
1625 struct res_mpt *mpt;
1626
1627 switch (op) {
1628 case RES_OP_RESERVE:
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001629 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1630 if (err)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001631 break;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001632
1633 index = __mlx4_mpt_reserve(dev);
1634 if (index == -1) {
1635 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1636 break;
1637 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001638 id = index & mpt_mask(dev);
1639
1640 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1641 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001642 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
Shani Michaelib20e5192013-02-06 16:19:08 +00001643 __mlx4_mpt_release(dev, index);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001644 break;
1645 }
1646 set_param_l(out_param, index);
1647 break;
1648 case RES_OP_MAP_ICM:
1649 index = get_param_l(&in_param);
1650 id = index & mpt_mask(dev);
1651 err = mr_res_start_move_to(dev, slave, id,
1652 RES_MPT_MAPPED, &mpt);
1653 if (err)
1654 return err;
1655
Shani Michaelib20e5192013-02-06 16:19:08 +00001656 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001657 if (err) {
1658 res_abort_move(dev, slave, RES_MPT, id);
1659 return err;
1660 }
1661
1662 res_end_move(dev, slave, RES_MPT, id);
1663 break;
1664 }
1665 return err;
1666}
1667
1668static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1669 u64 in_param, u64 *out_param)
1670{
1671 int cqn;
1672 int err;
1673
1674 switch (op) {
1675 case RES_OP_RESERVE_AND_MAP:
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001676 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001677 if (err)
1678 break;
1679
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001680 err = __mlx4_cq_alloc_icm(dev, &cqn);
1681 if (err) {
1682 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1683 break;
1684 }
1685
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001686 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1687 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001688 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001689 __mlx4_cq_free_icm(dev, cqn);
1690 break;
1691 }
1692
1693 set_param_l(out_param, cqn);
1694 break;
1695
1696 default:
1697 err = -EINVAL;
1698 }
1699
1700 return err;
1701}
1702
1703static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1704 u64 in_param, u64 *out_param)
1705{
1706 int srqn;
1707 int err;
1708
1709 switch (op) {
1710 case RES_OP_RESERVE_AND_MAP:
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001711 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001712 if (err)
1713 break;
1714
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001715 err = __mlx4_srq_alloc_icm(dev, &srqn);
1716 if (err) {
1717 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1718 break;
1719 }
1720
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001721 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1722 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001723 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001724 __mlx4_srq_free_icm(dev, srqn);
1725 break;
1726 }
1727
1728 set_param_l(out_param, srqn);
1729 break;
1730
1731 default:
1732 err = -EINVAL;
1733 }
1734
1735 return err;
1736}
1737
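/* Find the MAC that this slave registered on @port under @smac_index.
 * Used by roce_verify_mac() below to validate the source MAC referenced
 * by a RoCE QP context.
 */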
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001738static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1739 u8 smac_index, u64 *mac)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001740{
1741 struct mlx4_priv *priv = mlx4_priv(dev);
1742 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001743 struct list_head *mac_list =
1744 &tracker->slave_list[slave].res_list[RES_MAC];
1745 struct mac_res *res, *tmp;
1746
1747 list_for_each_entry_safe(res, tmp, mac_list, list) {
1748 if (res->smac_index == smac_index && res->port == (u8) port) {
1749 *mac = res->mac;
1750 return 0;
1751 }
1752 }
1753 return -ENOENT;
1754}
1755
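/* Track a MAC registered by a slave.  If the MAC is already tracked for
 * this port only its reference count is bumped; otherwise a new entry is
 * charged against the slave's MAC quota and added to its resource list.
 */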
1756static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1757{
1758 struct mlx4_priv *priv = mlx4_priv(dev);
1759 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1760 struct list_head *mac_list =
1761 &tracker->slave_list[slave].res_list[RES_MAC];
1762 struct mac_res *res, *tmp;
1763
1764 list_for_each_entry_safe(res, tmp, mac_list, list) {
1765 if (res->mac == mac && res->port == (u8) port) {
1766 /* mac found. update ref count */
1767 ++res->ref_count;
1768 return 0;
1769 }
1770 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001771
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001772 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1773 return -EINVAL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001774 res = kzalloc(sizeof *res, GFP_KERNEL);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001775 if (!res) {
1776 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001777 return -ENOMEM;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001778 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001779 res->mac = mac;
1780 res->port = (u8) port;
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001781 res->smac_index = smac_index;
1782 res->ref_count = 1;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001783 list_add_tail(&res->list,
1784 &tracker->slave_list[slave].res_list[RES_MAC]);
1785 return 0;
1786}
1787
1788static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1789 int port)
1790{
1791 struct mlx4_priv *priv = mlx4_priv(dev);
1792 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1793 struct list_head *mac_list =
1794 &tracker->slave_list[slave].res_list[RES_MAC];
1795 struct mac_res *res, *tmp;
1796
1797 list_for_each_entry_safe(res, tmp, mac_list, list) {
1798 if (res->mac == mac && res->port == (u8) port) {
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001799 if (!--res->ref_count) {
1800 list_del(&res->list);
1801 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1802 kfree(res);
1803 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001804 break;
1805 }
1806 }
1807}
1808
1809static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1810{
1811 struct mlx4_priv *priv = mlx4_priv(dev);
1812 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1813 struct list_head *mac_list =
1814 &tracker->slave_list[slave].res_list[RES_MAC];
1815 struct mac_res *res, *tmp;
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001816 int i;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001817
1818 list_for_each_entry_safe(res, tmp, mac_list, list) {
1819 list_del(&res->list);
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001820		/* drop one reference for each time the slave registered this MAC */
1821 for (i = 0; i < res->ref_count; i++)
1822 __mlx4_unregister_mac(dev, res->port, res->mac);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001823 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001824 kfree(res);
1825 }
1826}
1827
1828static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02001829 u64 in_param, u64 *out_param, int in_port)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001830{
1831 int err = -EINVAL;
1832 int port;
1833 u64 mac;
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001834 u8 smac_index;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001835
1836 if (op != RES_OP_RESERVE_AND_MAP)
1837 return err;
1838
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02001839 port = !in_port ? get_param_l(out_param) : in_port;
Matan Barak449fc482014-03-19 18:11:52 +02001840 port = mlx4_slave_convert_port(
1841 dev, slave, port);
1842
1843 if (port < 0)
1844 return -EINVAL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001845 mac = in_param;
1846
1847 err = __mlx4_register_mac(dev, port, mac);
1848 if (err >= 0) {
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001849 smac_index = err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001850 set_param_l(out_param, err);
1851 err = 0;
1852 }
1853
1854 if (!err) {
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001855 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001856 if (err)
1857 __mlx4_unregister_mac(dev, port, mac);
1858 }
1859 return err;
1860}
1861
Jack Morgenstein48740802013-11-03 10:03:20 +02001862static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1863 int port, int vlan_index)
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001864{
Jack Morgenstein48740802013-11-03 10:03:20 +02001865 struct mlx4_priv *priv = mlx4_priv(dev);
1866 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1867 struct list_head *vlan_list =
1868 &tracker->slave_list[slave].res_list[RES_VLAN];
1869 struct vlan_res *res, *tmp;
1870
1871 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1872 if (res->vlan == vlan && res->port == (u8) port) {
1873 /* vlan found. update ref count */
1874 ++res->ref_count;
1875 return 0;
1876 }
1877 }
1878
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001879 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1880 return -EINVAL;
Jack Morgenstein48740802013-11-03 10:03:20 +02001881 res = kzalloc(sizeof(*res), GFP_KERNEL);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001882 if (!res) {
1883 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
Jack Morgenstein48740802013-11-03 10:03:20 +02001884 return -ENOMEM;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001885 }
Jack Morgenstein48740802013-11-03 10:03:20 +02001886 res->vlan = vlan;
1887 res->port = (u8) port;
1888 res->vlan_index = vlan_index;
1889 res->ref_count = 1;
1890 list_add_tail(&res->list,
1891 &tracker->slave_list[slave].res_list[RES_VLAN]);
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001892 return 0;
1893}
1894
Jack Morgenstein48740802013-11-03 10:03:20 +02001895
1896static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1897 int port)
1898{
1899 struct mlx4_priv *priv = mlx4_priv(dev);
1900 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1901 struct list_head *vlan_list =
1902 &tracker->slave_list[slave].res_list[RES_VLAN];
1903 struct vlan_res *res, *tmp;
1904
1905 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1906 if (res->vlan == vlan && res->port == (u8) port) {
1907 if (!--res->ref_count) {
1908 list_del(&res->list);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001909 mlx4_release_resource(dev, slave, RES_VLAN,
1910 1, port);
Jack Morgenstein48740802013-11-03 10:03:20 +02001911 kfree(res);
1912 }
1913 break;
1914 }
1915 }
1916}
1917
1918static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1919{
1920 struct mlx4_priv *priv = mlx4_priv(dev);
1921 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1922 struct list_head *vlan_list =
1923 &tracker->slave_list[slave].res_list[RES_VLAN];
1924 struct vlan_res *res, *tmp;
1925 int i;
1926
1927 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1928 list_del(&res->list);
 1929		/* drop one reference for each time the slave registered this VLAN */
1930 for (i = 0; i < res->ref_count; i++)
1931 __mlx4_unregister_vlan(dev, res->port, res->vlan);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001932 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
Jack Morgenstein48740802013-11-03 10:03:20 +02001933 kfree(res);
1934 }
1935}
1936
1937static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02001938 u64 in_param, u64 *out_param, int in_port)
Jack Morgenstein48740802013-11-03 10:03:20 +02001939{
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02001940 struct mlx4_priv *priv = mlx4_priv(dev);
1941 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
Jack Morgenstein48740802013-11-03 10:03:20 +02001942 int err;
1943 u16 vlan;
1944 int vlan_index;
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02001945 int port;
1946
1947 port = !in_port ? get_param_l(out_param) : in_port;
Jack Morgenstein48740802013-11-03 10:03:20 +02001948
1949 if (!port || op != RES_OP_RESERVE_AND_MAP)
1950 return -EINVAL;
1951
Matan Barak449fc482014-03-19 18:11:52 +02001952 port = mlx4_slave_convert_port(
1953 dev, slave, port);
1954
1955 if (port < 0)
1956 return -EINVAL;
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02001957	/* older upstream kernels treated VLAN reg/unreg as a NOP;
	 * keep that behaviour for slaves using the old API */
1958 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1959 slave_state[slave].old_vlan_api = true;
1960 return 0;
1961 }
1962
Jack Morgenstein48740802013-11-03 10:03:20 +02001963 vlan = (u16) in_param;
1964
1965 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1966 if (!err) {
1967 set_param_l(out_param, (u32) vlan_index);
1968 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1969 if (err)
1970 __mlx4_unregister_vlan(dev, port, vlan);
1971 }
1972 return err;
1973}
1974
Jack Morgensteinba062d52012-05-15 10:35:03 +00001975static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1976 u64 in_param, u64 *out_param)
1977{
1978 u32 index;
1979 int err;
1980
1981 if (op != RES_OP_RESERVE)
1982 return -EINVAL;
1983
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001984 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
Jack Morgensteinba062d52012-05-15 10:35:03 +00001985 if (err)
1986 return err;
1987
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001988 err = __mlx4_counter_alloc(dev, &index);
1989 if (err) {
1990 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1991 return err;
1992 }
1993
Jack Morgensteinba062d52012-05-15 10:35:03 +00001994 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001995 if (err) {
Jack Morgensteinba062d52012-05-15 10:35:03 +00001996 __mlx4_counter_free(dev, index);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001997 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1998 } else {
Jack Morgensteinba062d52012-05-15 10:35:03 +00001999 set_param_l(out_param, index);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002000 }
Jack Morgensteinba062d52012-05-15 10:35:03 +00002001
2002 return err;
2003}
2004
2005static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2006 u64 in_param, u64 *out_param)
2007{
2008 u32 xrcdn;
2009 int err;
2010
2011 if (op != RES_OP_RESERVE)
2012 return -EINVAL;
2013
2014 err = __mlx4_xrcd_alloc(dev, &xrcdn);
2015 if (err)
2016 return err;
2017
2018 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2019 if (err)
2020 __mlx4_xrcd_free(dev, xrcdn);
2021 else
2022 set_param_l(out_param, xrcdn);
2023
2024 return err;
2025}
2026
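/* Dispatcher for the ALLOC_RES command issued by a slave.  The low byte of
 * the input modifier selects the resource type, bits 15:8 carry the port
 * for MAC/VLAN resources, and the op modifier selects the allocation phase
 * (reserve, map ICM, or reserve-and-map).
 */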
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002027int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2028 struct mlx4_vhcr *vhcr,
2029 struct mlx4_cmd_mailbox *inbox,
2030 struct mlx4_cmd_mailbox *outbox,
2031 struct mlx4_cmd_info *cmd)
2032{
2033 int err;
2034 int alop = vhcr->op_modifier;
2035
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002036 switch (vhcr->in_modifier & 0xFF) {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002037 case RES_QP:
2038 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2039 vhcr->in_param, &vhcr->out_param);
2040 break;
2041
2042 case RES_MTT:
2043 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2044 vhcr->in_param, &vhcr->out_param);
2045 break;
2046
2047 case RES_MPT:
2048 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2049 vhcr->in_param, &vhcr->out_param);
2050 break;
2051
2052 case RES_CQ:
2053 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2054 vhcr->in_param, &vhcr->out_param);
2055 break;
2056
2057 case RES_SRQ:
2058 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2059 vhcr->in_param, &vhcr->out_param);
2060 break;
2061
2062 case RES_MAC:
2063 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002064 vhcr->in_param, &vhcr->out_param,
2065 (vhcr->in_modifier >> 8) & 0xFF);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002066 break;
2067
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002068 case RES_VLAN:
2069 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002070 vhcr->in_param, &vhcr->out_param,
2071 (vhcr->in_modifier >> 8) & 0xFF);
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002072 break;
2073
Jack Morgensteinba062d52012-05-15 10:35:03 +00002074 case RES_COUNTER:
2075 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2076 vhcr->in_param, &vhcr->out_param);
2077 break;
2078
2079 case RES_XRCD:
2080 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2081 vhcr->in_param, &vhcr->out_param);
2082 break;
2083
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002084 default:
2085 err = -EINVAL;
2086 break;
2087 }
2088
2089 return err;
2090}
2091
2092static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2093 u64 in_param)
2094{
2095 int err;
2096 int count;
2097 int base;
2098 int qpn;
2099
2100 switch (op) {
2101 case RES_OP_RESERVE:
2102 base = get_param_l(&in_param) & 0x7fffff;
2103 count = get_param_h(&in_param);
2104 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2105 if (err)
2106 break;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002107 mlx4_release_resource(dev, slave, RES_QP, count, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002108 __mlx4_qp_release_range(dev, base, count);
2109 break;
2110 case RES_OP_MAP_ICM:
2111 qpn = get_param_l(&in_param) & 0x7fffff;
2112 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2113 NULL, 0);
2114 if (err)
2115 return err;
2116
Jack Morgenstein54679e12012-08-03 08:40:43 +00002117 if (!fw_reserved(dev, qpn))
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002118 __mlx4_qp_free_icm(dev, qpn);
2119
2120 res_end_move(dev, slave, RES_QP, qpn);
2121
2122 if (valid_reserved(dev, slave, qpn))
2123 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2124 break;
2125 default:
2126 err = -EINVAL;
2127 break;
2128 }
2129 return err;
2130}
2131
2132static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2133 u64 in_param, u64 *out_param)
2134{
2135 int err = -EINVAL;
2136 int base;
2137 int order;
2138
2139 if (op != RES_OP_RESERVE_AND_MAP)
2140 return err;
2141
2142 base = get_param_l(&in_param);
2143 order = get_param_h(&in_param);
2144 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002145 if (!err) {
2146 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002147 __mlx4_free_mtt_range(dev, base, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002148 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002149 return err;
2150}
2151
2152static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2153 u64 in_param)
2154{
2155 int err = -EINVAL;
2156 int index;
2157 int id;
2158 struct res_mpt *mpt;
2159
2160 switch (op) {
2161 case RES_OP_RESERVE:
2162 index = get_param_l(&in_param);
2163 id = index & mpt_mask(dev);
2164 err = get_res(dev, slave, id, RES_MPT, &mpt);
2165 if (err)
2166 break;
2167 index = mpt->key;
2168 put_res(dev, slave, id, RES_MPT);
2169
2170 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2171 if (err)
2172 break;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002173 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
Shani Michaelib20e5192013-02-06 16:19:08 +00002174 __mlx4_mpt_release(dev, index);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002175 break;
2176 case RES_OP_MAP_ICM:
2177 index = get_param_l(&in_param);
2178 id = index & mpt_mask(dev);
2179 err = mr_res_start_move_to(dev, slave, id,
2180 RES_MPT_RESERVED, &mpt);
2181 if (err)
2182 return err;
2183
Shani Michaelib20e5192013-02-06 16:19:08 +00002184 __mlx4_mpt_free_icm(dev, mpt->key);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002185 res_end_move(dev, slave, RES_MPT, id);
2186 return err;
2188 default:
2189 err = -EINVAL;
2190 break;
2191 }
2192 return err;
2193}
2194
2195static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2196 u64 in_param, u64 *out_param)
2197{
2198 int cqn;
2199 int err;
2200
2201 switch (op) {
2202 case RES_OP_RESERVE_AND_MAP:
2203 cqn = get_param_l(&in_param);
2204 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2205 if (err)
2206 break;
2207
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002208 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002209 __mlx4_cq_free_icm(dev, cqn);
2210 break;
2211
2212 default:
2213 err = -EINVAL;
2214 break;
2215 }
2216
2217 return err;
2218}
2219
2220static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2221 u64 in_param, u64 *out_param)
2222{
2223 int srqn;
2224 int err;
2225
2226 switch (op) {
2227 case RES_OP_RESERVE_AND_MAP:
2228 srqn = get_param_l(&in_param);
2229 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2230 if (err)
2231 break;
2232
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002233 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002234 __mlx4_srq_free_icm(dev, srqn);
2235 break;
2236
2237 default:
2238 err = -EINVAL;
2239 break;
2240 }
2241
2242 return err;
2243}
2244
2245static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002246 u64 in_param, u64 *out_param, int in_port)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002247{
2248 int port;
2249 int err = 0;
2250
2251 switch (op) {
2252 case RES_OP_RESERVE_AND_MAP:
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002253 port = !in_port ? get_param_l(out_param) : in_port;
Matan Barak449fc482014-03-19 18:11:52 +02002254 port = mlx4_slave_convert_port(
2255 dev, slave, port);
2256
2257 if (port < 0)
2258 return -EINVAL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002259 mac_del_from_slave(dev, slave, in_param, port);
2260 __mlx4_unregister_mac(dev, port, in_param);
2261 break;
2262 default:
2263 err = -EINVAL;
2264 break;
2265 }
2266
2267 return err;
2268
2269}
2270
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002271static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002272 u64 in_param, u64 *out_param, int port)
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002273{
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02002274 struct mlx4_priv *priv = mlx4_priv(dev);
2275 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
Jack Morgenstein48740802013-11-03 10:03:20 +02002276 int err = 0;
2277
Matan Barak449fc482014-03-19 18:11:52 +02002278 port = mlx4_slave_convert_port(
2279 dev, slave, port);
2280
2281 if (port < 0)
2282 return -EINVAL;
Jack Morgenstein48740802013-11-03 10:03:20 +02002283 switch (op) {
2284 case RES_OP_RESERVE_AND_MAP:
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02002285 if (slave_state[slave].old_vlan_api)
2286 return 0;
Jack Morgenstein48740802013-11-03 10:03:20 +02002287 if (!port)
2288 return -EINVAL;
2289 vlan_del_from_slave(dev, slave, in_param, port);
2290 __mlx4_unregister_vlan(dev, port, in_param);
2291 break;
2292 default:
2293 err = -EINVAL;
2294 break;
2295 }
2296
2297 return err;
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002298}
2299
Jack Morgensteinba062d52012-05-15 10:35:03 +00002300static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2301 u64 in_param, u64 *out_param)
2302{
2303 int index;
2304 int err;
2305
2306 if (op != RES_OP_RESERVE)
2307 return -EINVAL;
2308
2309 index = get_param_l(&in_param);
2310 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2311 if (err)
2312 return err;
2313
2314 __mlx4_counter_free(dev, index);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002315 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
Jack Morgensteinba062d52012-05-15 10:35:03 +00002316
2317 return err;
2318}
2319
2320static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2321 u64 in_param, u64 *out_param)
2322{
2323 int xrcdn;
2324 int err;
2325
2326 if (op != RES_OP_RESERVE)
2327 return -EINVAL;
2328
2329 xrcdn = get_param_l(&in_param);
2330 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2331 if (err)
2332 return err;
2333
2334 __mlx4_xrcd_free(dev, xrcdn);
2335
2336 return err;
2337}
2338
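/* Counterpart of mlx4_ALLOC_RES_wrapper: releases a resource previously
 * allocated to a slave, removes it from the resource tracker and returns
 * it to the global pools.
 */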
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002339int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2340 struct mlx4_vhcr *vhcr,
2341 struct mlx4_cmd_mailbox *inbox,
2342 struct mlx4_cmd_mailbox *outbox,
2343 struct mlx4_cmd_info *cmd)
2344{
2345 int err = -EINVAL;
2346 int alop = vhcr->op_modifier;
2347
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002348 switch (vhcr->in_modifier & 0xFF) {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002349 case RES_QP:
2350 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2351 vhcr->in_param);
2352 break;
2353
2354 case RES_MTT:
2355 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2356 vhcr->in_param, &vhcr->out_param);
2357 break;
2358
2359 case RES_MPT:
2360 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2361 vhcr->in_param);
2362 break;
2363
2364 case RES_CQ:
2365 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2366 vhcr->in_param, &vhcr->out_param);
2367 break;
2368
2369 case RES_SRQ:
2370 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2371 vhcr->in_param, &vhcr->out_param);
2372 break;
2373
2374 case RES_MAC:
2375 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002376 vhcr->in_param, &vhcr->out_param,
2377 (vhcr->in_modifier >> 8) & 0xFF);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002378 break;
2379
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002380 case RES_VLAN:
2381 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002382 vhcr->in_param, &vhcr->out_param,
2383 (vhcr->in_modifier >> 8) & 0xFF);
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002384 break;
2385
Jack Morgensteinba062d52012-05-15 10:35:03 +00002386 case RES_COUNTER:
2387 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2388 vhcr->in_param, &vhcr->out_param);
2389 break;
2390
2391 case RES_XRCD:
2392 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
 2393				    vhcr->in_param, &vhcr->out_param);
		break;
 2394
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002395 default:
2396 break;
2397 }
2398 return err;
2399}
2400
2401/* ugly but other choices are uglier */
2402static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2403{
2404 return (be32_to_cpu(mpt->flags) >> 9) & 1;
2405}
2406
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002407static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002408{
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002409 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002410}
2411
2412static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2413{
2414 return be32_to_cpu(mpt->mtt_sz);
2415}
2416
Shani Michaelicc1ade92013-02-06 16:19:10 +00002417static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2418{
2419 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2420}
2421
2422static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2423{
2424 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2425}
2426
2427static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2428{
2429 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2430}
2431
2432static int mr_is_region(struct mlx4_mpt_entry *mpt)
2433{
2434 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2435}
2436
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002437static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002438{
2439 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2440}
2441
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002442static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002443{
2444 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2445}
2446
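/* Number of MTT entries needed to back a QP's work queues, derived from the
 * QP context: the SQ/RQ sizes come from the log size and stride fields, the
 * RQ is ignored for SRQ/RSS/XRC QPs, and the total (including the page
 * offset) is rounded up to a power-of-two number of pages.
 */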
2447static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2448{
2449 int page_shift = (qpc->log_page_size & 0x3f) + 12;
2450 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
 2451	int log_sq_stride = qpc->sq_size_stride & 7;
2452 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2453 int log_rq_stride = qpc->rq_size_stride & 7;
2454 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2455 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
Yishai Hadas5c5f3f02013-08-01 18:49:52 +03002456 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2457 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002458 int sq_size;
2459 int rq_size;
2460 int total_pages;
2461 int total_mem;
2462 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2463
 2464	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2465 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2466 total_mem = sq_size + rq_size;
2467 total_pages =
2468 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2469 page_shift);
2470
2471 return total_pages;
2472}
2473
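/* A slave may only reference MTT entries it owns: the requested window
 * [start, start + size) must lie entirely within the tracked MTT range.
 */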
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002474static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2475 int size, struct res_mtt *mtt)
2476{
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002477 int res_start = mtt->com.res_id;
2478 int res_size = (1 << mtt->order);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002479
2480 if (start < res_start || start + size > res_start + res_size)
2481 return -EPERM;
2482 return 0;
2483}
2484
2485int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2486 struct mlx4_vhcr *vhcr,
2487 struct mlx4_cmd_mailbox *inbox,
2488 struct mlx4_cmd_mailbox *outbox,
2489 struct mlx4_cmd_info *cmd)
2490{
2491 int err;
2492 int index = vhcr->in_modifier;
2493 struct res_mtt *mtt;
2494 struct res_mpt *mpt;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002495 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002496 int phys;
2497 int id;
Shani Michaelicc1ade92013-02-06 16:19:10 +00002498 u32 pd;
2499 int pd_slave;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002500
2501 id = index & mpt_mask(dev);
2502 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2503 if (err)
2504 return err;
2505
Shani Michaelicc1ade92013-02-06 16:19:10 +00002506 /* Disable memory windows for VFs. */
2507 if (!mr_is_region(inbox->buf)) {
2508 err = -EPERM;
2509 goto ex_abort;
2510 }
2511
 2512	/* Make sure that the PD bits related to the slave id are zero. */
2513 pd = mr_get_pd(inbox->buf);
2514 pd_slave = (pd >> 17) & 0x7f;
2515 if (pd_slave != 0 && pd_slave != slave) {
2516 err = -EPERM;
2517 goto ex_abort;
2518 }
2519
2520 if (mr_is_fmr(inbox->buf)) {
2521 /* FMR and Bind Enable are forbidden in slave devices. */
2522 if (mr_is_bind_enabled(inbox->buf)) {
2523 err = -EPERM;
2524 goto ex_abort;
2525 }
2526 /* FMR and Memory Windows are also forbidden. */
2527 if (!mr_is_region(inbox->buf)) {
2528 err = -EPERM;
2529 goto ex_abort;
2530 }
2531 }
2532
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002533 phys = mr_phys_mpt(inbox->buf);
2534 if (!phys) {
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002535 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002536 if (err)
2537 goto ex_abort;
2538
2539 err = check_mtt_range(dev, slave, mtt_base,
2540 mr_get_mtt_size(inbox->buf), mtt);
2541 if (err)
2542 goto ex_put;
2543
2544 mpt->mtt = mtt;
2545 }
2546
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002547 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2548 if (err)
2549 goto ex_put;
2550
2551 if (!phys) {
2552 atomic_inc(&mtt->ref_count);
2553 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2554 }
2555
2556 res_end_move(dev, slave, RES_MPT, id);
2557 return 0;
2558
2559ex_put:
2560 if (!phys)
2561 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2562ex_abort:
2563 res_abort_move(dev, slave, RES_MPT, id);
2564
2565 return err;
2566}
2567
2568int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2569 struct mlx4_vhcr *vhcr,
2570 struct mlx4_cmd_mailbox *inbox,
2571 struct mlx4_cmd_mailbox *outbox,
2572 struct mlx4_cmd_info *cmd)
2573{
2574 int err;
2575 int index = vhcr->in_modifier;
2576 struct res_mpt *mpt;
2577 int id;
2578
2579 id = index & mpt_mask(dev);
2580 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2581 if (err)
2582 return err;
2583
2584 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2585 if (err)
2586 goto ex_abort;
2587
2588 if (mpt->mtt)
2589 atomic_dec(&mpt->mtt->ref_count);
2590
2591 res_end_move(dev, slave, RES_MPT, id);
2592 return 0;
2593
2594ex_abort:
2595 res_abort_move(dev, slave, RES_MPT, id);
2596
2597 return err;
2598}
2599
2600int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2601 struct mlx4_vhcr *vhcr,
2602 struct mlx4_cmd_mailbox *inbox,
2603 struct mlx4_cmd_mailbox *outbox,
2604 struct mlx4_cmd_info *cmd)
2605{
2606 int err;
2607 int index = vhcr->in_modifier;
2608 struct res_mpt *mpt;
2609 int id;
2610
2611 id = index & mpt_mask(dev);
2612 err = get_res(dev, slave, id, RES_MPT, &mpt);
2613 if (err)
2614 return err;
2615
2616 if (mpt->com.from_state != RES_MPT_HW) {
2617 err = -EBUSY;
2618 goto out;
2619 }
2620
2621 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2622
2623out:
2624 put_res(dev, slave, id, RES_MPT);
2625 return err;
2626}
2627
2628static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2629{
2630 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2631}
2632
2633static int qp_get_scqn(struct mlx4_qp_context *qpc)
2634{
2635 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2636}
2637
2638static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2639{
2640 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2641}
2642
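/* Special (proxy/tunnel) QPs use a paravirtualized qkey; if this QP number
 * falls in that range, patch the qkey into the context supplied by the
 * slave.
 */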
Jack Morgenstein54679e12012-08-03 08:40:43 +00002643static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2644 struct mlx4_qp_context *context)
2645{
2646 u32 qpn = vhcr->in_modifier & 0xffffff;
2647 u32 qkey = 0;
2648
2649 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2650 return;
2651
2652 /* adjust qkey in qp context */
2653 context->qkey = cpu_to_be32(qkey);
2654}
2655
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002656int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2657 struct mlx4_vhcr *vhcr,
2658 struct mlx4_cmd_mailbox *inbox,
2659 struct mlx4_cmd_mailbox *outbox,
2660 struct mlx4_cmd_info *cmd)
2661{
2662 int err;
2663 int qpn = vhcr->in_modifier & 0x7fffff;
2664 struct res_mtt *mtt;
2665 struct res_qp *qp;
2666 struct mlx4_qp_context *qpc = inbox->buf + 8;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002667 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002668 int mtt_size = qp_get_mtt_size(qpc);
2669 struct res_cq *rcq;
2670 struct res_cq *scq;
2671 int rcqn = qp_get_rcqn(qpc);
2672 int scqn = qp_get_scqn(qpc);
2673 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2674 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2675 struct res_srq *srq;
2676 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2677
2678 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2679 if (err)
2680 return err;
2681 qp->local_qpn = local_qpn;
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002682 qp->sched_queue = 0;
Rony Efraimf0f829b2013-11-07 12:19:51 +02002683 qp->param3 = 0;
2684 qp->vlan_control = 0;
2685 qp->fvl_rx = 0;
2686 qp->pri_path_fl = 0;
2687 qp->vlan_index = 0;
2688 qp->feup = 0;
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002689 qp->qpc_flags = be32_to_cpu(qpc->flags);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002690
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002691 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002692 if (err)
2693 goto ex_abort;
2694
2695 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2696 if (err)
2697 goto ex_put_mtt;
2698
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002699 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2700 if (err)
2701 goto ex_put_mtt;
2702
2703 if (scqn != rcqn) {
2704 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2705 if (err)
2706 goto ex_put_rcq;
2707 } else
2708 scq = rcq;
2709
2710 if (use_srq) {
2711 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2712 if (err)
2713 goto ex_put_scq;
2714 }
2715
Jack Morgenstein54679e12012-08-03 08:40:43 +00002716 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2717 update_pkey_index(dev, slave, inbox);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002718 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2719 if (err)
2720 goto ex_put_srq;
2721 atomic_inc(&mtt->ref_count);
2722 qp->mtt = mtt;
2723 atomic_inc(&rcq->ref_count);
2724 qp->rcq = rcq;
2725 atomic_inc(&scq->ref_count);
2726 qp->scq = scq;
2727
2728 if (scqn != rcqn)
2729 put_res(dev, slave, scqn, RES_CQ);
2730
2731 if (use_srq) {
2732 atomic_inc(&srq->ref_count);
2733 put_res(dev, slave, srqn, RES_SRQ);
2734 qp->srq = srq;
2735 }
2736 put_res(dev, slave, rcqn, RES_CQ);
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002737 put_res(dev, slave, mtt_base, RES_MTT);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002738 res_end_move(dev, slave, RES_QP, qpn);
2739
2740 return 0;
2741
2742ex_put_srq:
2743 if (use_srq)
2744 put_res(dev, slave, srqn, RES_SRQ);
2745ex_put_scq:
2746 if (scqn != rcqn)
2747 put_res(dev, slave, scqn, RES_CQ);
2748ex_put_rcq:
2749 put_res(dev, slave, rcqn, RES_CQ);
2750ex_put_mtt:
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002751 put_res(dev, slave, mtt_base, RES_MTT);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002752ex_abort:
2753 res_abort_move(dev, slave, RES_QP, qpn);
2754
2755 return err;
2756}
2757
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002758static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002759{
2760 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2761}
2762
2763static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2764{
2765 int log_eq_size = eqc->log_eq_size & 0x1f;
2766 int page_shift = (eqc->log_page_size & 0x3f) + 12;
2767
2768 if (log_eq_size + 5 < page_shift)
2769 return 1;
2770
2771 return 1 << (log_eq_size + 5 - page_shift);
2772}
2773
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002774static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002775{
2776 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2777}
2778
2779static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2780{
2781 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2782 int page_shift = (cqc->log_page_size & 0x3f) + 12;
2783
2784 if (log_cq_size + 5 < page_shift)
2785 return 1;
2786
2787 return 1 << (log_cq_size + 5 - page_shift);
2788}
2789
2790int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2791 struct mlx4_vhcr *vhcr,
2792 struct mlx4_cmd_mailbox *inbox,
2793 struct mlx4_cmd_mailbox *outbox,
2794 struct mlx4_cmd_info *cmd)
2795{
2796 int err;
2797 int eqn = vhcr->in_modifier;
2798 int res_id = (slave << 8) | eqn;
2799 struct mlx4_eq_context *eqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002800 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002801 int mtt_size = eq_get_mtt_size(eqc);
2802 struct res_eq *eq;
2803 struct res_mtt *mtt;
2804
2805 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2806 if (err)
2807 return err;
2808 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2809 if (err)
2810 goto out_add;
2811
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002812 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002813 if (err)
2814 goto out_move;
2815
2816 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2817 if (err)
2818 goto out_put;
2819
2820 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2821 if (err)
2822 goto out_put;
2823
2824 atomic_inc(&mtt->ref_count);
2825 eq->mtt = mtt;
2826 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2827 res_end_move(dev, slave, RES_EQ, res_id);
2828 return 0;
2829
2830out_put:
2831 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2832out_move:
2833 res_abort_move(dev, slave, RES_EQ, res_id);
2834out_add:
2835 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2836 return err;
2837}
2838
2839static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2840 int len, struct res_mtt **res)
2841{
2842 struct mlx4_priv *priv = mlx4_priv(dev);
2843 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2844 struct res_mtt *mtt;
2845 int err = -EINVAL;
2846
2847 spin_lock_irq(mlx4_tlock(dev));
2848 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2849 com.list) {
2850 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2851 *res = mtt;
2852 mtt->com.from_state = mtt->com.state;
2853 mtt->com.state = RES_MTT_BUSY;
2854 err = 0;
2855 break;
2856 }
2857 }
2858 spin_unlock_irq(mlx4_tlock(dev));
2859
2860 return err;
2861}
2862
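/* Sanity-check a slave's QP state-transition command.  For RC/UC/XRC QPs,
 * whenever the transition modifies the primary or alternate address path,
 * verify that the requested GID index lies within the range of GIDs
 * assigned to this slave on the port in question (a per-slave GID range on
 * Ethernet/RoCE ports, a single GID on IB ports).
 */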
Jack Morgenstein54679e12012-08-03 08:40:43 +00002863static int verify_qp_parameters(struct mlx4_dev *dev,
2864 struct mlx4_cmd_mailbox *inbox,
2865 enum qp_transition transition, u8 slave)
2866{
2867 u32 qp_type;
2868 struct mlx4_qp_context *qp_ctx;
2869 enum mlx4_qp_optpar optpar;
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02002870 int port;
2871 int num_gids;
Jack Morgenstein54679e12012-08-03 08:40:43 +00002872
2873 qp_ctx = inbox->buf + 8;
2874 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2875 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2876
2877 switch (qp_type) {
2878 case MLX4_QP_ST_RC:
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02002879 case MLX4_QP_ST_XRC:
Jack Morgenstein54679e12012-08-03 08:40:43 +00002880 case MLX4_QP_ST_UC:
2881 switch (transition) {
2882 case QP_TRANS_INIT2RTR:
2883 case QP_TRANS_RTR2RTS:
2884 case QP_TRANS_RTS2RTS:
2885 case QP_TRANS_SQD2SQD:
2886 case QP_TRANS_SQD2RTS:
2887 if (slave != mlx4_master_func_num(dev))
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02002888 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2889 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2890 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
Matan Barak449fc482014-03-19 18:11:52 +02002891 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02002892 else
2893 num_gids = 1;
2894 if (qp_ctx->pri_path.mgid_index >= num_gids)
Jack Morgenstein54679e12012-08-03 08:40:43 +00002895 return -EINVAL;
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02002896 }
2897 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2898 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2899 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
Matan Barak449fc482014-03-19 18:11:52 +02002900 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02002901 else
2902 num_gids = 1;
2903 if (qp_ctx->alt_path.mgid_index >= num_gids)
Jack Morgenstein54679e12012-08-03 08:40:43 +00002904 return -EINVAL;
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02002905 }
Jack Morgenstein54679e12012-08-03 08:40:43 +00002906 break;
2907 default:
2908 break;
2909 }
2910
2911 break;
2912 default:
2913 break;
2914 }
2915
2916 return 0;
2917}
2918
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002919int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2920 struct mlx4_vhcr *vhcr,
2921 struct mlx4_cmd_mailbox *inbox,
2922 struct mlx4_cmd_mailbox *outbox,
2923 struct mlx4_cmd_info *cmd)
2924{
2925 struct mlx4_mtt mtt;
2926 __be64 *page_list = inbox->buf;
2927 u64 *pg_list = (u64 *)page_list;
2928 int i;
2929 struct res_mtt *rmtt = NULL;
2930 int start = be64_to_cpu(page_list[0]);
2931 int npages = vhcr->in_modifier;
2932 int err;
2933
2934 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2935 if (err)
2936 return err;
2937
2938 /* Call the SW implementation of write_mtt:
2939 * - Prepare a dummy mtt struct
 2940	 * - Translate inbox contents to simple addresses in host endianness */
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002941	mtt.offset = 0; /* TBD: offset handling is broken, but it is left
 2942			   as-is because the offset is not actually used here */
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002943 mtt.order = 0;
2944 mtt.page_shift = 0;
2945 for (i = 0; i < npages; ++i)
2946 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2947
2948 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2949 ((u64 *)page_list + 2));
2950
2951 if (rmtt)
2952 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2953
2954 return err;
2955}
2956
2957int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2958 struct mlx4_vhcr *vhcr,
2959 struct mlx4_cmd_mailbox *inbox,
2960 struct mlx4_cmd_mailbox *outbox,
2961 struct mlx4_cmd_info *cmd)
2962{
2963 int eqn = vhcr->in_modifier;
2964 int res_id = eqn | (slave << 8);
2965 struct res_eq *eq;
2966 int err;
2967
2968 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2969 if (err)
2970 return err;
2971
2972 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2973 if (err)
2974 goto ex_abort;
2975
2976 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2977 if (err)
2978 goto ex_put;
2979
2980 atomic_dec(&eq->mtt->ref_count);
2981 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2982 res_end_move(dev, slave, RES_EQ, res_id);
2983 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2984
2985 return 0;
2986
2987ex_put:
2988 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2989ex_abort:
2990 res_abort_move(dev, slave, RES_EQ, res_id);
2991
2992 return err;
2993}
2994
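/* Generate an event on a slave's event queue: build the EQE in a mailbox
 * and issue the GEN_EQE firmware command.  If the slave has not registered
 * an EQ for this event type the event is silently dropped.
 */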
2995int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2996{
2997 struct mlx4_priv *priv = mlx4_priv(dev);
2998 struct mlx4_slave_event_eq_info *event_eq;
2999 struct mlx4_cmd_mailbox *mailbox;
3000 u32 in_modifier = 0;
3001 int err;
3002 int res_id;
3003 struct res_eq *req;
3004
3005 if (!priv->mfunc.master.slave_state)
3006 return -EINVAL;
3007
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00003008 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003009
3010 /* Create the event only if the slave is registered */
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00003011 if (event_eq->eqn < 0)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003012 return 0;
3013
3014 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3015 res_id = (slave << 8) | event_eq->eqn;
3016 err = get_res(dev, slave, res_id, RES_EQ, &req);
3017 if (err)
3018 goto unlock;
3019
3020 if (req->com.from_state != RES_EQ_HW) {
3021 err = -EINVAL;
3022 goto put;
3023 }
3024
3025 mailbox = mlx4_alloc_cmd_mailbox(dev);
3026 if (IS_ERR(mailbox)) {
3027 err = PTR_ERR(mailbox);
3028 goto put;
3029 }
3030
3031 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3032 ++event_eq->token;
3033 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3034 }
3035
3036 memcpy(mailbox->buf, (u8 *) eqe, 28);
3037
3038 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3039
3040 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3041 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3042 MLX4_CMD_NATIVE);
3043
3044 put_res(dev, slave, res_id, RES_EQ);
3045 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3046 mlx4_free_cmd_mailbox(dev, mailbox);
3047 return err;
3048
3049put:
3050 put_res(dev, slave, res_id, RES_EQ);
3051
3052unlock:
3053 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3054 return err;
3055}
3056
3057int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3058 struct mlx4_vhcr *vhcr,
3059 struct mlx4_cmd_mailbox *inbox,
3060 struct mlx4_cmd_mailbox *outbox,
3061 struct mlx4_cmd_info *cmd)
3062{
3063 int eqn = vhcr->in_modifier;
3064 int res_id = eqn | (slave << 8);
3065 struct res_eq *eq;
3066 int err;
3067
3068 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3069 if (err)
3070 return err;
3071
3072 if (eq->com.from_state != RES_EQ_HW) {
3073 err = -EINVAL;
3074 goto ex_put;
3075 }
3076
3077 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3078
3079ex_put:
3080 put_res(dev, slave, res_id, RES_EQ);
3081 return err;
3082}
3083
3084int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3085 struct mlx4_vhcr *vhcr,
3086 struct mlx4_cmd_mailbox *inbox,
3087 struct mlx4_cmd_mailbox *outbox,
3088 struct mlx4_cmd_info *cmd)
3089{
3090 int err;
3091 int cqn = vhcr->in_modifier;
3092 struct mlx4_cq_context *cqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003093 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003094 struct res_cq *cq;
3095 struct res_mtt *mtt;
3096
3097 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3098 if (err)
3099 return err;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003100 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003101 if (err)
3102 goto out_move;
3103 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3104 if (err)
3105 goto out_put;
3106 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3107 if (err)
3108 goto out_put;
3109 atomic_inc(&mtt->ref_count);
3110 cq->mtt = mtt;
3111 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3112 res_end_move(dev, slave, RES_CQ, cqn);
3113 return 0;
3114
3115out_put:
3116 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3117out_move:
3118 res_abort_move(dev, slave, RES_CQ, cqn);
3119 return err;
3120}
3121
3122int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3123 struct mlx4_vhcr *vhcr,
3124 struct mlx4_cmd_mailbox *inbox,
3125 struct mlx4_cmd_mailbox *outbox,
3126 struct mlx4_cmd_info *cmd)
3127{
3128 int err;
3129 int cqn = vhcr->in_modifier;
3130 struct res_cq *cq;
3131
3132 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3133 if (err)
3134 return err;
3135 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3136 if (err)
3137 goto out_move;
3138 atomic_dec(&cq->mtt->ref_count);
3139 res_end_move(dev, slave, RES_CQ, cqn);
3140 return 0;
3141
3142out_move:
3143 res_abort_move(dev, slave, RES_CQ, cqn);
3144 return err;
3145}
3146
3147int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3148 struct mlx4_vhcr *vhcr,
3149 struct mlx4_cmd_mailbox *inbox,
3150 struct mlx4_cmd_mailbox *outbox,
3151 struct mlx4_cmd_info *cmd)
3152{
3153 int cqn = vhcr->in_modifier;
3154 struct res_cq *cq;
3155 int err;
3156
3157 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3158 if (err)
3159 return err;
3160
3161 if (cq->com.from_state != RES_CQ_HW)
3162 goto ex_put;
3163
3164 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3165ex_put:
3166 put_res(dev, slave, cqn, RES_CQ);
3167
3168 return err;
3169}
3170
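/* CQ resize (MODIFY_CQ with op_modifier 0): verify that the CQ still owns
 * its original MTT, validate the new MTT range, and on success move the
 * CQ's MTT reference from the old range to the new one.
 */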
3171static int handle_resize(struct mlx4_dev *dev, int slave,
3172 struct mlx4_vhcr *vhcr,
3173 struct mlx4_cmd_mailbox *inbox,
3174 struct mlx4_cmd_mailbox *outbox,
3175 struct mlx4_cmd_info *cmd,
3176 struct res_cq *cq)
3177{
3178 int err;
3179 struct res_mtt *orig_mtt;
3180 struct res_mtt *mtt;
3181 struct mlx4_cq_context *cqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003182 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003183
3184 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3185 if (err)
3186 return err;
3187
3188 if (orig_mtt != cq->mtt) {
3189 err = -EINVAL;
3190 goto ex_put;
3191 }
3192
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003193 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003194 if (err)
3195 goto ex_put;
3196
3197 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3198 if (err)
3199 goto ex_put1;
3200 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3201 if (err)
3202 goto ex_put1;
3203 atomic_dec(&orig_mtt->ref_count);
3204 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3205 atomic_inc(&mtt->ref_count);
3206 cq->mtt = mtt;
3207 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3208 return 0;
3209
3210ex_put1:
3211 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3212ex_put:
3213 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3214
3215 return err;
3216
3217}
3218
3219int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3220 struct mlx4_vhcr *vhcr,
3221 struct mlx4_cmd_mailbox *inbox,
3222 struct mlx4_cmd_mailbox *outbox,
3223 struct mlx4_cmd_info *cmd)
3224{
3225 int cqn = vhcr->in_modifier;
3226 struct res_cq *cq;
3227 int err;
3228
3229 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3230 if (err)
3231 return err;
3232
3233 if (cq->com.from_state != RES_CQ_HW)
3234 goto ex_put;
3235
3236 if (vhcr->op_modifier == 0) {
3237 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
Jack Morgensteindcf353b2012-03-07 05:56:35 +00003238 goto ex_put;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003239 }
3240
3241 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3242ex_put:
3243 put_res(dev, slave, cqn, RES_CQ);
3244
3245 return err;
3246}
3247
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003248static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3249{
3250 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3251 int log_rq_stride = srqc->logstride & 7;
3252 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3253
3254 if (log_srq_size + log_rq_stride + 4 < page_shift)
3255 return 1;
3256
3257 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3258}
3259
3260int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3261 struct mlx4_vhcr *vhcr,
3262 struct mlx4_cmd_mailbox *inbox,
3263 struct mlx4_cmd_mailbox *outbox,
3264 struct mlx4_cmd_info *cmd)
3265{
3266 int err;
3267 int srqn = vhcr->in_modifier;
3268 struct res_mtt *mtt;
3269 struct res_srq *srq;
3270 struct mlx4_srq_context *srqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003271 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003272
3273 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3274 return -EINVAL;
3275
3276 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3277 if (err)
3278 return err;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003279 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003280 if (err)
3281 goto ex_abort;
3282 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3283 mtt);
3284 if (err)
3285 goto ex_put_mtt;
3286
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003287 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3288 if (err)
3289 goto ex_put_mtt;
3290
3291 atomic_inc(&mtt->ref_count);
3292 srq->mtt = mtt;
3293 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3294 res_end_move(dev, slave, RES_SRQ, srqn);
3295 return 0;
3296
3297ex_put_mtt:
3298 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3299ex_abort:
3300 res_abort_move(dev, slave, RES_SRQ, srqn);
3301
3302 return err;
3303}
3304
3305int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3306 struct mlx4_vhcr *vhcr,
3307 struct mlx4_cmd_mailbox *inbox,
3308 struct mlx4_cmd_mailbox *outbox,
3309 struct mlx4_cmd_info *cmd)
3310{
3311 int err;
3312 int srqn = vhcr->in_modifier;
3313 struct res_srq *srq;
3314
3315 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3316 if (err)
3317 return err;
3318 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3319 if (err)
3320 goto ex_abort;
3321 atomic_dec(&srq->mtt->ref_count);
3322 if (srq->cq)
3323 atomic_dec(&srq->cq->ref_count);
3324 res_end_move(dev, slave, RES_SRQ, srqn);
3325
3326 return 0;
3327
3328ex_abort:
3329 res_abort_move(dev, slave, RES_SRQ, srqn);
3330
3331 return err;
3332}
3333
3334int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3335 struct mlx4_vhcr *vhcr,
3336 struct mlx4_cmd_mailbox *inbox,
3337 struct mlx4_cmd_mailbox *outbox,
3338 struct mlx4_cmd_info *cmd)
3339{
3340 int err;
3341 int srqn = vhcr->in_modifier;
3342 struct res_srq *srq;
3343
3344 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3345 if (err)
3346 return err;
3347 if (srq->com.from_state != RES_SRQ_HW) {
3348 err = -EBUSY;
3349 goto out;
3350 }
3351 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3352out:
3353 put_res(dev, slave, srqn, RES_SRQ);
3354 return err;
3355}
3356
3357int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3358 struct mlx4_vhcr *vhcr,
3359 struct mlx4_cmd_mailbox *inbox,
3360 struct mlx4_cmd_mailbox *outbox,
3361 struct mlx4_cmd_info *cmd)
3362{
3363 int err;
3364 int srqn = vhcr->in_modifier;
3365 struct res_srq *srq;
3366
3367 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3368 if (err)
3369 return err;
3370
3371 if (srq->com.from_state != RES_SRQ_HW) {
3372 err = -EBUSY;
3373 goto out;
3374 }
3375
3376 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3377out:
3378 put_res(dev, slave, srqn, RES_SRQ);
3379 return err;
3380}
3381
3382int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3383 struct mlx4_vhcr *vhcr,
3384 struct mlx4_cmd_mailbox *inbox,
3385 struct mlx4_cmd_mailbox *outbox,
3386 struct mlx4_cmd_info *cmd)
3387{
3388 int err;
3389 int qpn = vhcr->in_modifier & 0x7fffff;
3390 struct res_qp *qp;
3391
3392 err = get_res(dev, slave, qpn, RES_QP, &qp);
3393 if (err)
3394 return err;
3395 if (qp->com.from_state != RES_QP_HW) {
3396 err = -EBUSY;
3397 goto out;
3398 }
3399
3400 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3401out:
3402 put_res(dev, slave, qpn, RES_QP);
3403 return err;
3404}
3405
Jack Morgenstein54679e12012-08-03 08:40:43 +00003406int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3407 struct mlx4_vhcr *vhcr,
3408 struct mlx4_cmd_mailbox *inbox,
3409 struct mlx4_cmd_mailbox *outbox,
3410 struct mlx4_cmd_info *cmd)
3411{
3412 struct mlx4_qp_context *context = inbox->buf + 8;
3413 adjust_proxy_tun_qkey(dev, vhcr, context);
3414 update_pkey_index(dev, slave, inbox);
3415 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3416}
3417
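/* Translate the port bit (bit 6) of the sched_queue fields in the QP
 * context from the slave's port numbering to the physical port returned by
 * mlx4_slave_convert_port().  The primary path is updated when it is being
 * modified or the port is Ethernet; the alternate path only when it is
 * being modified.
 */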
Matan Barak449fc482014-03-19 18:11:52 +02003418static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3419 struct mlx4_qp_context *qpc,
3420 struct mlx4_cmd_mailbox *inbox)
3421{
3422 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3423 u8 pri_sched_queue;
3424 int port = mlx4_slave_convert_port(
3425 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3426
3427 if (port < 0)
3428 return -EINVAL;
3429
3430 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3431 ((port & 1) << 6);
3432
3433 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3434 mlx4_is_eth(dev, port + 1)) {
3435 qpc->pri_path.sched_queue = pri_sched_queue;
3436 }
3437
3438 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3439 port = mlx4_slave_convert_port(
3440 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3441 + 1) - 1;
3442 if (port < 0)
3443 return -EINVAL;
3444 qpc->alt_path.sched_queue =
3445 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3446 (port & 1) << 6;
3447 }
3448 return 0;
3449}
3450
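/* For QPs on Ethernet (RoCE) ports, verify that the SMAC index programmed
 * in the QP context refers to a MAC address this slave actually registered
 * on that port.
 */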
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02003451static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3452 struct mlx4_qp_context *qpc,
3453 struct mlx4_cmd_mailbox *inbox)
3454{
3455 u64 mac;
3456 int port;
3457 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3458 u8 sched = *(u8 *)(inbox->buf + 64);
3459 u8 smac_ix;
3460
3461 port = (sched >> 6 & 1) + 1;
3462 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3463 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3464 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3465 return -ENOENT;
3466 }
3467 return 0;
3468}
3469
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003470int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3471 struct mlx4_vhcr *vhcr,
3472 struct mlx4_cmd_mailbox *inbox,
3473 struct mlx4_cmd_mailbox *outbox,
3474 struct mlx4_cmd_info *cmd)
3475{
Jack Morgenstein54679e12012-08-03 08:40:43 +00003476 int err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003477 struct mlx4_qp_context *qpc = inbox->buf + 8;
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003478 int qpn = vhcr->in_modifier & 0x7fffff;
3479 struct res_qp *qp;
3480 u8 orig_sched_queue;
Rony Efraimf0f829b2013-11-07 12:19:51 +02003481 __be32 orig_param3 = qpc->param3;
3482 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3483 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3484 u8 orig_pri_path_fl = qpc->pri_path.fl;
3485 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3486 u8 orig_feup = qpc->pri_path.feup;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003487
Matan Barak449fc482014-03-19 18:11:52 +02003488 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3489 if (err)
3490 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003491 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3492 if (err)
3493 return err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003494
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02003495 if (roce_verify_mac(dev, slave, qpc, inbox))
3496 return -EINVAL;
3497
Jack Morgenstein54679e12012-08-03 08:40:43 +00003498 update_pkey_index(dev, slave, inbox);
3499 update_gid(dev, inbox, (u8)slave);
3500 adjust_proxy_tun_qkey(dev, vhcr, qpc);
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003501 orig_sched_queue = qpc->pri_path.sched_queue;
3502 err = update_vport_qp_param(dev, inbox, slave, qpn);
Rony Efraim3f7fb022013-04-25 05:22:28 +00003503 if (err)
3504 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003505
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003506 err = get_res(dev, slave, qpn, RES_QP, &qp);
3507 if (err)
3508 return err;
3509 if (qp->com.from_state != RES_QP_HW) {
3510 err = -EBUSY;
3511 goto out;
3512 }
3513
3514 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3515out:
3516 /* if no error, save sched queue value passed in by VF. This is
3517	 * essentially the QoS value provided by the VF. This will be useful
3518 * if we allow dynamic changes from VST back to VGT
3519 */
Rony Efraimf0f829b2013-11-07 12:19:51 +02003520 if (!err) {
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003521 qp->sched_queue = orig_sched_queue;
Rony Efraimf0f829b2013-11-07 12:19:51 +02003522 qp->param3 = orig_param3;
3523 qp->vlan_control = orig_vlan_control;
3524 qp->fvl_rx = orig_fvl_rx;
3525 qp->pri_path_fl = orig_pri_path_fl;
3526 qp->vlan_index = orig_vlan_index;
3527 qp->feup = orig_feup;
3528 }
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003529 put_res(dev, slave, qpn, RES_QP);
3530 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003531}
3532
3533int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3534 struct mlx4_vhcr *vhcr,
3535 struct mlx4_cmd_mailbox *inbox,
3536 struct mlx4_cmd_mailbox *outbox,
3537 struct mlx4_cmd_info *cmd)
3538{
3539 int err;
3540 struct mlx4_qp_context *context = inbox->buf + 8;
3541
Matan Barak449fc482014-03-19 18:11:52 +02003542 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3543 if (err)
3544 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003545 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3546 if (err)
3547 return err;
3548
3549 update_pkey_index(dev, slave, inbox);
3550 update_gid(dev, inbox, (u8)slave);
3551 adjust_proxy_tun_qkey(dev, vhcr, context);
3552 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3553}
3554
3555int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3556 struct mlx4_vhcr *vhcr,
3557 struct mlx4_cmd_mailbox *inbox,
3558 struct mlx4_cmd_mailbox *outbox,
3559 struct mlx4_cmd_info *cmd)
3560{
3561 int err;
3562 struct mlx4_qp_context *context = inbox->buf + 8;
3563
Matan Barak449fc482014-03-19 18:11:52 +02003564 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3565 if (err)
3566 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003567 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3568 if (err)
3569 return err;
3570
3571 update_pkey_index(dev, slave, inbox);
3572 update_gid(dev, inbox, (u8)slave);
3573 adjust_proxy_tun_qkey(dev, vhcr, context);
3574 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3575}
3576
3577
3578int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3579 struct mlx4_vhcr *vhcr,
3580 struct mlx4_cmd_mailbox *inbox,
3581 struct mlx4_cmd_mailbox *outbox,
3582 struct mlx4_cmd_info *cmd)
3583{
3584 struct mlx4_qp_context *context = inbox->buf + 8;
Matan Barak449fc482014-03-19 18:11:52 +02003585 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3586 if (err)
3587 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003588 adjust_proxy_tun_qkey(dev, vhcr, context);
3589 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3590}
3591
3592int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3593 struct mlx4_vhcr *vhcr,
3594 struct mlx4_cmd_mailbox *inbox,
3595 struct mlx4_cmd_mailbox *outbox,
3596 struct mlx4_cmd_info *cmd)
3597{
3598 int err;
3599 struct mlx4_qp_context *context = inbox->buf + 8;
3600
Matan Barak449fc482014-03-19 18:11:52 +02003601 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3602 if (err)
3603 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003604 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3605 if (err)
3606 return err;
3607
3608 adjust_proxy_tun_qkey(dev, vhcr, context);
3609 update_gid(dev, inbox, (u8)slave);
3610 update_pkey_index(dev, slave, inbox);
3611 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3612}
3613
3614int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3615 struct mlx4_vhcr *vhcr,
3616 struct mlx4_cmd_mailbox *inbox,
3617 struct mlx4_cmd_mailbox *outbox,
3618 struct mlx4_cmd_info *cmd)
3619{
3620 int err;
3621 struct mlx4_qp_context *context = inbox->buf + 8;
3622
Matan Barak449fc482014-03-19 18:11:52 +02003623 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3624 if (err)
3625 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003626 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3627 if (err)
3628 return err;
3629
3630 adjust_proxy_tun_qkey(dev, vhcr, context);
3631 update_gid(dev, inbox, (u8)slave);
3632 update_pkey_index(dev, slave, inbox);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003633 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3634}
3635
3636int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3637 struct mlx4_vhcr *vhcr,
3638 struct mlx4_cmd_mailbox *inbox,
3639 struct mlx4_cmd_mailbox *outbox,
3640 struct mlx4_cmd_info *cmd)
3641{
3642 int err;
3643 int qpn = vhcr->in_modifier & 0x7fffff;
3644 struct res_qp *qp;
3645
3646 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3647 if (err)
3648 return err;
3649 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3650 if (err)
3651 goto ex_abort;
3652
3653 atomic_dec(&qp->mtt->ref_count);
3654 atomic_dec(&qp->rcq->ref_count);
3655 atomic_dec(&qp->scq->ref_count);
3656 if (qp->srq)
3657 atomic_dec(&qp->srq->ref_count);
3658 res_end_move(dev, slave, RES_QP, qpn);
3659 return 0;
3660
3661ex_abort:
3662 res_abort_move(dev, slave, RES_QP, qpn);
3663
3664 return err;
3665}
3666
3667static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3668 struct res_qp *rqp, u8 *gid)
3669{
3670 struct res_gid *res;
3671
3672 list_for_each_entry(res, &rqp->mcg_list, list) {
3673 if (!memcmp(res->gid, gid, 16))
3674 return res;
3675 }
3676 return NULL;
3677}
3678
3679static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00003680 u8 *gid, enum mlx4_protocol prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003681 enum mlx4_steer_type steer, u64 reg_id)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003682{
3683 struct res_gid *res;
3684 int err;
3685
3686 res = kzalloc(sizeof *res, GFP_KERNEL);
3687 if (!res)
3688 return -ENOMEM;
3689
3690 spin_lock_irq(&rqp->mcg_spl);
3691 if (find_gid(dev, slave, rqp, gid)) {
3692 kfree(res);
3693 err = -EEXIST;
3694 } else {
3695 memcpy(res->gid, gid, 16);
3696 res->prot = prot;
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00003697 res->steer = steer;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003698 res->reg_id = reg_id;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003699 list_add_tail(&res->list, &rqp->mcg_list);
3700 err = 0;
3701 }
3702 spin_unlock_irq(&rqp->mcg_spl);
3703
3704 return err;
3705}
3706
3707static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00003708 u8 *gid, enum mlx4_protocol prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003709 enum mlx4_steer_type steer, u64 *reg_id)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003710{
3711 struct res_gid *res;
3712 int err;
3713
3714 spin_lock_irq(&rqp->mcg_spl);
3715 res = find_gid(dev, slave, rqp, gid);
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00003716 if (!res || res->prot != prot || res->steer != steer)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003717 err = -EINVAL;
3718 else {
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003719 *reg_id = res->reg_id;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003720 list_del(&res->list);
3721 kfree(res);
3722 err = 0;
3723 }
3724 spin_unlock_irq(&rqp->mcg_spl);
3725
3726 return err;
3727}
3728
Matan Barak449fc482014-03-19 18:11:52 +02003729static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3730 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003731 enum mlx4_steer_type type, u64 *reg_id)
3732{
3733 switch (dev->caps.steering_mode) {
Matan Barak449fc482014-03-19 18:11:52 +02003734 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3735 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3736 if (port < 0)
3737 return port;
3738 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003739 block_loopback, prot,
3740 reg_id);
Matan Barak449fc482014-03-19 18:11:52 +02003741 }
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003742 case MLX4_STEERING_MODE_B0:
Matan Barak449fc482014-03-19 18:11:52 +02003743 if (prot == MLX4_PROT_ETH) {
3744 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3745 if (port < 0)
3746 return port;
3747 gid[5] = port;
3748 }
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003749 return mlx4_qp_attach_common(dev, qp, gid,
3750 block_loopback, prot, type);
3751 default:
3752 return -EINVAL;
3753 }
3754}
3755
Matan Barak449fc482014-03-19 18:11:52 +02003756static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3757 u8 gid[16], enum mlx4_protocol prot,
3758 enum mlx4_steer_type type, u64 reg_id)
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003759{
3760 switch (dev->caps.steering_mode) {
3761 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3762 return mlx4_flow_detach(dev, reg_id);
3763 case MLX4_STEERING_MODE_B0:
3764 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3765 default:
3766 return -EINVAL;
3767 }
3768}
3769
Jack Morgenstein531d9012014-05-04 17:07:22 +03003770static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3771 u8 *gid, enum mlx4_protocol prot)
3772{
3773 int real_port;
3774
3775 if (prot != MLX4_PROT_ETH)
3776 return 0;
3777
3778 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3779 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3780 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3781 if (real_port < 0)
3782 return -EINVAL;
3783 gid[5] = real_port;
3784 }
3785
3786 return 0;
3787}
3788
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003789int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3790 struct mlx4_vhcr *vhcr,
3791 struct mlx4_cmd_mailbox *inbox,
3792 struct mlx4_cmd_mailbox *outbox,
3793 struct mlx4_cmd_info *cmd)
3794{
3795 struct mlx4_qp qp; /* dummy for calling attach/detach */
3796 u8 *gid = inbox->buf;
3797 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
Or Gerlitz162344e2012-05-15 10:34:57 +00003798 int err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003799 int qpn;
3800 struct res_qp *rqp;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003801 u64 reg_id = 0;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003802 int attach = vhcr->op_modifier;
3803 int block_loopback = vhcr->in_modifier >> 31;
3804 u8 steer_type_mask = 2;
Eugenia Emantayev75c60622012-02-15 06:22:49 +00003805 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003806
3807 qpn = vhcr->in_modifier & 0xffffff;
3808 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3809 if (err)
3810 return err;
3811
3812 qp.qpn = qpn;
3813 if (attach) {
Matan Barak449fc482014-03-19 18:11:52 +02003814 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003815 type, &reg_id);
3816 if (err) {
3817			pr_err("Failed to attach rule to qp 0x%x\n", qpn);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003818 goto ex_put;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003819 }
3820 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003821 if (err)
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003822 goto ex_detach;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003823 } else {
Jack Morgenstein531d9012014-05-04 17:07:22 +03003824 err = mlx4_adjust_port(dev, slave, gid, prot);
3825 if (err)
3826 goto ex_put;
3827
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003828 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003829 if (err)
3830 goto ex_put;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003831
3832 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3833 if (err)
3834			pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
3835 qpn, reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003836 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003837 put_res(dev, slave, qpn, RES_QP);
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003838 return err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003839
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003840ex_detach:
3841 qp_detach(dev, &qp, gid, prot, type, reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003842ex_put:
3843 put_res(dev, slave, qpn, RES_QP);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003844 return err;
3845}
3846
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00003847/*
3848 * MAC validation for Flow Steering rules.
3849 * A VF can attach rules only with a MAC address that is assigned to it.
3850 */
3851static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3852 struct list_head *rlist)
3853{
3854 struct mac_res *res, *tmp;
3855 __be64 be_mac;
3856
3857	/* make sure it isn't a multicast or broadcast mac */
3858 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3859 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3860 list_for_each_entry_safe(res, tmp, rlist, list) {
3861 be_mac = cpu_to_be64(res->mac << 16);
dingtianhongc0623e52013-12-30 15:40:55 +08003862 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00003863 return 0;
3864 }
3865 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3866 eth_header->eth.dst_mac, slave);
3867 return -EINVAL;
3868 }
3869 return 0;
3870}
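
/* Illustration only (not used by the driver): the byte layout that
 * validate_eth_header_mac() relies on.  The tracker keeps each MAC in the low
 * 48 bits of a u64; shifting left by 16 and storing it big-endian
 * (cpu_to_be64 above) lines its first six bytes up with the rule's
 * on-the-wire dst_mac[], so a plain byte comparison works.
 */
static void mac_u64_to_wire(u64 mac, u8 out[6])
{
	u64 shifted = mac << 16;	/* MAC now occupies the top 48 bits */
	int i;

	for (i = 0; i < 6; i++)		/* most significant byte first */
		out[i] = (shifted >> (56 - 8 * i)) & 0xff;
}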
3871
3872/*
3873 * In case of a missing eth header, append an eth header with a MAC address
3874 * assigned to the VF.
3875 */
3876static int add_eth_header(struct mlx4_dev *dev, int slave,
3877 struct mlx4_cmd_mailbox *inbox,
3878 struct list_head *rlist, int header_id)
3879{
3880 struct mac_res *res, *tmp;
3881 u8 port;
3882 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3883 struct mlx4_net_trans_rule_hw_eth *eth_header;
3884 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3885 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3886 __be64 be_mac = 0;
3887 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3888
3889 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
Hadar Hen Zion015465f2013-01-30 23:07:02 +00003890 port = ctrl->port;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00003891 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3892
3893 /* Clear a space in the inbox for eth header */
3894 switch (header_id) {
3895 case MLX4_NET_TRANS_RULE_ID_IPV4:
3896 ip_header =
3897 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3898 memmove(ip_header, eth_header,
3899 sizeof(*ip_header) + sizeof(*l4_header));
3900 break;
3901 case MLX4_NET_TRANS_RULE_ID_TCP:
3902 case MLX4_NET_TRANS_RULE_ID_UDP:
3903 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3904 (eth_header + 1);
3905 memmove(l4_header, eth_header, sizeof(*l4_header));
3906 break;
3907 default:
3908 return -EINVAL;
3909 }
3910 list_for_each_entry_safe(res, tmp, rlist, list) {
3911 if (port == res->port) {
3912 be_mac = cpu_to_be64(res->mac << 16);
3913 break;
3914 }
3915 }
3916 if (!be_mac) {
Joe Perches1a91de22014-05-07 12:52:57 -07003917		pr_err("Failed adding eth header to FS rule: can't find matching MAC for port %d\n",
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00003918 port);
3919 return -EINVAL;
3920 }
3921
3922 memset(eth_header, 0, sizeof(*eth_header));
3923 eth_header->size = sizeof(*eth_header) >> 2;
3924 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3925 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3926 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3927
3928 return 0;
3929
3930}
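
/* Illustration only (not used by the driver): the "make room, then prepend"
 * step performed by add_eth_header() above -- the rule headers already in the
 * inbox are shifted towards the end of the buffer so a synthetic L2 header
 * can be written in front of them.  Names and sizes here are illustrative.
 */
static void prepend_header(u8 *buf, size_t existing_len,
			   const u8 *hdr, size_t hdr_len)
{
	memmove(buf + hdr_len, buf, existing_len);	/* shift existing headers */
	memcpy(buf, hdr, hdr_len);			/* write the new header */
}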
3931
Matan Barakce8d9e02014-05-15 15:29:27 +03003932#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
3933int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
3934 struct mlx4_vhcr *vhcr,
3935 struct mlx4_cmd_mailbox *inbox,
3936 struct mlx4_cmd_mailbox *outbox,
3937 struct mlx4_cmd_info *cmd_info)
3938{
3939 int err;
3940 u32 qpn = vhcr->in_modifier & 0xffffff;
3941 struct res_qp *rqp;
3942 u64 mac;
3943 unsigned port;
3944 u64 pri_addr_path_mask;
3945 struct mlx4_update_qp_context *cmd;
3946 int smac_index;
3947
3948 cmd = (struct mlx4_update_qp_context *)inbox->buf;
3949
3950 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
3951 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
3952 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
3953 return -EPERM;
3954
3955 /* Just change the smac for the QP */
3956 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3957 if (err) {
3958 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
3959 return err;
3960 }
3961
3962 port = (rqp->sched_queue >> 6 & 1) + 1;
3963 smac_index = cmd->qp_context.pri_path.grh_mylmc;
3964 err = mac_find_smac_ix_in_slave(dev, slave, port,
3965 smac_index, &mac);
3966 if (err) {
3967 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
3968 qpn, smac_index);
3969 goto err_mac;
3970 }
3971
3972 err = mlx4_cmd(dev, inbox->dma,
3973 vhcr->in_modifier, 0,
3974 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
3975 MLX4_CMD_NATIVE);
3976 if (err) {
3977		mlx4_err(dev, "Failed to update qpn 0x%x, UPDATE_QP command failed\n", qpn);
3978 goto err_mac;
3979 }
3980
3981err_mac:
3982 put_res(dev, slave, qpn, RES_QP);
3983 return err;
3984}
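
/* Illustration only (not used by the driver): the allow-list test applied to
 * primary_addr_path_mask above -- a request is accepted only if every bit it
 * sets is also set in the supported mask (here just
 * MLX4_UPD_QP_PATH_MASK_MAC_INDEX).
 */
static inline bool upd_qp_mask_supported(u64 requested, u64 supported)
{
	return (requested & ~supported) == 0;
}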
3985
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00003986int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3987 struct mlx4_vhcr *vhcr,
3988 struct mlx4_cmd_mailbox *inbox,
3989 struct mlx4_cmd_mailbox *outbox,
3990 struct mlx4_cmd_info *cmd)
3991{
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00003992
3993 struct mlx4_priv *priv = mlx4_priv(dev);
3994 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3995 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00003996 int err;
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00003997 int qpn;
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00003998 struct res_qp *rqp;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00003999 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4000 struct _rule_hw *rule_header;
4001 int header_id;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004002
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00004003 if (dev->caps.steering_mode !=
4004 MLX4_STEERING_MODE_DEVICE_MANAGED)
4005 return -EOPNOTSUPP;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004006
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004007 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
Matan Barak449fc482014-03-19 18:11:52 +02004008	err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4009	if (err <= 0)
4010		return -EINVAL;
	ctrl->port = err;
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004011 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004012 err = get_res(dev, slave, qpn, RES_QP, &rqp);
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004013 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07004014 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004015 return err;
4016 }
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004017 rule_header = (struct _rule_hw *)(ctrl + 1);
4018 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4019
4020 switch (header_id) {
4021 case MLX4_NET_TRANS_RULE_ID_ETH:
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004022 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4023 err = -EINVAL;
4024 goto err_put;
4025 }
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004026 break;
Jack Morgenstein60396682012-10-03 15:38:48 +00004027 case MLX4_NET_TRANS_RULE_ID_IB:
4028 break;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004029 case MLX4_NET_TRANS_RULE_ID_IPV4:
4030 case MLX4_NET_TRANS_RULE_ID_TCP:
4031 case MLX4_NET_TRANS_RULE_ID_UDP:
Joe Perches1a91de22014-05-07 12:52:57 -07004032 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004033 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4034 err = -EINVAL;
4035 goto err_put;
4036 }
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004037 vhcr->in_modifier +=
4038 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4039 break;
4040 default:
Joe Perches1a91de22014-05-07 12:52:57 -07004041 pr_err("Corrupted mailbox\n");
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004042 err = -EINVAL;
4043 goto err_put;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004044 }
4045
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004046 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4047 vhcr->in_modifier, 0,
4048 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4049 MLX4_CMD_NATIVE);
4050 if (err)
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004051 goto err_put;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004052
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004053 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004054 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07004055		mlx4_err(dev, "Failed to add flow steering resources\n");
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004056		/* detach the rule */
4057 mlx4_cmd(dev, vhcr->out_param, 0, 0,
Hadar Hen Zion2065b382012-12-06 17:11:58 +00004058 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004059 MLX4_CMD_NATIVE);
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004060 goto err_put;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004061 }
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004062 atomic_inc(&rqp->ref_count);
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004063err_put:
4064 put_res(dev, slave, qpn, RES_QP);
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004065 return err;
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00004066}
4067
4068int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4069 struct mlx4_vhcr *vhcr,
4070 struct mlx4_cmd_mailbox *inbox,
4071 struct mlx4_cmd_mailbox *outbox,
4072 struct mlx4_cmd_info *cmd)
4073{
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004074 int err;
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004075 struct res_qp *rqp;
4076	struct res_fs_rule *rrule;
	int qpn;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004077
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00004078 if (dev->caps.steering_mode !=
4079 MLX4_STEERING_MODE_DEVICE_MANAGED)
4080 return -EOPNOTSUPP;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004081
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004082 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4083 if (err)
4084 return err;
4085	/* Release the rule from busy state before removal */
4086 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4087	qpn = rrule->qpn; /* save now; rem_res_range() below frees rrule */
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4088 if (err)
4089 return err;
4090
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004091 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4092 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07004093		mlx4_err(dev, "Failed to remove flow steering resources\n");
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004094 goto out;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004095 }
4096
4097 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4098 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4099 MLX4_CMD_NATIVE);
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004100 if (!err)
4101 atomic_dec(&rqp->ref_count);
4102out:
4103 put_res(dev, slave, rrule->qpn, RES_QP);
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004104 return err;
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00004105}
4106
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004107enum {
4108 BUSY_MAX_RETRIES = 10
4109};
4110
4111int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4112 struct mlx4_vhcr *vhcr,
4113 struct mlx4_cmd_mailbox *inbox,
4114 struct mlx4_cmd_mailbox *outbox,
4115 struct mlx4_cmd_info *cmd)
4116{
4117 int err;
4118 int index = vhcr->in_modifier & 0xffff;
4119
4120 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4121 if (err)
4122 return err;
4123
4124 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4125 put_res(dev, slave, index, RES_COUNTER);
4126 return err;
4127}
4128
4129static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4130{
4131 struct res_gid *rgid;
4132 struct res_gid *tmp;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004133 struct mlx4_qp qp; /* dummy for calling attach/detach */
4134
4135 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004136 switch (dev->caps.steering_mode) {
4137 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4138 mlx4_flow_detach(dev, rgid->reg_id);
4139 break;
4140 case MLX4_STEERING_MODE_B0:
4141 qp.qpn = rqp->local_qpn;
4142 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4143 rgid->prot, rgid->steer);
4144 break;
4145 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004146 list_del(&rgid->list);
4147 kfree(rgid);
4148 }
4149}
4150
4151static int _move_all_busy(struct mlx4_dev *dev, int slave,
4152 enum mlx4_resource type, int print)
4153{
4154 struct mlx4_priv *priv = mlx4_priv(dev);
4155 struct mlx4_resource_tracker *tracker =
4156 &priv->mfunc.master.res_tracker;
4157 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4158 struct res_common *r;
4159 struct res_common *tmp;
4160 int busy;
4161
4162 busy = 0;
4163 spin_lock_irq(mlx4_tlock(dev));
4164 list_for_each_entry_safe(r, tmp, rlist, list) {
4165 if (r->owner == slave) {
4166 if (!r->removing) {
4167 if (r->state == RES_ANY_BUSY) {
4168 if (print)
4169 mlx4_dbg(dev,
Hadar Hen Zionaa1ec3d2012-07-05 04:03:42 +00004170 "%s id 0x%llx is busy\n",
Jack Morgenstein956463732014-06-08 13:49:45 +03004171 resource_str(type),
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004172 r->res_id);
4173 ++busy;
4174 } else {
4175 r->from_state = r->state;
4176 r->state = RES_ANY_BUSY;
4177 r->removing = 1;
4178 }
4179 }
4180 }
4181 }
4182 spin_unlock_irq(mlx4_tlock(dev));
4183
4184 return busy;
4185}
4186
4187static int move_all_busy(struct mlx4_dev *dev, int slave,
4188 enum mlx4_resource type)
4189{
4190 unsigned long begin;
4191 int busy;
4192
4193 begin = jiffies;
4194 do {
4195 busy = _move_all_busy(dev, slave, type, 0);
4196 if (time_after(jiffies, begin + 5 * HZ))
4197 break;
4198 if (busy)
4199 cond_resched();
4200 } while (busy);
4201
4202 if (busy)
4203 busy = _move_all_busy(dev, slave, type, 1);
4204
4205 return busy;
4206}
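
/* Illustration only (not used by the driver): the bounded busy-wait shape
 * used by move_all_busy() above -- retry while some resources are still in a
 * transient busy state, yield between attempts, and give up after a fixed
 * deadline (5 seconds above) instead of spinning forever.
 */
static int retry_until_done(int (*attempt)(void *), void *arg,
			    unsigned long deadline_jiffies)
{
	int pending;

	do {
		pending = attempt(arg);		/* returns how many are still busy */
		if (time_after(jiffies, deadline_jiffies))
			break;
		if (pending)
			cond_resched();		/* let other work make progress */
	} while (pending);

	return pending;
}
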
4207static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4208{
4209 struct mlx4_priv *priv = mlx4_priv(dev);
4210 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4211 struct list_head *qp_list =
4212 &tracker->slave_list[slave].res_list[RES_QP];
4213 struct res_qp *qp;
4214 struct res_qp *tmp;
4215 int state;
4216 u64 in_param;
4217 int qpn;
4218 int err;
4219
4220 err = move_all_busy(dev, slave, RES_QP);
4221 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004222 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4223 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004224
4225 spin_lock_irq(mlx4_tlock(dev));
4226 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4227 spin_unlock_irq(mlx4_tlock(dev));
4228 if (qp->com.owner == slave) {
4229 qpn = qp->com.res_id;
4230 detach_qp(dev, slave, qp);
4231 state = qp->com.from_state;
4232 while (state != 0) {
4233 switch (state) {
4234 case RES_QP_RESERVED:
4235 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004236 rb_erase(&qp->com.node,
4237 &tracker->res_tree[RES_QP]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004238 list_del(&qp->com.list);
4239 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004240 if (!valid_reserved(dev, slave, qpn)) {
4241 __mlx4_qp_release_range(dev, qpn, 1);
4242 mlx4_release_resource(dev, slave,
4243 RES_QP, 1, 0);
4244 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004245 kfree(qp);
4246 state = 0;
4247 break;
4248 case RES_QP_MAPPED:
4249 if (!valid_reserved(dev, slave, qpn))
4250 __mlx4_qp_free_icm(dev, qpn);
4251 state = RES_QP_RESERVED;
4252 break;
4253 case RES_QP_HW:
4254 in_param = slave;
4255 err = mlx4_cmd(dev, in_param,
4256 qp->local_qpn, 2,
4257 MLX4_CMD_2RST_QP,
4258 MLX4_CMD_TIME_CLASS_A,
4259 MLX4_CMD_NATIVE);
4260 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004261 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4262 slave, qp->local_qpn);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004263 atomic_dec(&qp->rcq->ref_count);
4264 atomic_dec(&qp->scq->ref_count);
4265 atomic_dec(&qp->mtt->ref_count);
4266 if (qp->srq)
4267 atomic_dec(&qp->srq->ref_count);
4268 state = RES_QP_MAPPED;
4269 break;
4270 default:
4271 state = 0;
4272 }
4273 }
4274 }
4275 spin_lock_irq(mlx4_tlock(dev));
4276 }
4277 spin_unlock_irq(mlx4_tlock(dev));
4278}
4279
4280static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4281{
4282 struct mlx4_priv *priv = mlx4_priv(dev);
4283 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4284 struct list_head *srq_list =
4285 &tracker->slave_list[slave].res_list[RES_SRQ];
4286 struct res_srq *srq;
4287 struct res_srq *tmp;
4288 int state;
4289 u64 in_param;
4290 LIST_HEAD(tlist);
4291 int srqn;
4292 int err;
4293
4294 err = move_all_busy(dev, slave, RES_SRQ);
4295 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004296 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4297 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004298
4299 spin_lock_irq(mlx4_tlock(dev));
4300 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4301 spin_unlock_irq(mlx4_tlock(dev));
4302 if (srq->com.owner == slave) {
4303 srqn = srq->com.res_id;
4304 state = srq->com.from_state;
4305 while (state != 0) {
4306 switch (state) {
4307 case RES_SRQ_ALLOCATED:
4308 __mlx4_srq_free_icm(dev, srqn);
4309 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004310 rb_erase(&srq->com.node,
4311 &tracker->res_tree[RES_SRQ]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004312 list_del(&srq->com.list);
4313 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004314 mlx4_release_resource(dev, slave,
4315 RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004316 kfree(srq);
4317 state = 0;
4318 break;
4319
4320 case RES_SRQ_HW:
4321 in_param = slave;
4322 err = mlx4_cmd(dev, in_param, srqn, 1,
4323 MLX4_CMD_HW2SW_SRQ,
4324 MLX4_CMD_TIME_CLASS_A,
4325 MLX4_CMD_NATIVE);
4326 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004327 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004328 slave, srqn);
4329
4330 atomic_dec(&srq->mtt->ref_count);
4331 if (srq->cq)
4332 atomic_dec(&srq->cq->ref_count);
4333 state = RES_SRQ_ALLOCATED;
4334 break;
4335
4336 default:
4337 state = 0;
4338 }
4339 }
4340 }
4341 spin_lock_irq(mlx4_tlock(dev));
4342 }
4343 spin_unlock_irq(mlx4_tlock(dev));
4344}
4345
4346static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4347{
4348 struct mlx4_priv *priv = mlx4_priv(dev);
4349 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4350 struct list_head *cq_list =
4351 &tracker->slave_list[slave].res_list[RES_CQ];
4352 struct res_cq *cq;
4353 struct res_cq *tmp;
4354 int state;
4355 u64 in_param;
4356 LIST_HEAD(tlist);
4357 int cqn;
4358 int err;
4359
4360 err = move_all_busy(dev, slave, RES_CQ);
4361 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004362 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4363 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004364
4365 spin_lock_irq(mlx4_tlock(dev));
4366 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4367 spin_unlock_irq(mlx4_tlock(dev));
4368 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4369 cqn = cq->com.res_id;
4370 state = cq->com.from_state;
4371 while (state != 0) {
4372 switch (state) {
4373 case RES_CQ_ALLOCATED:
4374 __mlx4_cq_free_icm(dev, cqn);
4375 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004376 rb_erase(&cq->com.node,
4377 &tracker->res_tree[RES_CQ]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004378 list_del(&cq->com.list);
4379 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004380 mlx4_release_resource(dev, slave,
4381 RES_CQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004382 kfree(cq);
4383 state = 0;
4384 break;
4385
4386 case RES_CQ_HW:
4387 in_param = slave;
4388 err = mlx4_cmd(dev, in_param, cqn, 1,
4389 MLX4_CMD_HW2SW_CQ,
4390 MLX4_CMD_TIME_CLASS_A,
4391 MLX4_CMD_NATIVE);
4392 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004393 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004394 slave, cqn);
4395 atomic_dec(&cq->mtt->ref_count);
4396 state = RES_CQ_ALLOCATED;
4397 break;
4398
4399 default:
4400 state = 0;
4401 }
4402 }
4403 }
4404 spin_lock_irq(mlx4_tlock(dev));
4405 }
4406 spin_unlock_irq(mlx4_tlock(dev));
4407}
4408
4409static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4410{
4411 struct mlx4_priv *priv = mlx4_priv(dev);
4412 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4413 struct list_head *mpt_list =
4414 &tracker->slave_list[slave].res_list[RES_MPT];
4415 struct res_mpt *mpt;
4416 struct res_mpt *tmp;
4417 int state;
4418 u64 in_param;
4419 LIST_HEAD(tlist);
4420 int mptn;
4421 int err;
4422
4423 err = move_all_busy(dev, slave, RES_MPT);
4424 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004425 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4426 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004427
4428 spin_lock_irq(mlx4_tlock(dev));
4429 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4430 spin_unlock_irq(mlx4_tlock(dev));
4431 if (mpt->com.owner == slave) {
4432 mptn = mpt->com.res_id;
4433 state = mpt->com.from_state;
4434 while (state != 0) {
4435 switch (state) {
4436 case RES_MPT_RESERVED:
Shani Michaelib20e5192013-02-06 16:19:08 +00004437 __mlx4_mpt_release(dev, mpt->key);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004438 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004439 rb_erase(&mpt->com.node,
4440 &tracker->res_tree[RES_MPT]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004441 list_del(&mpt->com.list);
4442 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004443 mlx4_release_resource(dev, slave,
4444 RES_MPT, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004445 kfree(mpt);
4446 state = 0;
4447 break;
4448
4449 case RES_MPT_MAPPED:
Shani Michaelib20e5192013-02-06 16:19:08 +00004450 __mlx4_mpt_free_icm(dev, mpt->key);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004451 state = RES_MPT_RESERVED;
4452 break;
4453
4454 case RES_MPT_HW:
4455 in_param = slave;
4456 err = mlx4_cmd(dev, in_param, mptn, 0,
4457 MLX4_CMD_HW2SW_MPT,
4458 MLX4_CMD_TIME_CLASS_A,
4459 MLX4_CMD_NATIVE);
4460 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004461 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004462 slave, mptn);
4463 if (mpt->mtt)
4464 atomic_dec(&mpt->mtt->ref_count);
4465 state = RES_MPT_MAPPED;
4466 break;
4467 default:
4468 state = 0;
4469 }
4470 }
4471 }
4472 spin_lock_irq(mlx4_tlock(dev));
4473 }
4474 spin_unlock_irq(mlx4_tlock(dev));
4475}
4476
4477static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4478{
4479 struct mlx4_priv *priv = mlx4_priv(dev);
4480 struct mlx4_resource_tracker *tracker =
4481 &priv->mfunc.master.res_tracker;
4482 struct list_head *mtt_list =
4483 &tracker->slave_list[slave].res_list[RES_MTT];
4484 struct res_mtt *mtt;
4485 struct res_mtt *tmp;
4486 int state;
4487 LIST_HEAD(tlist);
4488 int base;
4489 int err;
4490
4491 err = move_all_busy(dev, slave, RES_MTT);
4492 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004493 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4494 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004495
4496 spin_lock_irq(mlx4_tlock(dev));
4497 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4498 spin_unlock_irq(mlx4_tlock(dev));
4499 if (mtt->com.owner == slave) {
4500 base = mtt->com.res_id;
4501 state = mtt->com.from_state;
4502 while (state != 0) {
4503 switch (state) {
4504 case RES_MTT_ALLOCATED:
4505 __mlx4_free_mtt_range(dev, base,
4506 mtt->order);
4507 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004508 rb_erase(&mtt->com.node,
4509 &tracker->res_tree[RES_MTT]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004510 list_del(&mtt->com.list);
4511 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004512 mlx4_release_resource(dev, slave, RES_MTT,
4513 1 << mtt->order, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004514 kfree(mtt);
4515 state = 0;
4516 break;
4517
4518 default:
4519 state = 0;
4520 }
4521 }
4522 }
4523 spin_lock_irq(mlx4_tlock(dev));
4524 }
4525 spin_unlock_irq(mlx4_tlock(dev));
4526}
4527
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004528static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4529{
4530 struct mlx4_priv *priv = mlx4_priv(dev);
4531 struct mlx4_resource_tracker *tracker =
4532 &priv->mfunc.master.res_tracker;
4533 struct list_head *fs_rule_list =
4534 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4535 struct res_fs_rule *fs_rule;
4536 struct res_fs_rule *tmp;
4537 int state;
4538 u64 base;
4539 int err;
4540
4541 err = move_all_busy(dev, slave, RES_FS_RULE);
4542		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4543 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
4544 slave);
4545
4546 spin_lock_irq(mlx4_tlock(dev));
4547 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4548 spin_unlock_irq(mlx4_tlock(dev));
4549 if (fs_rule->com.owner == slave) {
4550 base = fs_rule->com.res_id;
4551 state = fs_rule->com.from_state;
4552 while (state != 0) {
4553 switch (state) {
4554 case RES_FS_RULE_ALLOCATED:
4555 /* detach rule */
4556 err = mlx4_cmd(dev, base, 0, 0,
4557 MLX4_QP_FLOW_STEERING_DETACH,
4558 MLX4_CMD_TIME_CLASS_A,
4559 MLX4_CMD_NATIVE);
4560
4561 spin_lock_irq(mlx4_tlock(dev));
4562 rb_erase(&fs_rule->com.node,
4563 &tracker->res_tree[RES_FS_RULE]);
4564 list_del(&fs_rule->com.list);
4565 spin_unlock_irq(mlx4_tlock(dev));
4566 kfree(fs_rule);
4567 state = 0;
4568 break;
4569
4570 default:
4571 state = 0;
4572 }
4573 }
4574 }
4575 spin_lock_irq(mlx4_tlock(dev));
4576 }
4577 spin_unlock_irq(mlx4_tlock(dev));
4578}
4579
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004580static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4581{
4582 struct mlx4_priv *priv = mlx4_priv(dev);
4583 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4584 struct list_head *eq_list =
4585 &tracker->slave_list[slave].res_list[RES_EQ];
4586 struct res_eq *eq;
4587 struct res_eq *tmp;
4588 int err;
4589 int state;
4590 LIST_HEAD(tlist);
4591 int eqn;
4592 struct mlx4_cmd_mailbox *mailbox;
4593
4594 err = move_all_busy(dev, slave, RES_EQ);
4595 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004596 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4597 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004598
4599 spin_lock_irq(mlx4_tlock(dev));
4600 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4601 spin_unlock_irq(mlx4_tlock(dev));
4602 if (eq->com.owner == slave) {
4603 eqn = eq->com.res_id;
4604 state = eq->com.from_state;
4605 while (state != 0) {
4606 switch (state) {
4607 case RES_EQ_RESERVED:
4608 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004609 rb_erase(&eq->com.node,
4610 &tracker->res_tree[RES_EQ]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004611 list_del(&eq->com.list);
4612 spin_unlock_irq(mlx4_tlock(dev));
4613 kfree(eq);
4614 state = 0;
4615 break;
4616
4617 case RES_EQ_HW:
4618 mailbox = mlx4_alloc_cmd_mailbox(dev);
4619 if (IS_ERR(mailbox)) {
4620 cond_resched();
4621 continue;
4622 }
4623 err = mlx4_cmd_box(dev, slave, 0,
4624 eqn & 0xff, 0,
4625 MLX4_CMD_HW2SW_EQ,
4626 MLX4_CMD_TIME_CLASS_A,
4627 MLX4_CMD_NATIVE);
Jack Morgensteineb71d0d2012-05-15 10:35:04 +00004628 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004629 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4630 slave, eqn);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004631 mlx4_free_cmd_mailbox(dev, mailbox);
Jack Morgensteineb71d0d2012-05-15 10:35:04 +00004632 atomic_dec(&eq->mtt->ref_count);
4633 state = RES_EQ_RESERVED;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004634 break;
4635
4636 default:
4637 state = 0;
4638 }
4639 }
4640 }
4641 spin_lock_irq(mlx4_tlock(dev));
4642 }
4643 spin_unlock_irq(mlx4_tlock(dev));
4644}
4645
Jack Morgensteinba062d52012-05-15 10:35:03 +00004646static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4647{
4648 struct mlx4_priv *priv = mlx4_priv(dev);
4649 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4650 struct list_head *counter_list =
4651 &tracker->slave_list[slave].res_list[RES_COUNTER];
4652 struct res_counter *counter;
4653 struct res_counter *tmp;
4654 int err;
4655 int index;
4656
4657 err = move_all_busy(dev, slave, RES_COUNTER);
4658 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004659 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4660 slave);
Jack Morgensteinba062d52012-05-15 10:35:03 +00004661
4662 spin_lock_irq(mlx4_tlock(dev));
4663 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4664 if (counter->com.owner == slave) {
4665 index = counter->com.res_id;
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004666 rb_erase(&counter->com.node,
4667 &tracker->res_tree[RES_COUNTER]);
Jack Morgensteinba062d52012-05-15 10:35:03 +00004668 list_del(&counter->com.list);
4669 kfree(counter);
4670 __mlx4_counter_free(dev, index);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004671 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
Jack Morgensteinba062d52012-05-15 10:35:03 +00004672 }
4673 }
4674 spin_unlock_irq(mlx4_tlock(dev));
4675}
4676
4677static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4678{
4679 struct mlx4_priv *priv = mlx4_priv(dev);
4680 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4681 struct list_head *xrcdn_list =
4682 &tracker->slave_list[slave].res_list[RES_XRCD];
4683 struct res_xrcdn *xrcd;
4684 struct res_xrcdn *tmp;
4685 int err;
4686 int xrcdn;
4687
4688 err = move_all_busy(dev, slave, RES_XRCD);
4689 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004690 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4691 slave);
Jack Morgensteinba062d52012-05-15 10:35:03 +00004692
4693 spin_lock_irq(mlx4_tlock(dev));
4694 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4695 if (xrcd->com.owner == slave) {
4696 xrcdn = xrcd->com.res_id;
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004697 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
Jack Morgensteinba062d52012-05-15 10:35:03 +00004698 list_del(&xrcd->com.list);
4699 kfree(xrcd);
4700 __mlx4_xrcd_free(dev, xrcdn);
4701 }
4702 }
4703 spin_unlock_irq(mlx4_tlock(dev));
4704}
4705
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004706void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4707{
4708 struct mlx4_priv *priv = mlx4_priv(dev);
Jack Morgenstein111c6092014-05-27 09:26:38 +03004709 mlx4_reset_roce_gids(dev, slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004710 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
Jack Morgenstein48740802013-11-03 10:03:20 +02004711 rem_slave_vlans(dev, slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004712 rem_slave_macs(dev, slave);
Hadar Hen Zion80cb0022013-03-21 05:55:52 +00004713 rem_slave_fs_rule(dev, slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004714 rem_slave_qps(dev, slave);
4715 rem_slave_srqs(dev, slave);
4716 rem_slave_cqs(dev, slave);
4717 rem_slave_mrs(dev, slave);
4718 rem_slave_eqs(dev, slave);
4719 rem_slave_mtts(dev, slave);
Jack Morgensteinba062d52012-05-15 10:35:03 +00004720 rem_slave_counters(dev, slave);
4721 rem_slave_xrcdns(dev, slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004722 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4723}
Jack Morgensteinb01978c2013-06-27 19:05:21 +03004724
4725void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4726{
4727 struct mlx4_vf_immed_vlan_work *work =
4728 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4729 struct mlx4_cmd_mailbox *mailbox;
4730 struct mlx4_update_qp_context *upd_context;
4731 struct mlx4_dev *dev = &work->priv->dev;
4732 struct mlx4_resource_tracker *tracker =
4733 &work->priv->mfunc.master.res_tracker;
4734 struct list_head *qp_list =
4735 &tracker->slave_list[work->slave].res_list[RES_QP];
4736 struct res_qp *qp;
4737 struct res_qp *tmp;
Rony Efraimf0f829b2013-11-07 12:19:51 +02004738 u64 qp_path_mask_vlan_ctrl =
4739 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
Jack Morgensteinb01978c2013-06-27 19:05:21 +03004740 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4741 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4742 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4743 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
Rony Efraimf0f829b2013-11-07 12:19:51 +02004744 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4745
4746 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4747 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4748 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4749 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4750 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4751 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
Jack Morgensteinb01978c2013-06-27 19:05:21 +03004752 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4753
4754 int err;
4755 int port, errors = 0;
4756 u8 vlan_control;
4757
4758 if (mlx4_is_slave(dev)) {
4759 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
4760 work->slave);
4761 goto out;
4762 }
4763
4764 mailbox = mlx4_alloc_cmd_mailbox(dev);
4765 if (IS_ERR(mailbox))
4766 goto out;
Rony Efraim0a6eac22013-06-27 19:05:22 +03004767 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4768 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4769 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4770 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4771 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4772 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4773 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4774 else if (!work->vlan_id)
Jack Morgensteinb01978c2013-06-27 19:05:21 +03004775 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4776 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4777 else
4778 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4779 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4780 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4781
4782 upd_context = mailbox->buf;
Rony Efraimf0f829b2013-11-07 12:19:51 +02004783 upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
Jack Morgensteinb01978c2013-06-27 19:05:21 +03004784
4785 spin_lock_irq(mlx4_tlock(dev));
4786 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4787 spin_unlock_irq(mlx4_tlock(dev));
4788 if (qp->com.owner == work->slave) {
4789 if (qp->com.from_state != RES_QP_HW ||
4790 !qp->sched_queue || /* no INIT2RTR trans yet */
4791 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4792 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4793 spin_lock_irq(mlx4_tlock(dev));
4794 continue;
4795 }
4796 port = (qp->sched_queue >> 6 & 1) + 1;
4797 if (port != work->port) {
4798 spin_lock_irq(mlx4_tlock(dev));
4799 continue;
4800 }
Rony Efraimf0f829b2013-11-07 12:19:51 +02004801 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4802 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4803 else
4804 upd_context->primary_addr_path_mask =
4805 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4806 if (work->vlan_id == MLX4_VGT) {
4807 upd_context->qp_context.param3 = qp->param3;
4808 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4809 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4810 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4811 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4812 upd_context->qp_context.pri_path.feup = qp->feup;
4813 upd_context->qp_context.pri_path.sched_queue =
4814 qp->sched_queue;
4815 } else {
4816 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4817 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4818 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4819 upd_context->qp_context.pri_path.fvl_rx =
4820 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4821 upd_context->qp_context.pri_path.fl =
4822 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4823 upd_context->qp_context.pri_path.feup =
4824 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
4825 upd_context->qp_context.pri_path.sched_queue =
4826 qp->sched_queue & 0xC7;
4827 upd_context->qp_context.pri_path.sched_queue |=
4828 ((work->qos & 0x7) << 3);
4829 }
Jack Morgensteinb01978c2013-06-27 19:05:21 +03004830
4831 err = mlx4_cmd(dev, mailbox->dma,
4832 qp->local_qpn & 0xffffff,
4833 0, MLX4_CMD_UPDATE_QP,
4834 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4835 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07004836 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4837 work->slave, port, qp->local_qpn, err);
Jack Morgensteinb01978c2013-06-27 19:05:21 +03004838 errors++;
4839 }
4840 }
4841 spin_lock_irq(mlx4_tlock(dev));
4842 }
4843 spin_unlock_irq(mlx4_tlock(dev));
4844 mlx4_free_cmd_mailbox(dev, mailbox);
4845
4846 if (errors)
4847 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4848 errors, work->slave, work->port);
4849
4850 /* unregister previous vlan_id if needed and we had no errors
4851 * while updating the QPs
4852 */
4853 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4854 NO_INDX != work->orig_vlan_ix)
4855 __mlx4_unregister_vlan(&work->priv->dev, work->port,
Jack Morgenstein2009d002013-11-03 10:03:19 +02004856 work->orig_vlan_id);
Jack Morgensteinb01978c2013-06-27 19:05:21 +03004857out:
4858 kfree(work);
4859 return;
4860}
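
/* Illustration only (not used by the driver): a compact restatement of how
 * the work handler above picks the per-QP vlan_control byte.  It uses the
 * same MLX4_VLAN_CTRL_* flags as the handler; "link_disabled" stands for the
 * MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE case.
 */
static u8 pick_vlan_control(bool link_disabled, u16 vlan_id)
{
	if (link_disabled)		/* block all traffic */
		return MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
		       MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
		       MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
		       MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
		       MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
		       MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	if (!vlan_id)			/* vlan 0: only untagged/priority-tagged pass */
		return MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
		       MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	return MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |	/* VST: force the configured vlan */
	       MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
	       MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
}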