/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)

struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head list;
	struct rb_node node;
	u64 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
	u64 reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
	atomic_t ref_count;
	u32 qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8 sched_queue;
	__be32 param3;
	u8 vlan_control;
	u8 fvl_rx;
	u8 pri_path_fl;
	u8 vlan_index;
	u8 feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
	int port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common com;
	int port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common com;
	int qpn;
};

static int mlx4_is_eth(struct mlx4_dev *dev, int port)
{
	return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
}

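/* Resources of each type are kept in a per-type red-black tree keyed by
 * res_id, so ownership lookups by the PF stay O(log n) regardless of how
 * many resources the slaves hold.
 */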
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put the new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For debug use only */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
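
/* Check a slave's quota before handing out 'count' resources of the given
 * type.  A request is granted if it fits within the slave's guaranteed
 * share, or if enough of the shared free pool remains once the amounts
 * reserved (guaranteed) to the other functions are honored.
 */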
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;

	if (slave > dev->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave])
		goto out;

	if (allocated + count <= guaranteed) {
		err = 0;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		if (free - from_free > reserved)
			err = 0;
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];

	if (slave > dev->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);
	if (port > 0) {
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
	}

	spin_unlock(&res_alloc->alloc_lock);
}

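/* Half of the instances are split evenly into per-function guarantees;
 * each function's quota is its guarantee plus half of the total, so no
 * single function can exhaust the shared pool on its own.
 */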
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
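
/* Called by the PF at startup: builds the per-slave resource lists, the
 * per-type rb-trees and the quota/guarantee tables consulted by
 * mlx4_grant_resource() and mlx4_release_resource() above.
 */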
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->num_vfs + 1) * sizeof(int),
						       GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->num_vfs + 1; t++) {
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] = 0;
				if (t == mlx4_master_func_num(dev))
					res_alloc->res_free = res_alloc->quota[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < MLX4_MAX_PORTS; j++)
					res_alloc->res_port_rsvd[j] +=
						res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

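/* Paravirtualize the pkey index in a QP context: the index the slave
 * wrote at mailbox byte offset 35 is translated through the master's
 * virt2phys_pkey table for the port encoded in the sched_queue byte.
 */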
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index = mlx4_get_base_gid_ix(dev, slave) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index += mlx4_get_base_gid_ix(dev, slave);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index += mlx4_get_base_gid_ix(dev, slave);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}

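/* Apply the VF's operational vlan/QoS/spoof-check (VST) policy to a QP
 * context before it reaches the firmware; the reserved QPs (special,
 * proxy, tunnel) are left untouched.
 */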
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	int port;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force vlan stripping by clearing vsd */
		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
	return 0;
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

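/* get_res()/put_res() bracket command processing: a resource is parked in
 * the BUSY state so a concurrent command or FLR cannot move or delete it,
 * and put_res() later restores the state saved in from_state.
 */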
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;
	return &ret->com;
}

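/* Allocate a tracker entry of the requested type; 'extra' carries the
 * type-specific argument (MPT key, MTT order, or flow-steering QPN).
 */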
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

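/* Register 'count' consecutive resource ids to a slave: entries are
 * pre-allocated outside the lock, then inserted into the rb-tree and the
 * slave's list under mlx4_tlock(), undoing everything on any collision.
 */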
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof(*res_arr), GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* i indexes res_arr, not resource ids: walk back over the entries
	 * already inserted, removing them from both the tree and the
	 * slave's list before they are freed below.
	 */
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del_init(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

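/* The *_res_start_move_to() helpers implement the per-type state machines:
 * each validates the requested transition, parks the resource in BUSY, and
 * records from_state/to_state so the move can later be committed with
 * res_end_move() or rolled back with res_abort_move().
 */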
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
		if (r->com.state != RES_SRQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
		err = -EINVAL;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;
		if (srq)
			*srq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}

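/* QP allocation on behalf of a slave is two-step, mirroring the native
 * flow: RES_OP_RESERVE reserves a range of QP numbers, and RES_OP_MAP_ICM
 * maps ICM for one QPN and moves it to RES_QP_MAPPED.  The quota is
 * charged via mlx4_grant_resource() before the real allocator is touched,
 * and released again on any failure.
 */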
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
				     u8 smac_index, u64 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->smac_index == smac_index && res->port == (u8) port) {
			*mac = res->mac;
			return 0;
		}
	}
	return -ENOENT;
}

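/* MACs are reference counted per (mac, port) pair: registering the same
 * MAC again only bumps ref_count, and the quota is charged once per
 * distinct entry.
 */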
1699static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1700{
1701 struct mlx4_priv *priv = mlx4_priv(dev);
1702 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1703 struct list_head *mac_list =
1704 &tracker->slave_list[slave].res_list[RES_MAC];
1705 struct mac_res *res, *tmp;
1706
1707 list_for_each_entry_safe(res, tmp, mac_list, list) {
1708 if (res->mac == mac && res->port == (u8) port) {
1709 /* mac found. update ref count */
1710 ++res->ref_count;
1711 return 0;
1712 }
1713 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001714
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001715 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1716 return -EINVAL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001717 res = kzalloc(sizeof *res, GFP_KERNEL);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001718 if (!res) {
1719 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001720 return -ENOMEM;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001721 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001722 res->mac = mac;
1723 res->port = (u8) port;
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001724 res->smac_index = smac_index;
1725 res->ref_count = 1;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001726 list_add_tail(&res->list,
1727 &tracker->slave_list[slave].res_list[RES_MAC]);
1728 return 0;
1729}
1730
1731static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1732 int port)
1733{
1734 struct mlx4_priv *priv = mlx4_priv(dev);
1735 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1736 struct list_head *mac_list =
1737 &tracker->slave_list[slave].res_list[RES_MAC];
1738 struct mac_res *res, *tmp;
1739
1740 list_for_each_entry_safe(res, tmp, mac_list, list) {
1741 if (res->mac == mac && res->port == (u8) port) {
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001742 if (!--res->ref_count) {
1743 list_del(&res->list);
1744 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1745 kfree(res);
1746 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001747 break;
1748 }
1749 }
1750}
1751
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* unregister the MAC once for each reference the slave took */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}

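/* Handle a slave's RES_MAC allocation: register the MAC with the
 * device, return the resulting SMAC index through @out_param and
 * record the registration, unregistering again if tracking fails.
 */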
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;
	u8 smac_index;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		smac_index = err;
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* VLAN found; just update the reference count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}

static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* unregister the VLAN once for each reference the slave took */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}

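/* Handle a slave's RES_VLAN allocation: register the VLAN and track it
 * against the slave, undoing the registration if tracking fails.
 */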
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	/* older kernels treated vlan reg/unreg commands as a NOP;
	 * keep doing so for slaves that still use that API.
	 */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}

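/* Allocate a counter for a slave: charge the RES_COUNTER quota first,
 * then allocate and track the counter, unwinding both on failure.
 */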
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}

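/* Dispatch a virtualized resource-allocation command from a slave to
 * the per-type handler. The low byte of in_modifier selects the
 * resource type; bits 8-15 carry the port number where relevant.
 */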
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

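/* Free a slave's QP resources: RES_OP_RESERVE releases a reserved qpn
 * range, RES_OP_MAP_ICM unmaps the ICM of a single QP (unless the qpn
 * is FW-reserved) and moves it back to the RESERVED state.
 */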
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		if (slave_state[slave].old_vlan_api)
			return 0;
		if (!port)
			return -EINVAL;
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);
	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}

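/* Counterpart of mlx4_ALLOC_RES_wrapper: dispatch a virtualized
 * resource-free command from a slave to the per-type handler.
 */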
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

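/* Number of MTT pages needed by a QP context: the SQ and RQ buffer
 * sizes are summed (the RQ contributes nothing for SRQ/RSS/XRC QPs),
 * the page offset is added and the result is rounded up to a
 * power-of-two number of pages.
 */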
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq | rss | xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}

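/* Check that [start, start + size) lies entirely within the tracked
 * MTT resource @mtt.
 */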
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}

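/* SW2HW_MPT: validate a memory-region entry coming from a slave (it
 * must be a region, its PD must belong to the slave, FMRs may not be
 * bind-enabled) and, for virtual MPTs, pin the underlying MTT range
 * before passing the command on to firmware.
 */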
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}

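/* RST2INIT: move a slave QP into HW ownership. All resources named in
 * the QP context (MTT range, receive and send CQs, optional SRQ) are
 * looked up and reference-counted so they cannot be freed while the
 * QP is in use.
 */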
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->param3 = 0;
	qp->vlan_control = 0;
	qp->fvl_rx = 0;
	qp->pri_path_fl = 0;
	qp->vlan_index = 0;
	qp->feup = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}

int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}

static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

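/* Sanity-check the parameters of a QP state transition requested by a
 * slave; currently this verifies that the MGID indices of the primary
 * and alternate paths fall within the GID range assigned to the slave.
 */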
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;
	int port;
	int num_gids;

	qp_ctx = inbox->buf + 8;
	qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar = be32_to_cpu(*(__be32 *) inbox->buf);

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_XRC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave);
					else
						num_gids = 1;
					if (qp_ctx->pri_path.mgid_index >= num_gids)
						return -EINVAL;
				}
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave);
					else
						num_gids = 1;
					if (qp_ctx->alt_path.mgid_index >= num_gids)
						return -EINVAL;
				}
			}
			break;
		default:
			break;
		}

		break;
	default:
		break;
	}

	return 0;
}

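/* WRITE_MTT: make sure the written range lies inside an MTT resource
 * owned by the slave, then do the write through the SW path with the
 * addresses converted to host byte order.
 */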
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness
	 */
	mtt.offset = 0;	/* TBD: this is broken, but it is not handled here
			 * since the offset is not really used.
			 */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}

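/* Post an asynchronous event to a slave by issuing a GEN_EQE command
 * on the event queue the slave registered for this event type, if any.
 */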
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}

int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}

int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}

static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}

static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}

int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

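/* For Ethernet (RoCE) QPs, verify that the SMAC index in the QP
 * context refers to a MAC the slave has actually registered.
 */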
static int roce_verify_mac(struct mlx4_dev *dev, int slave,
			   struct mlx4_qp_context *qpc,
			   struct mlx4_cmd_mailbox *inbox)
{
	u64 mac;
	int port;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 smac_ix;

	port = (sched >> 6 & 1) + 1;
	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
			return -ENOENT;
	}
	return 0;
}

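/* INIT2RTR is where most of the VF policy is applied: the QP
 * parameters and RoCE MAC are verified, pkey/GID indexes and vport
 * parameters are rewritten, and on success the vlan/QoS values
 * supplied by the VF are saved in case a later VST<->VGT change needs
 * to restore them.
 */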
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;
	u8 orig_sched_queue;
	__be32 orig_param3 = qpc->param3;
	u8 orig_vlan_control = qpc->pri_path.vlan_control;
	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
	u8 orig_pri_path_fl = qpc->pri_path.fl;
	u8 orig_vlan_index = qpc->pri_path.vlan_index;
	u8 orig_feup = qpc->pri_path.feup;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
	if (err)
		return err;

	if (roce_verify_mac(dev, slave, qpc, inbox))
		return -EINVAL;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	orig_sched_queue = qpc->pri_path.sched_queue;
	err = update_vport_qp_param(dev, inbox, slave, qpn);
	if (err)
		return err;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	/* if no error, save sched queue value passed in by VF. This is
	 * essentially the QOS value provided by the VF. This will be useful
	 * if we allow dynamic changes from VST back to VGT
	 */
	if (!err) {
		qp->sched_queue = orig_sched_queue;
		qp->param3 = orig_param3;
		qp->vlan_control = orig_vlan_control;
		qp->fvl_rx = orig_fvl_rx;
		qp->pri_path_fl = orig_pri_path_fl;
		qp->vlan_index = orig_vlan_index;
		qp->feup = orig_feup;
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

3489int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3490 struct mlx4_vhcr *vhcr,
3491 struct mlx4_cmd_mailbox *inbox,
3492 struct mlx4_cmd_mailbox *outbox,
3493 struct mlx4_cmd_info *cmd)
3494{
3495 int err;
3496 struct mlx4_qp_context *context = inbox->buf + 8;
3497
3498 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3499 if (err)
3500 return err;
3501
3502 adjust_proxy_tun_qkey(dev, vhcr, context);
3503 update_gid(dev, inbox, (u8)slave);
3504 update_pkey_index(dev, slave, inbox);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003505 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3506}
3507
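/* Moving a QP back to RESET drops the references it took on its MTT, CQs
 * and (optional) SRQ when it was moved to hardware ownership, making those
 * resources freeable again.
 */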
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
                         struct mlx4_cmd_mailbox *outbox,
                         struct mlx4_cmd_info *cmd)
{
        int err;
        int qpn = vhcr->in_modifier & 0x7fffff;
        struct res_qp *qp;

        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
        if (err)
                return err;
        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_abort;

        atomic_dec(&qp->mtt->ref_count);
        atomic_dec(&qp->rcq->ref_count);
        atomic_dec(&qp->scq->ref_count);
        if (qp->srq)
                atomic_dec(&qp->srq->ref_count);
        res_end_move(dev, slave, RES_QP, qpn);
        return 0;

ex_abort:
        res_abort_move(dev, slave, RES_QP, qpn);

        return err;
}

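/* Per-QP bookkeeping of attached multicast groups: each successful attach is
 * recorded on rqp->mcg_list (under mcg_spl) so it can be looked up on detach
 * and undone in detach_qp() when the slave is cleaned up; reg_id is kept for
 * device-managed steering.
 */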
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
                                struct res_qp *rqp, u8 *gid)
{
        struct res_gid *res;

        list_for_each_entry(res, &rqp->mcg_list, list) {
                if (!memcmp(res->gid, gid, 16))
                        return res;
        }
        return NULL;
}

static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
                       u8 *gid, enum mlx4_protocol prot,
                       enum mlx4_steer_type steer, u64 reg_id)
{
        struct res_gid *res;
        int err;

        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
                return -ENOMEM;

        spin_lock_irq(&rqp->mcg_spl);
        if (find_gid(dev, slave, rqp, gid)) {
                kfree(res);
                err = -EEXIST;
        } else {
                memcpy(res->gid, gid, 16);
                res->prot = prot;
                res->steer = steer;
                res->reg_id = reg_id;
                list_add_tail(&res->list, &rqp->mcg_list);
                err = 0;
        }
        spin_unlock_irq(&rqp->mcg_spl);

        return err;
}

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
                       u8 *gid, enum mlx4_protocol prot,
                       enum mlx4_steer_type steer, u64 *reg_id)
{
        struct res_gid *res;
        int err;

        spin_lock_irq(&rqp->mcg_spl);
        res = find_gid(dev, slave, rqp, gid);
        if (!res || res->prot != prot || res->steer != steer) {
                err = -EINVAL;
        } else {
                *reg_id = res->reg_id;
                list_del(&res->list);
                kfree(res);
                err = 0;
        }
        spin_unlock_irq(&rqp->mcg_spl);

        return err;
}

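/* Attach/detach helpers that hide the steering-mode split: device-managed
 * flow steering tracks rules by a 64-bit reg_id handle, while B0 steering
 * works directly on the (gid, prot, type) tuple.
 */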
static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                     int block_loopback, enum mlx4_protocol prot,
                     enum mlx4_steer_type type, u64 *reg_id)
{
        switch (dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
                                                 block_loopback, prot,
                                                 reg_id);
        case MLX4_STEERING_MODE_B0:
                return mlx4_qp_attach_common(dev, qp, gid,
                                             block_loopback, prot, type);
        default:
                return -EINVAL;
        }
}

static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                     enum mlx4_protocol prot, enum mlx4_steer_type type,
                     u64 reg_id)
{
        switch (dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                return mlx4_flow_detach(dev, reg_id);
        case MLX4_STEERING_MODE_B0:
                return mlx4_qp_detach_common(dev, qp, gid, prot, type);
        default:
                return -EINVAL;
        }
}

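/* For QP_ATTACH, in_modifier packs the QP number (bits 0-23), the protocol
 * (bits 28-30) and the block-loopback flag (bit 31); op_modifier selects
 * attach vs. detach, and bit 1 of gid[7] encodes the steering type.
 */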
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        struct mlx4_qp qp; /* dummy for calling attach/detach */
        u8 *gid = inbox->buf;
        enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
        int err;
        int qpn;
        struct res_qp *rqp;
        u64 reg_id = 0;
        int attach = vhcr->op_modifier;
        int block_loopback = vhcr->in_modifier >> 31;
        u8 steer_type_mask = 2;
        enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

        qpn = vhcr->in_modifier & 0xffffff;
        err = get_res(dev, slave, qpn, RES_QP, &rqp);
        if (err)
                return err;

        qp.qpn = qpn;
        if (attach) {
                err = qp_attach(dev, &qp, gid, block_loopback, prot,
                                type, &reg_id);
                if (err) {
                        pr_err("Failed to attach rule to qp 0x%x\n", qpn);
                        goto ex_put;
                }
                err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
                if (err)
                        goto ex_detach;
        } else {
                err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
                if (err)
                        goto ex_put;

                err = qp_detach(dev, &qp, gid, prot, type, reg_id);
                if (err)
                        pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
                               qpn, reg_id);
        }
        put_res(dev, slave, qpn, RES_QP);
        return err;

ex_detach:
        qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
        put_res(dev, slave, qpn, RES_QP);
        return err;
}

/*
 * MAC validation for Flow Steering rules.
 * A VF can attach rules only with a MAC address that is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
                                   struct list_head *rlist)
{
        struct mac_res *res, *tmp;
        __be64 be_mac;

        /* make sure it isn't multicast or broadcast mac */
        if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
            !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
                list_for_each_entry_safe(res, tmp, rlist, list) {
                        be_mac = cpu_to_be64(res->mac << 16);
                        if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
                                return 0;
                }
                pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
                       eth_header->eth.dst_mac, slave);
                return -EINVAL;
        }
        return 0;
}

/*
 * In case of a missing eth header, append an eth header with a MAC address
 * assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
                          struct mlx4_cmd_mailbox *inbox,
                          struct list_head *rlist, int header_id)
{
        struct mac_res *res, *tmp;
        u8 port;
        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
        struct mlx4_net_trans_rule_hw_eth *eth_header;
        struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
        struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
        __be64 be_mac = 0;
        __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
        port = ctrl->port;
        eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

        /* Clear a space in the inbox for eth header */
        switch (header_id) {
        case MLX4_NET_TRANS_RULE_ID_IPV4:
                ip_header =
                        (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
                memmove(ip_header, eth_header,
                        sizeof(*ip_header) + sizeof(*l4_header));
                break;
        case MLX4_NET_TRANS_RULE_ID_TCP:
        case MLX4_NET_TRANS_RULE_ID_UDP:
                l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
                            (eth_header + 1);
                memmove(l4_header, eth_header, sizeof(*l4_header));
                break;
        default:
                return -EINVAL;
        }
        list_for_each_entry_safe(res, tmp, rlist, list) {
                if (port == res->port) {
                        be_mac = cpu_to_be64(res->mac << 16);
                        break;
                }
        }
        if (!be_mac) {
                pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
                       port);
                return -EINVAL;
        }

        memset(eth_header, 0, sizeof(*eth_header));
        eth_header->size = sizeof(*eth_header) >> 2;
        eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
        memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
        memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

        return 0;
}

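/* Flow-steering attach on behalf of a VF: validate the rule (an L2 header
 * carrying one of the VF's own MACs is enforced, and added if absent),
 * forward it to firmware, then register the returned rule id in the tracker
 * and take a reference on the target QP.
 */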
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                                         struct mlx4_vhcr *vhcr,
                                         struct mlx4_cmd_mailbox *inbox,
                                         struct mlx4_cmd_mailbox *outbox,
                                         struct mlx4_cmd_info *cmd)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
        int err;
        int qpn;
        struct res_qp *rqp;
        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
        struct _rule_hw *rule_header;
        int header_id;

        if (dev->caps.steering_mode !=
            MLX4_STEERING_MODE_DEVICE_MANAGED)
                return -EOPNOTSUPP;

        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
        err = get_res(dev, slave, qpn, RES_QP, &rqp);
        if (err) {
                pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
                return err;
        }
        rule_header = (struct _rule_hw *)(ctrl + 1);
        header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

        switch (header_id) {
        case MLX4_NET_TRANS_RULE_ID_ETH:
                if (validate_eth_header_mac(slave, rule_header, rlist)) {
                        err = -EINVAL;
                        goto err_put;
                }
                break;
        case MLX4_NET_TRANS_RULE_ID_IB:
                break;
        case MLX4_NET_TRANS_RULE_ID_IPV4:
        case MLX4_NET_TRANS_RULE_ID_TCP:
        case MLX4_NET_TRANS_RULE_ID_UDP:
                pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
                if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
                        err = -EINVAL;
                        goto err_put;
                }
                vhcr->in_modifier +=
                        sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
                break;
        default:
                pr_err("Corrupted mailbox\n");
                err = -EINVAL;
                goto err_put;
        }

        err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
                           vhcr->in_modifier, 0,
                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
                           MLX4_CMD_NATIVE);
        if (err)
                goto err_put;

        err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
        if (err) {
                mlx4_err(dev, "Failed to add flow steering resources\n");
                /* detach rule */
                mlx4_cmd(dev, vhcr->out_param, 0, 0,
                         MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
                         MLX4_CMD_NATIVE);
                goto err_put;
        }
        atomic_inc(&rqp->ref_count);
err_put:
        put_res(dev, slave, qpn, RES_QP);
        return err;
}

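/* Reverse of the attach wrapper: remove the rule from the tracker, issue the
 * firmware detach, and on success drop the reference the rule held on its QP.
 */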
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
                                         struct mlx4_vhcr *vhcr,
                                         struct mlx4_cmd_mailbox *inbox,
                                         struct mlx4_cmd_mailbox *outbox,
                                         struct mlx4_cmd_info *cmd)
{
        int err;
        struct res_qp *rqp;
        struct res_fs_rule *rrule;

        if (dev->caps.steering_mode !=
            MLX4_STEERING_MODE_DEVICE_MANAGED)
                return -EOPNOTSUPP;

        err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
        if (err)
                return err;
        /* Release the rule from busy state before removal */
        put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
        err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
        if (err)
                return err;

        err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
        if (err) {
                mlx4_err(dev, "Failed to remove flow steering resources\n");
                goto out;
        }

        err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
                       MLX4_CMD_NATIVE);
        if (!err)
                atomic_dec(&rqp->ref_count);
out:
        put_res(dev, slave, rrule->qpn, RES_QP);
        return err;
}

enum {
        BUSY_MAX_RETRIES = 10
};

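/* Forward QUERY_IF_STAT only when the counter index really belongs to this
 * slave; get_res() is expected to fail for a foreign index.
 */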
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
                               struct mlx4_vhcr *vhcr,
                               struct mlx4_cmd_mailbox *inbox,
                               struct mlx4_cmd_mailbox *outbox,
                               struct mlx4_cmd_info *cmd)
{
        int err;
        int index = vhcr->in_modifier & 0xffff;

        err = get_res(dev, slave, index, RES_COUNTER, NULL);
        if (err)
                return err;

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        put_res(dev, slave, index, RES_COUNTER);
        return err;
}

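/* Only the master may move the flow-steering IB UC QP range, so this
 * command is unconditionally denied to slaves.
 */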
int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
                                              struct mlx4_vhcr *vhcr,
                                              struct mlx4_cmd_mailbox *inbox,
                                              struct mlx4_cmd_mailbox *outbox,
                                              struct mlx4_cmd_info *cmd)
{
        return -EPERM;
}

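/* Undo every multicast attachment recorded on the QP's mcg_list, using the
 * detach flavor that matches the device's steering mode.
 */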
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
        struct res_gid *rgid;
        struct res_gid *tmp;
        struct mlx4_qp qp; /* dummy for calling attach/detach */

        list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
                switch (dev->caps.steering_mode) {
                case MLX4_STEERING_MODE_DEVICE_MANAGED:
                        mlx4_flow_detach(dev, rgid->reg_id);
                        break;
                case MLX4_STEERING_MODE_B0:
                        qp.qpn = rqp->local_qpn;
                        (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
                                                     rgid->prot, rgid->steer);
                        break;
                }
                list_del(&rgid->list);
                kfree(rgid);
        }
}

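/* Mark every resource of @type owned by @slave as busy/removing so that no
 * further operation can claim it; returns the number of entries that were
 * already busy and could not be flagged on this pass.
 */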
static int _move_all_busy(struct mlx4_dev *dev, int slave,
                          enum mlx4_resource type, int print)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker =
                &priv->mfunc.master.res_tracker;
        struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
        struct res_common *r;
        struct res_common *tmp;
        int busy;

        busy = 0;
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(r, tmp, rlist, list) {
                if (r->owner == slave) {
                        if (!r->removing) {
                                if (r->state == RES_ANY_BUSY) {
                                        if (print)
                                                mlx4_dbg(dev,
                                                         "%s id 0x%llx is busy\n",
                                                         ResourceType(type),
                                                         r->res_id);
                                        ++busy;
                                } else {
                                        r->from_state = r->state;
                                        r->state = RES_ANY_BUSY;
                                        r->removing = 1;
                                }
                        }
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));

        return busy;
}

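/* Retry _move_all_busy() for up to five seconds, yielding between passes;
 * a final pass with printing enabled reports whatever is still busy.
 */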
static int move_all_busy(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type)
{
        unsigned long begin;
        int busy;

        begin = jiffies;
        do {
                busy = _move_all_busy(dev, slave, type, 0);
                if (time_after(jiffies, begin + 5 * HZ))
                        break;
                if (busy)
                        cond_resched();
        } while (busy);

        if (busy)
                busy = _move_all_busy(dev, slave, type, 1);

        return busy;
}
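
/* The rem_slave_*() helpers below each walk one resource type of a departing
 * slave and unwind every object state by state (HW -> SW -> allocated -> off
 * the tracker), mirroring the forward transitions performed by the wrappers.
 */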
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *qp_list =
                &tracker->slave_list[slave].res_list[RES_QP];
        struct res_qp *qp;
        struct res_qp *tmp;
        int state;
        u64 in_param;
        int qpn;
        int err;

        err = move_all_busy(dev, slave, RES_QP);
        if (err)
                mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
                          slave);

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
                spin_unlock_irq(mlx4_tlock(dev));
                if (qp->com.owner == slave) {
                        qpn = qp->com.res_id;
                        detach_qp(dev, slave, qp);
                        state = qp->com.from_state;
                        while (state != 0) {
                                switch (state) {
                                case RES_QP_RESERVED:
                                        spin_lock_irq(mlx4_tlock(dev));
                                        rb_erase(&qp->com.node,
                                                 &tracker->res_tree[RES_QP]);
                                        list_del(&qp->com.list);
                                        spin_unlock_irq(mlx4_tlock(dev));
                                        if (!valid_reserved(dev, slave, qpn)) {
                                                __mlx4_qp_release_range(dev, qpn, 1);
                                                mlx4_release_resource(dev, slave,
                                                                      RES_QP, 1, 0);
                                        }
                                        kfree(qp);
                                        state = 0;
                                        break;
                                case RES_QP_MAPPED:
                                        if (!valid_reserved(dev, slave, qpn))
                                                __mlx4_qp_free_icm(dev, qpn);
                                        state = RES_QP_RESERVED;
                                        break;
                                case RES_QP_HW:
                                        in_param = slave;
                                        err = mlx4_cmd(dev, in_param,
                                                       qp->local_qpn, 2,
                                                       MLX4_CMD_2RST_QP,
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);
                                        if (err)
                                                mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
                                                         slave, qp->local_qpn);
                                        atomic_dec(&qp->rcq->ref_count);
                                        atomic_dec(&qp->scq->ref_count);
                                        atomic_dec(&qp->mtt->ref_count);
                                        if (qp->srq)
                                                atomic_dec(&qp->srq->ref_count);
                                        state = RES_QP_MAPPED;
                                        break;
                                default:
                                        state = 0;
                                }
                        }
                }
                spin_lock_irq(mlx4_tlock(dev));
        }
        spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *srq_list =
                &tracker->slave_list[slave].res_list[RES_SRQ];
        struct res_srq *srq;
        struct res_srq *tmp;
        int state;
        u64 in_param;
        LIST_HEAD(tlist);
        int srqn;
        int err;

        err = move_all_busy(dev, slave, RES_SRQ);
        if (err)
                mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
                          slave);

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
                spin_unlock_irq(mlx4_tlock(dev));
                if (srq->com.owner == slave) {
                        srqn = srq->com.res_id;
                        state = srq->com.from_state;
                        while (state != 0) {
                                switch (state) {
                                case RES_SRQ_ALLOCATED:
                                        __mlx4_srq_free_icm(dev, srqn);
                                        spin_lock_irq(mlx4_tlock(dev));
                                        rb_erase(&srq->com.node,
                                                 &tracker->res_tree[RES_SRQ]);
                                        list_del(&srq->com.list);
                                        spin_unlock_irq(mlx4_tlock(dev));
                                        mlx4_release_resource(dev, slave,
                                                              RES_SRQ, 1, 0);
                                        kfree(srq);
                                        state = 0;
                                        break;

                                case RES_SRQ_HW:
                                        in_param = slave;
                                        err = mlx4_cmd(dev, in_param, srqn, 1,
                                                       MLX4_CMD_HW2SW_SRQ,
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);
                                        if (err)
                                                mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
                                                         slave, srqn);

                                        atomic_dec(&srq->mtt->ref_count);
                                        if (srq->cq)
                                                atomic_dec(&srq->cq->ref_count);
                                        state = RES_SRQ_ALLOCATED;
                                        break;

                                default:
                                        state = 0;
                                }
                        }
                }
                spin_lock_irq(mlx4_tlock(dev));
        }
        spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *cq_list =
                &tracker->slave_list[slave].res_list[RES_CQ];
        struct res_cq *cq;
        struct res_cq *tmp;
        int state;
        u64 in_param;
        LIST_HEAD(tlist);
        int cqn;
        int err;

        err = move_all_busy(dev, slave, RES_CQ);
        if (err)
                mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
                          slave);

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
                spin_unlock_irq(mlx4_tlock(dev));
                if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
                        cqn = cq->com.res_id;
                        state = cq->com.from_state;
                        while (state != 0) {
                                switch (state) {
                                case RES_CQ_ALLOCATED:
                                        __mlx4_cq_free_icm(dev, cqn);
                                        spin_lock_irq(mlx4_tlock(dev));
                                        rb_erase(&cq->com.node,
                                                 &tracker->res_tree[RES_CQ]);
                                        list_del(&cq->com.list);
                                        spin_unlock_irq(mlx4_tlock(dev));
                                        mlx4_release_resource(dev, slave,
                                                              RES_CQ, 1, 0);
                                        kfree(cq);
                                        state = 0;
                                        break;

                                case RES_CQ_HW:
                                        in_param = slave;
                                        err = mlx4_cmd(dev, in_param, cqn, 1,
                                                       MLX4_CMD_HW2SW_CQ,
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);
                                        if (err)
                                                mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
                                                         slave, cqn);
                                        atomic_dec(&cq->mtt->ref_count);
                                        state = RES_CQ_ALLOCATED;
                                        break;

                                default:
                                        state = 0;
                                }
                        }
                }
                spin_lock_irq(mlx4_tlock(dev));
        }
        spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mpt_list =
                &tracker->slave_list[slave].res_list[RES_MPT];
        struct res_mpt *mpt;
        struct res_mpt *tmp;
        int state;
        u64 in_param;
        LIST_HEAD(tlist);
        int mptn;
        int err;

        err = move_all_busy(dev, slave, RES_MPT);
        if (err)
                mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
                          slave);

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
                spin_unlock_irq(mlx4_tlock(dev));
                if (mpt->com.owner == slave) {
                        mptn = mpt->com.res_id;
                        state = mpt->com.from_state;
                        while (state != 0) {
                                switch (state) {
                                case RES_MPT_RESERVED:
                                        __mlx4_mpt_release(dev, mpt->key);
                                        spin_lock_irq(mlx4_tlock(dev));
                                        rb_erase(&mpt->com.node,
                                                 &tracker->res_tree[RES_MPT]);
                                        list_del(&mpt->com.list);
                                        spin_unlock_irq(mlx4_tlock(dev));
                                        mlx4_release_resource(dev, slave,
                                                              RES_MPT, 1, 0);
                                        kfree(mpt);
                                        state = 0;
                                        break;

                                case RES_MPT_MAPPED:
                                        __mlx4_mpt_free_icm(dev, mpt->key);
                                        state = RES_MPT_RESERVED;
                                        break;

                                case RES_MPT_HW:
                                        in_param = slave;
                                        err = mlx4_cmd(dev, in_param, mptn, 0,
                                                       MLX4_CMD_HW2SW_MPT,
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);
                                        if (err)
                                                mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
                                                         slave, mptn);
                                        if (mpt->mtt)
                                                atomic_dec(&mpt->mtt->ref_count);
                                        state = RES_MPT_MAPPED;
                                        break;
                                default:
                                        state = 0;
                                }
                        }
                }
                spin_lock_irq(mlx4_tlock(dev));
        }
        spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker =
                &priv->mfunc.master.res_tracker;
        struct list_head *mtt_list =
                &tracker->slave_list[slave].res_list[RES_MTT];
        struct res_mtt *mtt;
        struct res_mtt *tmp;
        int state;
        LIST_HEAD(tlist);
        int base;
        int err;

        err = move_all_busy(dev, slave, RES_MTT);
        if (err)
                mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
                          slave);

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
                spin_unlock_irq(mlx4_tlock(dev));
                if (mtt->com.owner == slave) {
                        base = mtt->com.res_id;
                        state = mtt->com.from_state;
                        while (state != 0) {
                                switch (state) {
                                case RES_MTT_ALLOCATED:
                                        __mlx4_free_mtt_range(dev, base,
                                                              mtt->order);
                                        spin_lock_irq(mlx4_tlock(dev));
                                        rb_erase(&mtt->com.node,
                                                 &tracker->res_tree[RES_MTT]);
                                        list_del(&mtt->com.list);
                                        spin_unlock_irq(mlx4_tlock(dev));
                                        mlx4_release_resource(dev, slave, RES_MTT,
                                                              1 << mtt->order, 0);
                                        kfree(mtt);
                                        state = 0;
                                        break;

                                default:
                                        state = 0;
                                }
                        }
                }
                spin_lock_irq(mlx4_tlock(dev));
        }
        spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker =
                &priv->mfunc.master.res_tracker;
        struct list_head *fs_rule_list =
                &tracker->slave_list[slave].res_list[RES_FS_RULE];
        struct res_fs_rule *fs_rule;
        struct res_fs_rule *tmp;
        int state;
        u64 base;
        int err;

        err = move_all_busy(dev, slave, RES_FS_RULE);
        if (err)
                mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
                          slave);

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
                spin_unlock_irq(mlx4_tlock(dev));
                if (fs_rule->com.owner == slave) {
                        base = fs_rule->com.res_id;
                        state = fs_rule->com.from_state;
                        while (state != 0) {
                                switch (state) {
                                case RES_FS_RULE_ALLOCATED:
                                        /* detach rule */
                                        err = mlx4_cmd(dev, base, 0, 0,
                                                       MLX4_QP_FLOW_STEERING_DETACH,
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);

                                        spin_lock_irq(mlx4_tlock(dev));
                                        rb_erase(&fs_rule->com.node,
                                                 &tracker->res_tree[RES_FS_RULE]);
                                        list_del(&fs_rule->com.list);
                                        spin_unlock_irq(mlx4_tlock(dev));
                                        kfree(fs_rule);
                                        state = 0;
                                        break;

                                default:
                                        state = 0;
                                }
                        }
                }
                spin_lock_irq(mlx4_tlock(dev));
        }
        spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *eq_list =
                &tracker->slave_list[slave].res_list[RES_EQ];
        struct res_eq *eq;
        struct res_eq *tmp;
        int err;
        int state;
        LIST_HEAD(tlist);
        int eqn;
        struct mlx4_cmd_mailbox *mailbox;

        err = move_all_busy(dev, slave, RES_EQ);
        if (err)
                mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
                          slave);

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
                spin_unlock_irq(mlx4_tlock(dev));
                if (eq->com.owner == slave) {
                        eqn = eq->com.res_id;
                        state = eq->com.from_state;
                        while (state != 0) {
                                switch (state) {
                                case RES_EQ_RESERVED:
                                        spin_lock_irq(mlx4_tlock(dev));
                                        rb_erase(&eq->com.node,
                                                 &tracker->res_tree[RES_EQ]);
                                        list_del(&eq->com.list);
                                        spin_unlock_irq(mlx4_tlock(dev));
                                        kfree(eq);
                                        state = 0;
                                        break;

                                case RES_EQ_HW:
                                        mailbox = mlx4_alloc_cmd_mailbox(dev);
                                        if (IS_ERR(mailbox)) {
                                                cond_resched();
                                                continue;
                                        }
                                        err = mlx4_cmd_box(dev, slave, 0,
                                                           eqn & 0xff, 0,
                                                           MLX4_CMD_HW2SW_EQ,
                                                           MLX4_CMD_TIME_CLASS_A,
                                                           MLX4_CMD_NATIVE);
                                        if (err)
                                                mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
                                                         slave, eqn);
                                        mlx4_free_cmd_mailbox(dev, mailbox);
                                        atomic_dec(&eq->mtt->ref_count);
                                        state = RES_EQ_RESERVED;
                                        break;

                                default:
                                        state = 0;
                                }
                        }
                }
                spin_lock_irq(mlx4_tlock(dev));
        }
        spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *counter_list =
                &tracker->slave_list[slave].res_list[RES_COUNTER];
        struct res_counter *counter;
        struct res_counter *tmp;
        int err;
        int index;

        err = move_all_busy(dev, slave, RES_COUNTER);
        if (err)
                mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
                          slave);

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
                if (counter->com.owner == slave) {
                        index = counter->com.res_id;
                        rb_erase(&counter->com.node,
                                 &tracker->res_tree[RES_COUNTER]);
                        list_del(&counter->com.list);
                        kfree(counter);
                        __mlx4_counter_free(dev, index);
                        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *xrcdn_list =
                &tracker->slave_list[slave].res_list[RES_XRCD];
        struct res_xrcdn *xrcd;
        struct res_xrcdn *tmp;
        int err;
        int xrcdn;

        err = move_all_busy(dev, slave, RES_XRCD);
        if (err)
                mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
                          slave);

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
                if (xrcd->com.owner == slave) {
                        xrcdn = xrcd->com.res_id;
                        rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
                        list_del(&xrcd->com.list);
                        kfree(xrcd);
                        __mlx4_xrcd_free(dev, xrcdn);
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));
}

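/* Full teardown for a departing slave, serialized by the slave's tracker
 * mutex: steering state (vlans, macs, flow rules) is removed first, then
 * QPs, and only afterwards the objects QPs may still reference (SRQs, CQs,
 * MRs, EQs, MTTs, counters, XRC domains).
 */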
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
        rem_slave_vlans(dev, slave);
        rem_slave_macs(dev, slave);
        rem_slave_fs_rule(dev, slave);
        rem_slave_qps(dev, slave);
        rem_slave_srqs(dev, slave);
        rem_slave_cqs(dev, slave);
        rem_slave_mrs(dev, slave);
        rem_slave_eqs(dev, slave);
        rem_slave_mtts(dev, slave);
        rem_slave_counters(dev, slave);
        rem_slave_xrcdns(dev, slave);
        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

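/* Deferred work that pushes an immediate VST/VGT vlan or QoS change to all
 * of a VF's Ethernet QPs via UPDATE_QP; QPs that never reached INIT2RTR,
 * reserved QPs and RSS QPs are skipped.
 */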
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
        struct mlx4_vf_immed_vlan_work *work =
                container_of(_work, struct mlx4_vf_immed_vlan_work, work);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_update_qp_context *upd_context;
        struct mlx4_dev *dev = &work->priv->dev;
        struct mlx4_resource_tracker *tracker =
                &work->priv->mfunc.master.res_tracker;
        struct list_head *qp_list =
                &tracker->slave_list[work->slave].res_list[RES_QP];
        struct res_qp *qp;
        struct res_qp *tmp;
        u64 qp_path_mask_vlan_ctrl =
                ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
                 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
                 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
                 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
                 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
                 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

        u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
                            (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
                            (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
                            (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
                            (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
                            (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
                            (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

        int err;
        int port, errors = 0;
        u8 vlan_control;

        if (mlx4_is_slave(dev)) {
                mlx4_warn(dev, "Trying to update-qp in slave %d\n",
                          work->slave);
                goto out;
        }

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                goto out;
        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
        else if (!work->vlan_id)
                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
        else
                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

        upd_context = mailbox->buf;
        upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
                spin_unlock_irq(mlx4_tlock(dev));
                if (qp->com.owner == work->slave) {
                        if (qp->com.from_state != RES_QP_HW ||
                            !qp->sched_queue || /* no INIT2RTR trans yet */
                            mlx4_is_qp_reserved(dev, qp->local_qpn) ||
                            qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
                                spin_lock_irq(mlx4_tlock(dev));
                                continue;
                        }
                        port = (qp->sched_queue >> 6 & 1) + 1;
                        if (port != work->port) {
                                spin_lock_irq(mlx4_tlock(dev));
                                continue;
                        }
                        if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
                                upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
                        else
                                upd_context->primary_addr_path_mask =
                                        cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
                        if (work->vlan_id == MLX4_VGT) {
                                upd_context->qp_context.param3 = qp->param3;
                                upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
                                upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
                                upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
                                upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
                                upd_context->qp_context.pri_path.feup = qp->feup;
                                upd_context->qp_context.pri_path.sched_queue =
                                        qp->sched_queue;
                        } else {
                                upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
                                upd_context->qp_context.pri_path.vlan_control = vlan_control;
                                upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
                                upd_context->qp_context.pri_path.fvl_rx =
                                        qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
                                upd_context->qp_context.pri_path.fl =
                                        qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                                upd_context->qp_context.pri_path.feup =
                                        qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                                upd_context->qp_context.pri_path.sched_queue =
                                        qp->sched_queue & 0xC7;
                                upd_context->qp_context.pri_path.sched_queue |=
                                        ((work->qos & 0x7) << 3);
                        }

                        err = mlx4_cmd(dev, mailbox->dma,
                                       qp->local_qpn & 0xffffff,
                                       0, MLX4_CMD_UPDATE_QP,
                                       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
                        if (err) {
                                mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
                                          work->slave, port, qp->local_qpn,
                                          err);
                                errors++;
                        }
                }
                spin_lock_irq(mlx4_tlock(dev));
        }
        spin_unlock_irq(mlx4_tlock(dev));
        mlx4_free_cmd_mailbox(dev, mailbox);

        if (errors)
                mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
                         errors, work->slave, work->port);

        /* unregister previous vlan_id if needed and we had no errors
         * while updating the QPs
         */
        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
            NO_INDX != work->orig_vlan_ix)
                __mlx4_unregister_vlan(&work->priv->dev, work->port,
                                       work->orig_vlan_id);
out:
        kfree(work);
        return;
}