/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)

struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head list;
	struct rb_node node;
	u64 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
	u64 reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
	atomic_t ref_count;
	u32 qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8 sched_queue;
	__be32 param3;
	u8 vlan_control;
	u8 fvl_rx;
	u8 pri_path_fl;
	u8 vlan_index;
	u8 feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
	int port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common com;
	int port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common com;
	int qpn;
};

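/*
 * Every tracked resource is reachable two ways: through a per-type
 * rb-tree keyed by res_id (for lookup by id) and through a per-slave
 * linked list (for enumerating everything a given function owns).
 */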
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For debug use */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
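
/*
 * Accounting for the per-slave quota mechanism: a request is granted
 * if it stays within the slave's quota and either fits into the
 * slave's guaranteed share or can be served from the free pool without
 * eating into the shares reserved (guaranteed) for other functions.
 * Port-scoped resources (MAC/VLAN) are accounted per port (port > 0),
 * everything else device-wide (port == 0).
 */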
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;

	if (slave > dev->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave])
		goto out;

	if (allocated + count <= guaranteed) {
		err = 0;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		if (free - from_free > reserved)
			err = 0;
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];

	if (slave > dev->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);
	if (port > 0) {
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
	}

	spin_unlock(&res_alloc->alloc_lock);
}

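/*
 * Each function is guaranteed 1/(2 * (num_vfs + 1)) of the resource
 * pool, and may consume up to half of the pool plus its guaranteed
 * share. For example (hypothetical numbers): with num_vfs = 3 and a
 * pool of 64K QPs, each function gets guaranteed[vf] = 8K and
 * quota[vf] = 32K + 8K = 40K.
 */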
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
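
/*
 * Called on the PF at startup: allocates the per-slave resource lists,
 * empties the per-type rb-trees and sets up the quota arrays for every
 * function (PF and VFs) according to the rules above.
 */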
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->num_vfs + 1) * sizeof(int),
						       GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->num_vfs + 1; t++) {
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] = 0;
				if (t == mlx4_master_func_num(dev))
					res_alloc->res_free = res_alloc->quota[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < MLX4_MAX_PORTS; j++)
					res_alloc->res_port_rsvd[j] +=
						res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
			qp_ctx->pri_path.mgid_index = slave & 0x7F;
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
			qp_ctx->alt_path.mgid_index = slave & 0x7F;
	}
}

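/*
 * Apply VST (administrator-assigned vlan) enforcement to a VF's QP
 * context: force vlan stripping/insertion with the operative vlan
 * index and QoS, program the vlan_control filters according to the
 * default vlan and link state, and force the source MAC when spoof
 * checking is enabled.
 */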
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
		if (MLX4_QP_ST_RC == qp_type ||
		    (MLX4_QP_ST_UD == qp_type &&
		     !mlx4_is_qp_reserved(dev, qpn)))
			return -EINVAL;

		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd */
		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
	return 0;
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

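/*
 * Take exclusive hold of a tracked resource: the entry is verified to
 * belong to the requesting slave and is marked RES_ANY_BUSY until
 * put_res() restores its previous state. Callers that only need the
 * owner can use mlx4_get_slave_from_resource_id() instead.
 */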
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;
	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

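/*
 * Register a contiguous range [base, base + count) of newly allocated
 * resource ids to a slave: entries are pre-allocated outside the lock,
 * then inserted into the rb-tree and the slave's list atomically, with
 * full rollback if any id is already tracked.
 */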
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* unwind only the entries that were actually inserted above */
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

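/*
 * The *_res_start_move_to() helpers implement the per-resource state
 * machines: they validate the requested transition (e.g. a QP may only
 * go RES_QP_MAPPED -> RES_QP_HW), record from_state/to_state and park
 * the entry in the BUSY state. The move is later committed with
 * res_end_move() or rolled back with res_abort_move().
 */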
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
		if (r->com.state != RES_SRQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
		err = -EINVAL;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;
		if (srq)
			*srq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

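/*
 * Roll back (abort) or commit (end) a transition started by one of
 * the *_res_start_move_to() helpers above.
 */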
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}

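/*
 * The *_alloc_res() handlers below service allocation requests made
 * on behalf of slaves. They all follow the same pattern: charge the
 * slave's quota via mlx4_grant_resource(), perform the real allocation
 * with the corresponding __mlx4_* helper, then register the result
 * with add_res_range(), undoing the earlier steps on any failure.
 */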
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

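/*
 * MAC registrations are tracked in a flat per-slave list (struct
 * mac_res) so they can be charged against the per-port MAC quota and
 * force-unregistered by rem_slave_macs() when the slave goes away.
 */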
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			mlx4_release_resource(dev, slave, RES_MAC, 1, port);
			kfree(res);
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

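/*
 * VLAN registrations are reference counted per (vlan, port) pair:
 * repeated registrations by the same slave only bump ref_count, and
 * rem_slave_vlans() unregisters each vlan as many times as the slave
 * referenced it.
 */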
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}

static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* release the vlan as many times as the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	/* older kernels treated reg/unreg vlan as a NOP; keep doing so for
	 * guests using the old API (no port encoded in the in_modifier).
	 */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}

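/*
 * Dispatcher for the ALLOC_RES command coming from a VF.  The low byte of
 * the in_modifier selects the resource type; for port-scoped resources
 * (MAC, VLAN) the next byte carries the port number, so e.g.
 * in_modifier = (1 << 8) | RES_MAC requests a MAC on port 1.  The
 * op_modifier carries the allocation op (RES_OP_RESERVE,
 * RES_OP_RESERVE_AND_MAP, RES_OP_MAP_ICM).
 */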
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

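/*
 * For RES_OP_RESERVE the 64-bit in_param packs two values: the low dword
 * (get_param_l) holds the base QP number of the range being freed and the
 * high dword (get_param_h) holds the count.
 */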
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		if (slave_state[slave].old_vlan_api)
			return 0;
		if (!port)
			return -EINVAL;
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);
	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}

int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}

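/*
 * A worked example for qp_get_mtt_size() above: with log_sq_size = 6 and
 * log_sq_stride = 4 the SQ occupies 1 << (6 + 4 + 4) = 16KB; for a QP on
 * an SRQ (rq_size = 0), page_offset = 0 and page_shift = 12, that is
 * 16384 >> 12 = 4 pages, and roundup_pow_of_two(4) yields 4 MTT entries.
 */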
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}

int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}

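/*
 * RST2INIT pins every object the QP context points at: the MTT range, the
 * send and receive CQs and (when used) the SRQ all have their reference
 * counts raised once the firmware command succeeds, so none of them can be
 * freed out from under a QP that is in hardware ownership.
 */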
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->param3 = 0;
	qp->vlan_control = 0;
	qp->fvl_rx = 0;
	qp->pri_path_fl = 0;
	qp->vlan_index = 0;
	qp->feup = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}

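/*
 * EQs are tracked per slave: since EQ numbers are only unique within a
 * function, the tracker id packs the owner into the high byte,
 * res_id = (slave << 8) | eqn.
 */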
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}

static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

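/*
 * Sanity checks applied to a QP state-transition mailbox before it is
 * passed to firmware.  For RC/UC transitions, a function other than the
 * master may only use gid index 0 in its primary and alternate address
 * paths.
 */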
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;

	qp_ctx = inbox->buf + 8;
	qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar = be32_to_cpu(*(__be32 *) inbox->buf);

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				/* slaves have only gid index 0 */
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
					if (qp_ctx->pri_path.mgid_index)
						return -EINVAL;
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
					if (qp_ctx->alt_path.mgid_index)
						return -EINVAL;
			}
			break;
		default:
			break;
		}

		break;
	default:
		break;
	}

	return 0;
}

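/*
 * WRITE_MTT mailbox layout as consumed below: page_list[0] holds the
 * starting MTT offset, page_list[1] is reserved, and the MTT entries
 * themselves begin at index 2.  Bit 0 of each entry is masked off here;
 * __mlx4_write_mtt() sets the hardware present flag again when it writes
 * the entries.
 */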
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness
	 */
	mtt.offset = 0; /* TBD this is broken but I don't handle it since
			 * we don't really use it
			 */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}

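/*
 * Inject an event into a slave's event queue.  The EQE is copied into a
 * mailbox and delivered with the GEN_EQE firmware command, whose
 * in_modifier encodes the slave in the low byte and the target EQN in
 * bits 16-23.  For command-completion events the per-slave token is
 * bumped first so the slave can match the completion to its request.
 */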
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}

int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}

int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}

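/*
 * CQ resize: both the CQ's current MTT and the MTT named in the resize
 * mailbox are held across the firmware call, and the reference counts are
 * only swapped (old decremented, new incremented) once MODIFY_CQ has
 * succeeded.
 */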
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}

static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}

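/*
 * The SRQ number embedded in the mailbox context must match the one in
 * the in_modifier; a mismatch would let a slave set up a context for an
 * SRQ other than the one it named, so the command is rejected up front.
 */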
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;
	u8 orig_sched_queue;
	__be32 orig_param3 = qpc->param3;
	u8 orig_vlan_control = qpc->pri_path.vlan_control;
	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
	u8 orig_pri_path_fl = qpc->pri_path.fl;
	u8 orig_vlan_index = qpc->pri_path.vlan_index;
	u8 orig_feup = qpc->pri_path.feup;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	orig_sched_queue = qpc->pri_path.sched_queue;
	err = update_vport_qp_param(dev, inbox, slave, qpn);
	if (err)
		return err;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	/* if no error, save the parameters the VF passed in (sched_queue is
	 * essentially the QOS value it requested).  These are useful if we
	 * later allow dynamic changes from VST back to VGT.
	 */
	if (!err) {
		qp->sched_queue = orig_sched_queue;
		qp->param3	= orig_param3;
		qp->vlan_control = orig_vlan_control;
		qp->fvl_rx	= orig_fvl_rx;
		qp->pri_path_fl	= orig_pri_path_fl;
		qp->vlan_index	= orig_vlan_index;
		qp->feup	= orig_feup;
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}

static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 reg_id)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		res->reg_id = reg_id;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 *reg_id)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)
		err = -EINVAL;
	else {
		*reg_id = res->reg_id;
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
		     int block_loopback, enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
						 block_loopback, prot,
						 reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_loopback, prot, type);
	default:
		return -EINVAL;
	}
}

static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
		     enum mlx4_protocol prot, enum mlx4_steer_type type,
		     u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
	default:
		return -EINVAL;
	}
}

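/*
 * Attach/detach a QP to/from a multicast group on behalf of a VF.  The
 * in_modifier packs the QPN in the low 24 bits, the protocol in bits
 * 28-30 and the block-loopback flag in bit 31; bit 1 of gid[7] selects
 * the steering type.  Attached rules are also recorded on the QP's
 * mcg_list so they can be released later when the QP or the slave is
 * cleaned up.
 */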
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	u64 reg_id = 0;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = qp_attach(dev, &qp, gid, block_loopback, prot,
				type, &reg_id);
		if (err) {
			pr_err("Failed to attach rule to qp 0x%x\n", qpn);
			goto ex_put;
		}
		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
		if (err)
			goto ex_detach;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
		if (err)
			goto ex_put;

		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
		if (err)
			pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
			       qpn, reg_id);
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;

ex_detach:
	qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

/*
 * MAC validation for Flow Steering rules.
 * A VF may attach rules only with a MAC address that is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;
	__be64 be_mac;

	/* make sure it isn't a multicast or broadcast MAC */
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
				return 0;
		}
		pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
		return -EINVAL;
	}
	return 0;
}

/*
 * If the rule is missing an eth header, prepend one carrying a MAC
 * address assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = ctrl->port;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for the eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
		       port);
		return -EINVAL;
	}

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;
}

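/*
 * Attach a device-managed flow steering rule on behalf of a slave: take
 * the target QP busy, validate (or synthesize) the L2 header in the
 * mailbox, pass the rule to firmware, and track it under the registration
 * id returned in vhcr->out_param so it can be reclaimed later.
 */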
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	int qpn;
	struct res_qp *rqp;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
		return err;
	}
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist)) {
			err = -EINVAL;
			goto err_put;
		}
		break;
	case MLX4_NET_TRANS_RULE_ID_IB:
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
			err = -EINVAL;
			goto err_put;
		}
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox\n");
		err = -EINVAL;
		goto err_put;
	}

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto err_put;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
	if (err) {
		mlx4_err(dev, "Failed to add flow steering resources\n");
		/* detach rule */
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
		goto err_put;
	}
	atomic_inc(&rqp->ref_count);
err_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

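/*
 * Detach a tracked flow steering rule: look up the rule to find its QP,
 * remove the tracker entry, issue the firmware detach, and drop the QP
 * reference taken at attach time.
 */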
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;
	struct res_qp *rqp;
	struct res_fs_rule *rrule;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
	if (err)
		return err;
	/* Release the rule from busy state before removal */
	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
	if (err)
		return err;

	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Failed to remove flow steering resources\n");
		goto out;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (!err)
		atomic_dec(&rqp->ref_count);
out:
	put_res(dev, slave, rrule->qpn, RES_QP);
	return err;
}

enum {
	BUSY_MAX_RETRIES = 10
};

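/*
 * Verify that the counter index belongs to the slave before forwarding
 * the query to firmware through the DMA wrapper.
 */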
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}

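/*
 * Undo every steering attachment recorded on the QP by add_mcg_res(),
 * using the detach path that matches the current steering mode.
 */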
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		switch (dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			mlx4_flow_detach(dev, rgid->reg_id);
			break;
		case MLX4_STEERING_MODE_B0:
			qp.qpn = rqp->local_qpn;
			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
						     rgid->prot, rgid->steer);
			break;
		}
		list_del(&rgid->list);
		kfree(rgid);
	}
}

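/*
 * Mark all resources of @type owned by @slave busy and flag them for
 * removal. Resources that are already busy (held by an in-flight command)
 * are counted and optionally reported; move_all_busy() below keeps
 * retrying until the count drops to zero or five seconds have passed.
 */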
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 ResourceType(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}

static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}

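/*
 * The rem_slave_*() helpers below all follow the same pattern: force the
 * slave's resources of one type into the busy state, then unwind each one
 * through its state machine. For QPs that is RES_QP_HW (2RST_QP command
 * to firmware) -> RES_QP_MAPPED (free ICM) -> RES_QP_RESERVED (release
 * the QP range and the tracker entry).
 */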
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					if (!valid_reserved(dev, slave, qpn)) {
						__mlx4_qp_release_range(dev, qpn, 1);
						mlx4_release_resource(dev, slave,
								      RES_QP, 1, 0);
					}
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

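/* SRQs: RES_SRQ_HW (HW2SW_SRQ) -> RES_SRQ_ALLOCATED (free ICM and entry). */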
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_SRQ, 1, 0);
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

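/* CQs: RES_CQ_HW (HW2SW_CQ) -> RES_CQ_ALLOCATED; referenced CQs are skipped. */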
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

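/* MPTs: RES_MPT_HW (HW2SW_MPT) -> RES_MPT_MAPPED -> RES_MPT_RESERVED. */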
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

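/* MTTs have a single state; free the range and release the quota. */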
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	LIST_HEAD(tlist);
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

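/* Flow steering rules: detach from firmware, then drop the tracker entry. */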
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

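/* EQs: RES_EQ_HW (HW2SW_EQ) -> RES_EQ_RESERVED (drop the tracker entry). */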
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	LIST_HEAD(tlist);
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

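/* Counters have no HW state to unwind; free the index and the quota. */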
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

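/* XRC domains likewise only need their number freed. */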
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

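/*
 * Called when a slave goes down or is reset: reclaim everything it still
 * owns, under the per-slave tracker mutex. Steering state (vlans, MACs,
 * flow steering rules) is torn down before the QPs it references; the
 * remaining types follow in their own dependency order.
 */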
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

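/*
 * Deferred work for an immediate (administrative) VF vlan change: rewrite
 * the vlan-related fields of every eligible QP the slave owns on the given
 * port with the UPDATE_QP command. QPs not yet in HW, reserved QPs and RSS
 * QPs are skipped; a vlan_id of MLX4_VGT restores the values the slave
 * originally programmed.
 */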
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue || /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn,
					  err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister the previous vlan_id if needed, provided the QP
	 * updates all succeeded
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
}