/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)
#define MLX4_MAC_MASK		0x7fffffffffffffffULL
#define ETH_ALEN		6

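/* Per-slave record of a MAC address registered on a given port. */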
struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

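/*
 * Common header embedded in every tracked resource.  A resource moves
 * between states under the tracker spinlock: from_state and to_state
 * record an in-flight transition while state is parked at the type's
 * BUSY value.
 */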
struct res_common {
	struct list_head list;
	u32 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

static inline const char *qp_states_str(enum res_qp_states state)
{
	switch (state) {
	case RES_QP_BUSY:	return "RES_QP_BUSY";
	case RES_QP_RESERVED:	return "RES_QP_RESERVED";
	case RES_QP_MAPPED:	return "RES_QP_MAPPED";
	case RES_QP_HW:		return "RES_QP_HW";
	default:		return "Unknown";
	}
}

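/*
 * QP tracking entry.  Pointers to the MTT, CQs and SRQ the QP is
 * attached to are kept so their reference counts can be dropped when
 * the QP is destroyed; mcg_list holds the multicast groups it joined.
 */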
struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY:	return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED:	return "RES_MTT_ALLOCATED";
	default:		return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

static inline const char *srq_states_str(enum res_srq_states state)
{
	switch (state) {
	case RES_SRQ_BUSY:	return "RES_SRQ_BUSY";
	case RES_SRQ_ALLOCATED:	return "RES_SRQ_ALLOCATED";
	case RES_SRQ_HW:	return "RES_SRQ_HW";
	default:		return "Unknown";
	}
}

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

static inline const char *counter_states_str(enum res_counter_states state)
{
	switch (state) {
	case RES_COUNTER_BUSY:		return "RES_COUNTER_BUSY";
	case RES_COUNTER_ALLOCATED:	return "RES_COUNTER_ALLOCATED";
	default:			return "Unknown";
	}
}

struct res_counter {
	struct res_common com;
	int port;
};

/* For debug use */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	default: return "Unknown resource type !!!";
	}
}

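/*
 * The tracker keeps one radix tree per resource type, indexed by
 * resource id, plus a per-slave list per type so that everything a
 * slave owns can be reclaimed if it resets or goes away.
 */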
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
				GFP_ATOMIC|__GFP_NOWARN);

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		for (i = 0; i < dev->num_slaves; i++)
			mlx4_delete_all_resources_for_slave(dev, i);

		kfree(priv->mfunc.master.res_tracker.slave_list);
	}
}

static void update_ud_gid(struct mlx4_dev *dev,
			  struct mlx4_qp_context *qp_ctx, u8 slave)
{
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
		 slave, qp_ctx->pri_path.mgid_index);
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

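/*
 * get_res() looks a resource up under the tracker lock, verifies that
 * the requesting slave owns it and marks it busy so that concurrent
 * commands cannot move it; put_res() restores the state saved in
 * from_state.  find_res() itself must be called with the lock held.
 */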
static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				 res_id);
}

static int get_res(struct mlx4_dev *dev, int slave, int res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENOENT;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	mlx4_dbg(dev, "res %s id 0x%x to busy\n",
		 ResourceType(type), r->res_id);

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    int res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock_irq(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, int res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

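/*
 * Constructors for tracking entries.  Each one sets the initial state
 * the hardware object is in right after its id has been allocated.
 */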
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;

	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

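/*
 * Register count consecutive resource ids [base..base+count) to a
 * slave: allocate tracking entries, insert them into the type's radix
 * tree and link them on the slave's list, all under the tracker lock.
 */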
static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct radix_tree_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = radix_tree_insert(root, base + i, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* i indexes res_arr; the radix tree is keyed by base + i */
	for (--i; i >= 0; --i) {
		radix_tree_delete(&tracker->res_tree[type], base + i);
		list_del(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	default:
		return -EINVAL;
	}
}

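/*
 * Unregister ids [base..base+count) from a slave: every entry must
 * exist, be owned by the slave and be in a removable state before any
 * of them is deleted.
 */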
static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		radix_tree_delete(&tracker->res_tree[type], i);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

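/*
 * The *_res_start_move_to() helpers begin a state transition: under
 * the tracker lock they validate the move, save from_state/to_state
 * and park the resource in its BUSY state.  res_end_move() commits
 * the transition and res_abort_move() rolls it back.
 */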
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = (struct res_qp *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = (struct res_mpt *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

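/*
 * The *_alloc_res()/*_free_res() handlers below service the virtual
 * ALLOC_RES and FREE_RES commands issued by slaves: they call the
 * master's __mlx4_* allocators and mirror the result in the tracker.
 */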
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}

static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	return 0;
}

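/* Top-level dispatcher for the virtual ALLOC_RES command. */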
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	return 0;
}

int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}

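/*
 * Helpers that parse hardware context entries (MPT, QP, SRQ, CQ, EQ)
 * out of the command mailbox so the wrappers can validate the MTT
 * ranges and PDs a slave refers to.
 */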
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0xffffff;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}

static int qp_get_pdn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->pd) & 0xffffff;
}

static int pdn2slave(int pdn)
{
	return (pdn >> NOT_MASKED_PD_BITS) - 1;
}

static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}

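/*
 * SW2HW_MPT: move an MPT to hardware ownership.  For non-physical MRs
 * the referenced MTT range must exist, belong to the slave and be
 * large enough; the MTT then gains a reference for the MPT's lifetime.
 */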
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
		err = -EPERM;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

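/*
 * RST2INIT: the first time a QP touches hardware.  Validate the MTT
 * range, PD, CQs and optional SRQ named in the context, take
 * references on them and remember them in the tracking entry.
 */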
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	if (pdn2slave(qp_get_pdn(qpc)) != slave) {
		err = -EPERM;
		goto ex_put_mtt;
	}

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}

int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}

static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0; /* TBD this is broken but I don't handle it since
			   we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}

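/*
 * Inject an event on behalf of the master into a slave's event queue,
 * but only if the slave registered for this event type and its EQ is
 * in hardware ownership.
 */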
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq;

	/* Create the event only if the slave is registered */
	if ((event_eq->event_type & (1 << eqe->type)) == 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}

int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}

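/*
 * The CQ wrappers mirror the MPT/EQ pattern: SW2HW pins the MTT range
 * named in the context, HW2SW drops the reference taken at creation.
 */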
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}

2213static int handle_resize(struct mlx4_dev *dev, int slave,
2214 struct mlx4_vhcr *vhcr,
2215 struct mlx4_cmd_mailbox *inbox,
2216 struct mlx4_cmd_mailbox *outbox,
2217 struct mlx4_cmd_info *cmd,
2218 struct res_cq *cq)
2219{
2220 int err;
2221 struct res_mtt *orig_mtt;
2222 struct res_mtt *mtt;
2223 struct mlx4_cq_context *cqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002224 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002225
2226 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2227 if (err)
2228 return err;
2229
2230 if (orig_mtt != cq->mtt) {
2231 err = -EINVAL;
2232 goto ex_put;
2233 }
2234
2235 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2236 if (err)
2237 goto ex_put;
2238
2239 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2240 if (err)
2241 goto ex_put1;
2242 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2243 if (err)
2244 goto ex_put1;
2245 atomic_dec(&orig_mtt->ref_count);
2246 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2247 atomic_inc(&mtt->ref_count);
2248 cq->mtt = mtt;
2249 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2250 return 0;
2251
2252ex_put1:
2253 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2254ex_put:
2255 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2256
2257 return err;
2259}
2260
2261int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2262 struct mlx4_vhcr *vhcr,
2263 struct mlx4_cmd_mailbox *inbox,
2264 struct mlx4_cmd_mailbox *outbox,
2265 struct mlx4_cmd_info *cmd)
2266{
2267 int cqn = vhcr->in_modifier;
2268 struct res_cq *cq;
2269 int err;
2270
2271 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2272 if (err)
2273 return err;
2274
2275 if (cq->com.from_state != RES_CQ_HW) {
 err = -EBUSY;
2276 goto ex_put;
 }
2277
2278 if (vhcr->op_modifier == 0) {
2279 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2280 if (err)
2281 goto ex_put;
2282 }
2283
2284 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2285ex_put:
2286 put_res(dev, slave, cqn, RES_CQ);
2287
2288 return err;
2289}
2290
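/*
 * Helpers that decode the SRQ context fields used for validation: the
 * PD number, and the number of MTT entries implied by the SRQ size and
 * stride relative to its page size.
 */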
2291static int srq_get_pdn(struct mlx4_srq_context *srqc)
2292{
2293 return be32_to_cpu(srqc->pd) & 0xffffff;
2294}
2295
2296static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2297{
2298 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2299 int log_rq_stride = srqc->logstride & 7;
2300 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2301
2302 if (log_srq_size + log_rq_stride + 4 < page_shift)
2303 return 1;
2304
2305 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2306}
2307
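/*
 * SW2HW_SRQ: same flow as SW2HW_CQ, with two extra checks - the SRQ
 * number in the context must match the in_modifier, and the PD named
 * in the context must belong to this slave.
 */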
2308int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2309 struct mlx4_vhcr *vhcr,
2310 struct mlx4_cmd_mailbox *inbox,
2311 struct mlx4_cmd_mailbox *outbox,
2312 struct mlx4_cmd_info *cmd)
2313{
2314 int err;
2315 int srqn = vhcr->in_modifier;
2316 struct res_mtt *mtt;
2317 struct res_srq *srq;
2318 struct mlx4_srq_context *srqc = inbox->buf;
2319 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2320 
2321 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2322 return -EINVAL;
2323
2324 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2325 if (err)
2326 return err;
2327 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2328 if (err)
2329 goto ex_abort;
2330 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2331 mtt);
2332 if (err)
2333 goto ex_put_mtt;
2334
2335 if (pdn2slave(srq_get_pdn(srqc)) != slave) {
2336 err = -EPERM;
2337 goto ex_put_mtt;
2338 }
2339
2340 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2341 if (err)
2342 goto ex_put_mtt;
2343
2344 atomic_inc(&mtt->ref_count);
2345 srq->mtt = mtt;
2346 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2347 res_end_move(dev, slave, RES_SRQ, srqn);
2348 return 0;
2349
2350ex_put_mtt:
2351 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2352ex_abort:
2353 res_abort_move(dev, slave, RES_SRQ, srqn);
2354
2355 return err;
2356}
2357
2358int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2359 struct mlx4_vhcr *vhcr,
2360 struct mlx4_cmd_mailbox *inbox,
2361 struct mlx4_cmd_mailbox *outbox,
2362 struct mlx4_cmd_info *cmd)
2363{
2364 int err;
2365 int srqn = vhcr->in_modifier;
2366 struct res_srq *srq;
2367
2368 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2369 if (err)
2370 return err;
2371 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2372 if (err)
2373 goto ex_abort;
2374 atomic_dec(&srq->mtt->ref_count);
2375 if (srq->cq)
2376 atomic_dec(&srq->cq->ref_count);
2377 res_end_move(dev, slave, RES_SRQ, srqn);
2378
2379 return 0;
2380
2381ex_abort:
2382 res_abort_move(dev, slave, RES_SRQ, srqn);
2383
2384 return err;
2385}
2386
2387int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2388 struct mlx4_vhcr *vhcr,
2389 struct mlx4_cmd_mailbox *inbox,
2390 struct mlx4_cmd_mailbox *outbox,
2391 struct mlx4_cmd_info *cmd)
2392{
2393 int err;
2394 int srqn = vhcr->in_modifier;
2395 struct res_srq *srq;
2396
2397 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2398 if (err)
2399 return err;
2400 if (srq->com.from_state != RES_SRQ_HW) {
2401 err = -EBUSY;
2402 goto out;
2403 }
2404 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2405out:
2406 put_res(dev, slave, srqn, RES_SRQ);
2407 return err;
2408}
2409
2410int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2411 struct mlx4_vhcr *vhcr,
2412 struct mlx4_cmd_mailbox *inbox,
2413 struct mlx4_cmd_mailbox *outbox,
2414 struct mlx4_cmd_info *cmd)
2415{
2416 int err;
2417 int srqn = vhcr->in_modifier;
2418 struct res_srq *srq;
2419
2420 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2421 if (err)
2422 return err;
2423
2424 if (srq->com.from_state != RES_SRQ_HW) {
2425 err = -EBUSY;
2426 goto out;
2427 }
2428
2429 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2430out:
2431 put_res(dev, slave, srqn, RES_SRQ);
2432 return err;
2433}
2434
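/*
 * Generic wrapper for QP commands that only require the QP to be in
 * hardware ownership: hold the QP busy, verify RES_QP_HW and forward
 * the command to the firmware.
 */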
2435int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2436 struct mlx4_vhcr *vhcr,
2437 struct mlx4_cmd_mailbox *inbox,
2438 struct mlx4_cmd_mailbox *outbox,
2439 struct mlx4_cmd_info *cmd)
2440{
2441 int err;
2442 int qpn = vhcr->in_modifier & 0x7fffff;
2443 struct res_qp *qp;
2444
2445 err = get_res(dev, slave, qpn, RES_QP, &qp);
2446 if (err)
2447 return err;
2448 if (qp->com.from_state != RES_QP_HW) {
2449 err = -EBUSY;
2450 goto out;
2451 }
2452
2453 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2454out:
2455 put_res(dev, slave, qpn, RES_QP);
2456 return err;
2457}
2458
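/*
 * INIT2RTR additionally rewrites the UD GID index in the QP context so
 * the slave can only use its own GID entry, then falls through to the
 * generic wrapper above.
 */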
2459int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2460 struct mlx4_vhcr *vhcr,
2461 struct mlx4_cmd_mailbox *inbox,
2462 struct mlx4_cmd_mailbox *outbox,
2463 struct mlx4_cmd_info *cmd)
2464{
2465 struct mlx4_qp_context *qpc = inbox->buf + 8;
2466
2467 update_ud_gid(dev, qpc, (u8)slave);
2468
2469 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2470}
2471
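/*
 * 2RST_QP: move the QP back to RES_QP_MAPPED and, once the firmware
 * command succeeds, drop the references the QP held on its MTT, its
 * receive and send CQs and, if present, its SRQ.
 */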
2472int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2473 struct mlx4_vhcr *vhcr,
2474 struct mlx4_cmd_mailbox *inbox,
2475 struct mlx4_cmd_mailbox *outbox,
2476 struct mlx4_cmd_info *cmd)
2477{
2478 int err;
2479 int qpn = vhcr->in_modifier & 0x7fffff;
2480 struct res_qp *qp;
2481
2482 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2483 if (err)
2484 return err;
2485 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2486 if (err)
2487 goto ex_abort;
2488
2489 atomic_dec(&qp->mtt->ref_count);
2490 atomic_dec(&qp->rcq->ref_count);
2491 atomic_dec(&qp->scq->ref_count);
2492 if (qp->srq)
2493 atomic_dec(&qp->srq->ref_count);
2494 res_end_move(dev, slave, RES_QP, qpn);
2495 return 0;
2496
2497ex_abort:
2498 res_abort_move(dev, slave, RES_QP, qpn);
2499
2500 return err;
2501}
2502
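/*
 * Multicast bookkeeping: every GID a QP attaches to is recorded on the
 * QP's mcg_list so the attachments can be undone when the slave goes
 * away (see detach_qp() below).
 */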
2503static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2504 struct res_qp *rqp, u8 *gid)
2505{
2506 struct res_gid *res;
2507
2508 list_for_each_entry(res, &rqp->mcg_list, list) {
2509 if (!memcmp(res->gid, gid, 16))
2510 return res;
2511 }
2512 return NULL;
2513}
2514
2515static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2516 u8 *gid, enum mlx4_protocol prot)
2517{
2518 struct res_gid *res;
2519 int err;
2520
2521 res = kzalloc(sizeof *res, GFP_KERNEL);
2522 if (!res)
2523 return -ENOMEM;
2524
2525 spin_lock_irq(&rqp->mcg_spl);
2526 if (find_gid(dev, slave, rqp, gid)) {
2527 kfree(res);
2528 err = -EEXIST;
2529 } else {
2530 memcpy(res->gid, gid, 16);
2531 res->prot = prot;
2532 list_add_tail(&res->list, &rqp->mcg_list);
2533 err = 0;
2534 }
2535 spin_unlock_irq(&rqp->mcg_spl);
2536
2537 return err;
2538}
2539
2540static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2541 u8 *gid, enum mlx4_protocol prot)
2542{
2543 struct res_gid *res;
2544 int err;
2545
2546 spin_lock_irq(&rqp->mcg_spl);
2547 res = find_gid(dev, slave, rqp, gid);
2548 if (!res || res->prot != prot)
2549 err = -EINVAL;
2550 else {
2551 list_del(&res->list);
2552 kfree(res);
2553 err = 0;
2554 }
2555 spin_unlock_irq(&rqp->mcg_spl);
2556
2557 return err;
2558}
2559
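/*
 * QP_ATTACH/DETACH; op_modifier selects attach.  The GID is recorded
 * before the hardware attach so cleanup stays accurate; if the attach
 * then fails, the record is removed again.
 */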
2560int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2561 struct mlx4_vhcr *vhcr,
2562 struct mlx4_cmd_mailbox *inbox,
2563 struct mlx4_cmd_mailbox *outbox,
2564 struct mlx4_cmd_info *cmd)
2565{
2566 struct mlx4_qp qp; /* dummy for calling attach/detach */
2567 u8 *gid = inbox->buf;
2568 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2569 int err, err1;
2570 int qpn;
2571 struct res_qp *rqp;
2572 int attach = vhcr->op_modifier;
2573 int block_loopback = vhcr->in_modifier >> 31;
2574 u8 steer_type_mask = 2;
2575 enum mlx4_steer_type type = gid[7] & steer_type_mask;
2576
2577 qpn = vhcr->in_modifier & 0xffffff;
2578 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2579 if (err)
2580 return err;
2581
2582 qp.qpn = qpn;
2583 if (attach) {
2584 err = add_mcg_res(dev, slave, rqp, gid, prot);
2585 if (err)
2586 goto ex_put;
2587
2588 err = mlx4_qp_attach_common(dev, &qp, gid,
2589 block_loopback, prot, type);
2590 if (err)
2591 goto ex_rem;
2592 } else {
2593 err = rem_mcg_res(dev, slave, rqp, gid, prot);
2594 if (err)
2595 goto ex_put;
2596 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2597 }
2598
2599 put_res(dev, slave, qpn, RES_QP);
2600 return err;
2601
2602ex_rem:
2603 /* ignore error return below, already in error */
2604 err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
2605ex_put:
2606 put_res(dev, slave, qpn, RES_QP);
2607
2608 return err;
2609}
2610
2611enum {
2612 BUSY_MAX_RETRIES = 10
2613};
2614
2615int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
2616 struct mlx4_vhcr *vhcr,
2617 struct mlx4_cmd_mailbox *inbox,
2618 struct mlx4_cmd_mailbox *outbox,
2619 struct mlx4_cmd_info *cmd)
2620{
2621 int err;
2622 int index = vhcr->in_modifier & 0xffff;
2623
2624 err = get_res(dev, slave, index, RES_COUNTER, NULL);
2625 if (err)
2626 return err;
2627
2628 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2629 put_res(dev, slave, index, RES_COUNTER);
2630 return err;
2631}
2632
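/*
 * Detach a dying slave's QP from every multicast group it joined.
 * Errors from mlx4_qp_detach_common() are ignored here; the slave is
 * being torn down regardless.
 */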
2633static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2634{
2635 struct res_gid *rgid;
2636 struct res_gid *tmp;
2637 int err;
2638 struct mlx4_qp qp; /* dummy for calling attach/detach */
2639
2640 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2641 qp.qpn = rqp->local_qpn;
2642 err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2643 MLX4_MC_STEER);
2644 list_del(&rgid->list);
2645 kfree(rgid);
2646 }
2647}
2648
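/*
 * Mark every resource of the given type owned by the slave busy and
 * flag it as being removed, so no command wrapper can grab it again.
 * Resources that are already busy are counted; move_all_busy() retries
 * them for up to five seconds before giving up.
 */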
2649static int _move_all_busy(struct mlx4_dev *dev, int slave,
2650 enum mlx4_resource type, int print)
2651{
2652 struct mlx4_priv *priv = mlx4_priv(dev);
2653 struct mlx4_resource_tracker *tracker =
2654 &priv->mfunc.master.res_tracker;
2655 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
2656 struct res_common *r;
2657 struct res_common *tmp;
2658 int busy;
2659
2660 busy = 0;
2661 spin_lock_irq(mlx4_tlock(dev));
2662 list_for_each_entry_safe(r, tmp, rlist, list) {
2663 if (r->owner == slave) {
2664 if (!r->removing) {
2665 if (r->state == RES_ANY_BUSY) {
2666 if (print)
2667 mlx4_dbg(dev,
2668 "%s id 0x%x is busy\n",
2669 ResourceType(type),
2670 r->res_id);
2671 ++busy;
2672 } else {
2673 r->from_state = r->state;
2674 r->state = RES_ANY_BUSY;
2675 r->removing = 1;
2676 }
2677 }
2678 }
2679 }
2680 spin_unlock_irq(mlx4_tlock(dev));
2681
2682 return busy;
2683}
2684
2685static int move_all_busy(struct mlx4_dev *dev, int slave,
2686 enum mlx4_resource type)
2687{
2688 unsigned long begin;
2689 int busy;
2690
2691 begin = jiffies;
2692 do {
2693 busy = _move_all_busy(dev, slave, type, 0);
2694 if (time_after(jiffies, begin + 5 * HZ))
2695 break;
2696 if (busy)
2697 cond_resched();
2698 } while (busy);
2699
2700 if (busy)
2701 busy = _move_all_busy(dev, slave, type, 1);
2702
2703 return busy;
2704}
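
/*
 * Destroy all QPs owned by the slave, unwinding each QP through its
 * states: RES_QP_HW issues 2RST_QP and drops the CQ/SRQ/MTT
 * references, RES_QP_MAPPED frees the ICM and RES_QP_RESERVED removes
 * the QP from the tracker.
 */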
2705static void rem_slave_qps(struct mlx4_dev *dev, int slave)
2706{
2707 struct mlx4_priv *priv = mlx4_priv(dev);
2708 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2709 struct list_head *qp_list =
2710 &tracker->slave_list[slave].res_list[RES_QP];
2711 struct res_qp *qp;
2712 struct res_qp *tmp;
2713 int state;
2714 u64 in_param;
2715 int qpn;
2716 int err;
2717
2718 err = move_all_busy(dev, slave, RES_QP);
2719 if (err)
2720 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
2721 "for slave %d\n", slave);
2722
2723 spin_lock_irq(mlx4_tlock(dev));
2724 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
2725 spin_unlock_irq(mlx4_tlock(dev));
2726 if (qp->com.owner == slave) {
2727 qpn = qp->com.res_id;
2728 detach_qp(dev, slave, qp);
2729 state = qp->com.from_state;
2730 while (state != 0) {
2731 switch (state) {
2732 case RES_QP_RESERVED:
2733 spin_lock_irq(mlx4_tlock(dev));
2734 radix_tree_delete(&tracker->res_tree[RES_QP],
2735 qp->com.res_id);
2736 list_del(&qp->com.list);
2737 spin_unlock_irq(mlx4_tlock(dev));
2738 kfree(qp);
2739 state = 0;
2740 break;
2741 case RES_QP_MAPPED:
2742 if (!valid_reserved(dev, slave, qpn))
2743 __mlx4_qp_free_icm(dev, qpn);
2744 state = RES_QP_RESERVED;
2745 break;
2746 case RES_QP_HW:
2747 in_param = slave;
2748 err = mlx4_cmd(dev, in_param,
2749 qp->local_qpn, 2,
2750 MLX4_CMD_2RST_QP,
2751 MLX4_CMD_TIME_CLASS_A,
2752 MLX4_CMD_NATIVE);
2753 if (err)
2754 mlx4_dbg(dev, "rem_slave_qps: failed"
2755 " to move slave %d qpn %d to"
2756 " reset\n", slave,
2757 qp->local_qpn);
2758 atomic_dec(&qp->rcq->ref_count);
2759 atomic_dec(&qp->scq->ref_count);
2760 atomic_dec(&qp->mtt->ref_count);
2761 if (qp->srq)
2762 atomic_dec(&qp->srq->ref_count);
2763 state = RES_QP_MAPPED;
2764 break;
2765 default:
2766 state = 0;
2767 }
2768 }
2769 }
2770 spin_lock_irq(mlx4_tlock(dev));
2771 }
2772 spin_unlock_irq(mlx4_tlock(dev));
2773}
2774
2775static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
2776{
2777 struct mlx4_priv *priv = mlx4_priv(dev);
2778 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2779 struct list_head *srq_list =
2780 &tracker->slave_list[slave].res_list[RES_SRQ];
2781 struct res_srq *srq;
2782 struct res_srq *tmp;
2783 int state;
2784 u64 in_param;
2786 int srqn;
2787 int err;
2788
2789 err = move_all_busy(dev, slave, RES_SRQ);
2790 if (err)
2791 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
2792 "busy for slave %d\n", slave);
2793
2794 spin_lock_irq(mlx4_tlock(dev));
2795 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
2796 spin_unlock_irq(mlx4_tlock(dev));
2797 if (srq->com.owner == slave) {
2798 srqn = srq->com.res_id;
2799 state = srq->com.from_state;
2800 while (state != 0) {
2801 switch (state) {
2802 case RES_SRQ_ALLOCATED:
2803 __mlx4_srq_free_icm(dev, srqn);
2804 spin_lock_irq(mlx4_tlock(dev));
2805 radix_tree_delete(&tracker->res_tree[RES_SRQ],
2806 srqn);
2807 list_del(&srq->com.list);
2808 spin_unlock_irq(mlx4_tlock(dev));
2809 kfree(srq);
2810 state = 0;
2811 break;
2812
2813 case RES_SRQ_HW:
2814 in_param = slave;
2815 err = mlx4_cmd(dev, in_param, srqn, 1,
2816 MLX4_CMD_HW2SW_SRQ,
2817 MLX4_CMD_TIME_CLASS_A,
2818 MLX4_CMD_NATIVE);
2819 if (err)
2820 mlx4_dbg(dev, "rem_slave_srqs: failed"
2821 " to move slave %d srq %d to"
2822 " SW ownership\n",
2823 slave, srqn);
2824
2825 atomic_dec(&srq->mtt->ref_count);
2826 if (srq->cq)
2827 atomic_dec(&srq->cq->ref_count);
2828 state = RES_SRQ_ALLOCATED;
2829 break;
2830
2831 default:
2832 state = 0;
2833 }
2834 }
2835 }
2836 spin_lock_irq(mlx4_tlock(dev));
2837 }
2838 spin_unlock_irq(mlx4_tlock(dev));
2839}
2840
2841static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
2842{
2843 struct mlx4_priv *priv = mlx4_priv(dev);
2844 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2845 struct list_head *cq_list =
2846 &tracker->slave_list[slave].res_list[RES_CQ];
2847 struct res_cq *cq;
2848 struct res_cq *tmp;
2849 int state;
2850 u64 in_param;
2852 int cqn;
2853 int err;
2854
2855 err = move_all_busy(dev, slave, RES_CQ);
2856 if (err)
2857 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
2858 "busy for slave %d\n", slave);
2859
2860 spin_lock_irq(mlx4_tlock(dev));
2861 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
2862 spin_unlock_irq(mlx4_tlock(dev));
2863 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
2864 cqn = cq->com.res_id;
2865 state = cq->com.from_state;
2866 while (state != 0) {
2867 switch (state) {
2868 case RES_CQ_ALLOCATED:
2869 __mlx4_cq_free_icm(dev, cqn);
2870 spin_lock_irq(mlx4_tlock(dev));
2871 radix_tree_delete(&tracker->res_tree[RES_CQ],
2872 cqn);
2873 list_del(&cq->com.list);
2874 spin_unlock_irq(mlx4_tlock(dev));
2875 kfree(cq);
2876 state = 0;
2877 break;
2878
2879 case RES_CQ_HW:
2880 in_param = slave;
2881 err = mlx4_cmd(dev, in_param, cqn, 1,
2882 MLX4_CMD_HW2SW_CQ,
2883 MLX4_CMD_TIME_CLASS_A,
2884 MLX4_CMD_NATIVE);
2885 if (err)
2886 mlx4_dbg(dev, "rem_slave_cqs: failed"
2887 " to move slave %d cq %d to"
2888 " SW ownership\n",
2889 slave, cqn);
2890 atomic_dec(&cq->mtt->ref_count);
2891 state = RES_CQ_ALLOCATED;
2892 break;
2893
2894 default:
2895 state = 0;
2896 }
2897 }
2898 }
2899 spin_lock_irq(mlx4_tlock(dev));
2900 }
2901 spin_unlock_irq(mlx4_tlock(dev));
2902}
2903
2904static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
2905{
2906 struct mlx4_priv *priv = mlx4_priv(dev);
2907 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2908 struct list_head *mpt_list =
2909 &tracker->slave_list[slave].res_list[RES_MPT];
2910 struct res_mpt *mpt;
2911 struct res_mpt *tmp;
2912 int state;
2913 u64 in_param;
2915 int mptn;
2916 int err;
2917
2918 err = move_all_busy(dev, slave, RES_MPT);
2919 if (err)
2920 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
2921 "busy for slave %d\n", slave);
2922
2923 spin_lock_irq(mlx4_tlock(dev));
2924 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
2925 spin_unlock_irq(mlx4_tlock(dev));
2926 if (mpt->com.owner == slave) {
2927 mptn = mpt->com.res_id;
2928 state = mpt->com.from_state;
2929 while (state != 0) {
2930 switch (state) {
2931 case RES_MPT_RESERVED:
2932 __mlx4_mr_release(dev, mpt->key);
2933 spin_lock_irq(mlx4_tlock(dev));
2934 radix_tree_delete(&tracker->res_tree[RES_MPT],
2935 mptn);
2936 list_del(&mpt->com.list);
2937 spin_unlock_irq(mlx4_tlock(dev));
2938 kfree(mpt);
2939 state = 0;
2940 break;
2941
2942 case RES_MPT_MAPPED:
2943 __mlx4_mr_free_icm(dev, mpt->key);
2944 state = RES_MPT_RESERVED;
2945 break;
2946
2947 case RES_MPT_HW:
2948 in_param = slave;
2949 err = mlx4_cmd(dev, in_param, mptn, 0,
2950 MLX4_CMD_HW2SW_MPT,
2951 MLX4_CMD_TIME_CLASS_A,
2952 MLX4_CMD_NATIVE);
2953 if (err)
2954 mlx4_dbg(dev, "rem_slave_mrs: failed"
2955 " to move slave %d mpt %d to"
2956 " SW ownership\n",
2957 slave, mptn);
2958 if (mpt->mtt)
2959 atomic_dec(&mpt->mtt->ref_count);
2960 state = RES_MPT_MAPPED;
2961 break;
2962 default:
2963 state = 0;
2964 }
2965 }
2966 }
2967 spin_lock_irq(mlx4_tlock(dev));
2968 }
2969 spin_unlock_irq(mlx4_tlock(dev));
2970}
2971
2972static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
2973{
2974 struct mlx4_priv *priv = mlx4_priv(dev);
2975 struct mlx4_resource_tracker *tracker =
2976 &priv->mfunc.master.res_tracker;
2977 struct list_head *mtt_list =
2978 &tracker->slave_list[slave].res_list[RES_MTT];
2979 struct res_mtt *mtt;
2980 struct res_mtt *tmp;
2981 int state;
2983 int base;
2984 int err;
2985
2986 err = move_all_busy(dev, slave, RES_MTT);
2987 if (err)
2988 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
2989 "busy for slave %d\n", slave);
2990
2991 spin_lock_irq(mlx4_tlock(dev));
2992 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
2993 spin_unlock_irq(mlx4_tlock(dev));
2994 if (mtt->com.owner == slave) {
2995 base = mtt->com.res_id;
2996 state = mtt->com.from_state;
2997 while (state != 0) {
2998 switch (state) {
2999 case RES_MTT_ALLOCATED:
3000 __mlx4_free_mtt_range(dev, base,
3001 mtt->order);
3002 spin_lock_irq(mlx4_tlock(dev));
3003 radix_tree_delete(&tracker->res_tree[RES_MTT],
3004 base);
3005 list_del(&mtt->com.list);
3006 spin_unlock_irq(mlx4_tlock(dev));
3007 kfree(mtt);
3008 state = 0;
3009 break;
3010
3011 default:
3012 state = 0;
3013 }
3014 }
3015 }
3016 spin_lock_irq(mlx4_tlock(dev));
3017 }
3018 spin_unlock_irq(mlx4_tlock(dev));
3019}
3020
3021static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3022{
3023 struct mlx4_priv *priv = mlx4_priv(dev);
3024 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3025 struct list_head *eq_list =
3026 &tracker->slave_list[slave].res_list[RES_EQ];
3027 struct res_eq *eq;
3028 struct res_eq *tmp;
3029 int err;
3030 int state;
3032 int eqn;
3033 struct mlx4_cmd_mailbox *mailbox;
3034
3035 err = move_all_busy(dev, slave, RES_EQ);
3036 if (err)
3037 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3038 "busy for slave %d\n", slave);
3039
3040 spin_lock_irq(mlx4_tlock(dev));
3041 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3042 spin_unlock_irq(mlx4_tlock(dev));
3043 if (eq->com.owner == slave) {
3044 eqn = eq->com.res_id;
3045 state = eq->com.from_state;
3046 while (state != 0) {
3047 switch (state) {
3048 case RES_EQ_RESERVED:
3049 spin_lock_irq(mlx4_tlock(dev));
3050 radix_tree_delete(&tracker->res_tree[RES_EQ],
3051 eqn);
3052 list_del(&eq->com.list);
3053 spin_unlock_irq(mlx4_tlock(dev));
3054 kfree(eq);
3055 state = 0;
3056 break;
3057
3058 case RES_EQ_HW:
3059 mailbox = mlx4_alloc_cmd_mailbox(dev);
3060 if (IS_ERR(mailbox)) {
3061 cond_resched();
3062 continue;
3063 }
3064 err = mlx4_cmd_box(dev, slave, 0,
3065 eqn & 0xff, 0,
3066 MLX4_CMD_HW2SW_EQ,
3067 MLX4_CMD_TIME_CLASS_A,
3068 MLX4_CMD_NATIVE);
 if (err)
3069 mlx4_dbg(dev, "rem_slave_eqs: failed"
3070 " to move slave %d eqs %d to"
3071 " SW ownership\n", slave, eqn);
3072 mlx4_free_cmd_mailbox(dev, mailbox);
3073 if (!err) {
3074 atomic_dec(&eq->mtt->ref_count);
3075 state = RES_EQ_RESERVED;
3076 }
3077 break;
3078
3079 default:
3080 state = 0;
3081 }
3082 }
3083 }
3084 spin_lock_irq(mlx4_tlock(dev));
3085 }
3086 spin_unlock_irq(mlx4_tlock(dev));
3087}
3088
3089void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
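/*
 * Called when a slave is reset or removed: release everything the
 * resource tracker still holds for it, in dependency order - QPs
 * before the CQs and SRQs they reference, MTTs last.
 */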
3090{
3091 struct mlx4_priv *priv = mlx4_priv(dev);
3092
3093 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3094 /* TODO: VLAN cleanup is not yet handled here; only MACs are freed */
3095 rem_slave_macs(dev, slave);
3096 rem_slave_qps(dev, slave);
3097 rem_slave_srqs(dev, slave);
3098 rem_slave_cqs(dev, slave);
3099 rem_slave_mrs(dev, slave);
3100 rem_slave_eqs(dev, slave);
3101 rem_slave_mtts(dev, slave);
3102 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3103}