/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID	(1ull << 63)
#define MLX4_MAC_MASK	0x7fffffffffffffffULL
#define ETH_ALEN	6

struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

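/*
 * Every tracked resource starts with this common header.  An entry is
 * kept both in a per-type radix tree keyed by res_id and on the owning
 * slave's per-type list, so it can be found by id or swept per slave.
 */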
struct res_common {
	struct list_head list;
	u32 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
};

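/*
 * Per-type state machines.  The shared *_BUSY value (RES_ANY_BUSY)
 * marks an entry that is in the middle of a transition; from_state and
 * to_state in res_common record the endpoints so the move can later be
 * committed or rolled back.
 */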
enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

static inline const char *qp_states_str(enum res_qp_states state)
{
	switch (state) {
	case RES_QP_BUSY: return "RES_QP_BUSY";
	case RES_QP_RESERVED: return "RES_QP_RESERVED";
	case RES_QP_MAPPED: return "RES_QP_MAPPED";
	case RES_QP_HW: return "RES_QP_HW";
	default: return "Unknown";
	}
}

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

static inline const char *srq_states_str(enum res_srq_states state)
{
	switch (state) {
	case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
	case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
	case RES_SRQ_HW: return "RES_SRQ_HW";
	default: return "Unknown";
	}
}

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

static inline const char *counter_states_str(enum res_counter_states state)
{
	switch (state) {
	case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
	case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_counter {
	struct res_common com;
	int port;
};

/* for debug purposes */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	default: return "Unknown resource type !!!";
	}
}

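/*
 * The tracker keeps one radix tree per resource type plus a per-slave
 * list of owned resources; both structures are protected by the
 * tracker spinlock (mlx4_tlock).
 */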
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
				GFP_ATOMIC|__GFP_NOWARN);

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		for (i = 0; i < dev->num_slaves; i++)
			mlx4_delete_all_resources_for_slave(dev, i);

		kfree(priv->mfunc.master.res_tracker.slave_list);
	}
}

static void update_ud_gid(struct mlx4_dev *dev,
			  struct mlx4_qp_context *qp_ctx, u8 slave)
{
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
		 slave, qp_ctx->pri_path.mgid_index);
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				 res_id);
}

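/*
 * get_res()/put_res() form a busy-bit lock on a single entry:
 * get_res() verifies ownership, saves the current state in from_state
 * and parks the entry in RES_ANY_BUSY; put_res() restores the saved
 * state.  Concurrent users see -EBUSY in the meantime.
 */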
static int get_res(struct mlx4_dev *dev, int slave, int res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENOENT;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	mlx4_dbg(dev, "res %s id 0x%x to busy\n",
		 ResourceType(type), r->res_id);

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    int res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, int res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;

	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

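/*
 * Register a contiguous range of resources to a slave: tracker entries
 * are allocated outside the lock, then inserted into the radix tree
 * and the slave's list atomically, undoing all insertions on partial
 * failure.
 */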
static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct radix_tree_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = radix_tree_insert(root, base + i, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i) {
		radix_tree_delete(&tracker->res_tree[type], base + i);
		list_del(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	default:
		return -EINVAL;
	}
}

static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		radix_tree_delete(&tracker->res_tree[type], i);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

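/*
 * State transitions are three-phase: *_res_start_move_to() validates
 * the requested transition and parks the entry in its BUSY state, the
 * caller then issues the firmware command, and res_end_move() or
 * res_abort_move() commits to to_state or rolls back to from_state.
 */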
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = (struct res_qp *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = (struct res_mpt *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}

static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	return 0;
}

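/*
 * ALLOC_RES is the slave's paravirtualized allocation path: the
 * in_modifier selects the resource type and the op_modifier the phase
 * (reserve vs. ICM map), mirroring the two-stage native allocators.
 */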
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	return 0;
}

int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0xffffff;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

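/*
 * Conservatively derive the number of MTT entries implied by a QP
 * context: the WQE buffer size follows from the log sizes and strides,
 * the receive queue is absent for SRQ/RSS/XRC QPs, and the page count
 * is rounded up to a power of two so it can be checked against the
 * power-of-two MTT reservation.
 */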
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}

static int qp_get_pdn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->pd) & 0xffffff;
}

static int pdn2slave(int pdn)
{
	return (pdn >> NOT_MASKED_PD_BITS) - 1;
}

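/*
 * An MTT reservation of 'order' covers 2^order entries starting at
 * com.res_id; a slave-supplied window [start, start + size) must fall
 * entirely inside it.
 */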
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}

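/*
 * SW2HW_MPT moves the MPT to hardware ownership; for non-physical MRs
 * the backing MTT range is validated against a reservation owned by
 * this slave and pinned via its ref_count.
 */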
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
		err = -EPERM;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	if (pdn2slave(qp_get_pdn(qpc)) != slave) {
		err = -EPERM;
		goto ex_put_mtt;
	}

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}

int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}

static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;	/* TBD this is broken but I don't handle it since
			   we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}

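/*
 * Forward an event to a slave by injecting an EQE into the event queue
 * the slave registered for this event type; the slave and target EQ
 * are encoded in the GEN_EQE in_modifier.
 */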
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq;

	/* Create the event only if the slave is registered */
	if ((event_eq->event_type & (1 << eqe->type)) == 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}

int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}

int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}

2214static int handle_resize(struct mlx4_dev *dev, int slave,
2215 struct mlx4_vhcr *vhcr,
2216 struct mlx4_cmd_mailbox *inbox,
2217 struct mlx4_cmd_mailbox *outbox,
2218 struct mlx4_cmd_info *cmd,
2219 struct res_cq *cq)
2220{
2221 int err;
2222 struct res_mtt *orig_mtt;
2223 struct res_mtt *mtt;
2224 struct mlx4_cq_context *cqc = inbox->buf;
2225 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2226
2227 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2228 if (err)
2229 return err;
2230
2231 if (orig_mtt != cq->mtt) {
2232 err = -EINVAL;
2233 goto ex_put;
2234 }
2235
2236 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2237 if (err)
2238 goto ex_put;
2239
2240 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2241 if (err)
2242 goto ex_put1;
2243 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2244 if (err)
2245 goto ex_put1;
2246 atomic_dec(&orig_mtt->ref_count);
2247 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2248 atomic_inc(&mtt->ref_count);
2249 cq->mtt = mtt;
2250 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2251 return 0;
2252
2253ex_put1:
2254 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2255ex_put:
2256 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2257
2258 return err;
2259}
2261
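/*
 * MODIFY_CQ with op_modifier 0 is a resize and needs the MTT
 * bookkeeping above; other op_modifier values (such as moderation
 * changes) pass straight through to firmware.
 */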
2262int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2263 struct mlx4_vhcr *vhcr,
2264 struct mlx4_cmd_mailbox *inbox,
2265 struct mlx4_cmd_mailbox *outbox,
2266 struct mlx4_cmd_info *cmd)
2267{
2268 int cqn = vhcr->in_modifier;
2269 struct res_cq *cq;
2270 int err;
2271
2272 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2273 if (err)
2274 return err;
2275
2276 if (cq->com.from_state != RES_CQ_HW)
2277 goto ex_put;
2278
2279 if (vhcr->op_modifier == 0) {
2280 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2281 if (err)
2282 goto ex_put;
2283 }
2284
2285 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2286ex_put:
2287 put_res(dev, slave, cqn, RES_CQ);
2288
2289 return err;
2290}
2291
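/*
 * Helpers for fields of a guest-written SRQ context. The MTT size is
 * the SRQ buffer size in pages: 2^log_srq_size entries of
 * (16 << log_rq_stride) bytes each, divided by the 2^page_shift page
 * size, with a minimum of one page.
 */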
2292static int srq_get_pdn(struct mlx4_srq_context *srqc)
2293{
2294 return be32_to_cpu(srqc->pd) & 0xffffff;
2295}
2296
2297static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2298{
2299 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2300 int log_rq_stride = srqc->logstride & 7;
2301 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2302
2303 if (log_srq_size + log_rq_stride + 4 < page_shift)
2304 return 1;
2305
2306 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2307}
2308
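/*
 * SW2HW_SRQ: same pattern as SW2HW_CQ, with two extra sanity checks -
 * the SRQ number in the context must match the in_modifier, and the PD
 * named in the context must belong to the calling slave.
 */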
2309int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2310 struct mlx4_vhcr *vhcr,
2311 struct mlx4_cmd_mailbox *inbox,
2312 struct mlx4_cmd_mailbox *outbox,
2313 struct mlx4_cmd_info *cmd)
2314{
2315 int err;
2316 int srqn = vhcr->in_modifier;
2317 struct res_mtt *mtt;
2318 struct res_srq *srq;
2319 struct mlx4_srq_context *srqc = inbox->buf;
2320 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2321
2322 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2323 return -EINVAL;
2324
2325 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2326 if (err)
2327 return err;
2328 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2329 if (err)
2330 goto ex_abort;
2331 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2332 mtt);
2333 if (err)
2334 goto ex_put_mtt;
2335
2336 if (pdn2slave(srq_get_pdn(srqc)) != slave) {
2337 err = -EPERM;
2338 goto ex_put_mtt;
2339 }
2340
2341 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2342 if (err)
2343 goto ex_put_mtt;
2344
2345 atomic_inc(&mtt->ref_count);
2346 srq->mtt = mtt;
2347 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2348 res_end_move(dev, slave, RES_SRQ, srqn);
2349 return 0;
2350
2351ex_put_mtt:
2352 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2353ex_abort:
2354 res_abort_move(dev, slave, RES_SRQ, srqn);
2355
2356 return err;
2357}
2358
2359int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2360 struct mlx4_vhcr *vhcr,
2361 struct mlx4_cmd_mailbox *inbox,
2362 struct mlx4_cmd_mailbox *outbox,
2363 struct mlx4_cmd_info *cmd)
2364{
2365 int err;
2366 int srqn = vhcr->in_modifier;
2367 struct res_srq *srq;
2368
2369 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2370 if (err)
2371 return err;
2372 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2373 if (err)
2374 goto ex_abort;
2375 atomic_dec(&srq->mtt->ref_count);
2376 if (srq->cq)
2377 atomic_dec(&srq->cq->ref_count);
2378 res_end_move(dev, slave, RES_SRQ, srqn);
2379
2380 return 0;
2381
2382ex_abort:
2383 res_abort_move(dev, slave, RES_SRQ, srqn);
2384
2385 return err;
2386}
2387
2388int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2389 struct mlx4_vhcr *vhcr,
2390 struct mlx4_cmd_mailbox *inbox,
2391 struct mlx4_cmd_mailbox *outbox,
2392 struct mlx4_cmd_info *cmd)
2393{
2394 int err;
2395 int srqn = vhcr->in_modifier;
2396 struct res_srq *srq;
2397
2398 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2399 if (err)
2400 return err;
2401 if (srq->com.from_state != RES_SRQ_HW) {
2402 err = -EBUSY;
2403 goto out;
2404 }
2405 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2406out:
2407 put_res(dev, slave, srqn, RES_SRQ);
2408 return err;
2409}
2410
2411int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2412 struct mlx4_vhcr *vhcr,
2413 struct mlx4_cmd_mailbox *inbox,
2414 struct mlx4_cmd_mailbox *outbox,
2415 struct mlx4_cmd_info *cmd)
2416{
2417 int err;
2418 int srqn = vhcr->in_modifier;
2419 struct res_srq *srq;
2420
2421 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2422 if (err)
2423 return err;
2424
2425 if (srq->com.from_state != RES_SRQ_HW) {
2426 err = -EBUSY;
2427 goto out;
2428 }
2429
2430 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2431out:
2432 put_res(dev, slave, srqn, RES_SRQ);
2433 return err;
2434}
2435
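/*
 * Generic wrapper for QP commands that are only valid while the QP is
 * owned by firmware: take the QP busy, insist on RES_QP_HW, run the
 * real command, release.
 */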
2436int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2437 struct mlx4_vhcr *vhcr,
2438 struct mlx4_cmd_mailbox *inbox,
2439 struct mlx4_cmd_mailbox *outbox,
2440 struct mlx4_cmd_info *cmd)
2441{
2442 int err;
2443 int qpn = vhcr->in_modifier & 0x7fffff;
2444 struct res_qp *qp;
2445
2446 err = get_res(dev, slave, qpn, RES_QP, &qp);
2447 if (err)
2448 return err;
2449 if (qp->com.from_state != RES_QP_HW) {
2450 err = -EBUSY;
2451 goto out;
2452 }
2453
2454 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2455out:
2456 put_res(dev, slave, qpn, RES_QP);
2457 return err;
2458}
2459
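/*
 * INIT2RTR: the QP context sits 8 bytes into the mailbox, past the
 * optparmask header; force the slave's own GID into the UD address
 * fields before falling through to the generic flow.
 */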
2460int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2461 struct mlx4_vhcr *vhcr,
2462 struct mlx4_cmd_mailbox *inbox,
2463 struct mlx4_cmd_mailbox *outbox,
2464 struct mlx4_cmd_info *cmd)
2465{
2466 struct mlx4_qp_context *qpc = inbox->buf + 8;
2467
2468 update_ud_gid(dev, qpc, (u8)slave);
2469
2470 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2471}
2472
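/*
 * Any-state-to-RESET: on success the QP returns to RES_QP_MAPPED and
 * drops the references it held on its MTT, receive and send CQs and,
 * if present, its SRQ.
 */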
2473int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2474 struct mlx4_vhcr *vhcr,
2475 struct mlx4_cmd_mailbox *inbox,
2476 struct mlx4_cmd_mailbox *outbox,
2477 struct mlx4_cmd_info *cmd)
2478{
2479 int err;
2480 int qpn = vhcr->in_modifier & 0x7fffff;
2481 struct res_qp *qp;
2482
2483 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2484 if (err)
2485 return err;
2486 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2487 if (err)
2488 goto ex_abort;
2489
2490 atomic_dec(&qp->mtt->ref_count);
2491 atomic_dec(&qp->rcq->ref_count);
2492 atomic_dec(&qp->scq->ref_count);
2493 if (qp->srq)
2494 atomic_dec(&qp->srq->ref_count);
2495 res_end_move(dev, slave, RES_QP, qpn);
2496 return 0;
2497
2498ex_abort:
2499 res_abort_move(dev, slave, RES_QP, qpn);
2500
2501 return err;
2502}
2503
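/*
 * Per-QP tracking of multicast group membership, protected by
 * rqp->mcg_spl. Recording each attached gid lets detach_qp() undo any
 * attachments a slave leaves behind when it dies.
 */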
2504static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2505 struct res_qp *rqp, u8 *gid)
2506{
2507 struct res_gid *res;
2508
2509 list_for_each_entry(res, &rqp->mcg_list, list) {
2510 if (!memcmp(res->gid, gid, 16))
2511 return res;
2512 }
2513 return NULL;
2514}
2515
2516static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2517 u8 *gid, enum mlx4_protocol prot)
2518{
2519 struct res_gid *res;
2520 int err;
2521
2522 res = kzalloc(sizeof *res, GFP_KERNEL);
2523 if (!res)
2524 return -ENOMEM;
2525
2526 spin_lock_irq(&rqp->mcg_spl);
2527 if (find_gid(dev, slave, rqp, gid)) {
2528 kfree(res);
2529 err = -EEXIST;
2530 } else {
2531 memcpy(res->gid, gid, 16);
2532 res->prot = prot;
2533 list_add_tail(&res->list, &rqp->mcg_list);
2534 err = 0;
2535 }
2536 spin_unlock_irq(&rqp->mcg_spl);
2537
2538 return err;
2539}
2540
2541static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2542 u8 *gid, enum mlx4_protocol prot)
2543{
2544 struct res_gid *res;
2545 int err;
2546
2547 spin_lock_irq(&rqp->mcg_spl);
2548 res = find_gid(dev, slave, rqp, gid);
2549 if (!res || res->prot != prot)
2550 err = -EINVAL;
2551 else {
2552 list_del(&res->list);
2553 kfree(res);
2554 err = 0;
2555 }
2556 spin_unlock_irq(&rqp->mcg_spl);
2557
2558 return err;
2559}
2560
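/*
 * Multicast attach/detach for a slave's QP (op_modifier selects which).
 * The gid is recorded before attaching, so a failed attach can be
 * rolled back, and unrecorded before the actual detach.
 */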
2561int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2562 struct mlx4_vhcr *vhcr,
2563 struct mlx4_cmd_mailbox *inbox,
2564 struct mlx4_cmd_mailbox *outbox,
2565 struct mlx4_cmd_info *cmd)
2566{
2567 struct mlx4_qp qp; /* dummy for calling attach/detach */
2568 u8 *gid = inbox->buf;
2569 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2570 int err, err1;
2571 int qpn;
2572 struct res_qp *rqp;
2573 int attach = vhcr->op_modifier;
2574 int block_loopback = vhcr->in_modifier >> 31;
2575 u8 steer_type_mask = 2;
2576 enum mlx4_steer_type type = gid[7] & steer_type_mask;
2577
2578 qpn = vhcr->in_modifier & 0xffffff;
2579 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2580 if (err)
2581 return err;
2582
2583 qp.qpn = qpn;
2584 if (attach) {
2585 err = add_mcg_res(dev, slave, rqp, gid, prot);
2586 if (err)
2587 goto ex_put;
2588
2589 err = mlx4_qp_attach_common(dev, &qp, gid,
2590 block_loopback, prot, type);
2591 if (err)
2592 goto ex_rem;
2593 } else {
2594 err = rem_mcg_res(dev, slave, rqp, gid, prot);
2595 if (err)
2596 goto ex_put;
2597 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2598 }
2599
2600 put_res(dev, slave, qpn, RES_QP);
2601 return err;
2602
2603ex_rem:
2604 /* ignore error return below, already in error */
2605 err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
2606ex_put:
2607 put_res(dev, slave, qpn, RES_QP);
2608
2609 return err;
2610}
2611
2612enum {
2613 BUSY_MAX_RETRIES = 10
2614};
2615
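/*
 * QUERY_IF_STAT: just make sure the counter index really belongs to
 * the calling slave before passing the query through.
 */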
2616int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
2617 struct mlx4_vhcr *vhcr,
2618 struct mlx4_cmd_mailbox *inbox,
2619 struct mlx4_cmd_mailbox *outbox,
2620 struct mlx4_cmd_info *cmd)
2621{
2622 int err;
2623 int index = vhcr->in_modifier & 0xffff;
2624
2625 err = get_res(dev, slave, index, RES_COUNTER, NULL);
2626 if (err)
2627 return err;
2628
2629 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2630 put_res(dev, slave, index, RES_COUNTER);
2631 return err;
2632}
2633
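/*
 * Detach a dying QP from every multicast group it is still attached
 * to. This is cleanup on behalf of a dead slave, so errors from the
 * individual detaches are deliberately ignored.
 */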
2634static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2635{
2636 struct res_gid *rgid;
2637 struct res_gid *tmp;
2638 int err;
2639 struct mlx4_qp qp; /* dummy for calling attach/detach */
2640
2641 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2642 qp.qpn = rqp->local_qpn;
2643 err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2644 MLX4_MC_STEER);
2645 list_del(&rgid->list);
2646 kfree(rgid);
2647 }
2648}
2649
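/*
 * Mark every resource of the given type owned by the slave busy and
 * "removing" so no further command can claim it. Returns how many
 * resources were already busy with something else and could not be
 * claimed on this pass.
 */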
2650static int _move_all_busy(struct mlx4_dev *dev, int slave,
2651 enum mlx4_resource type, int print)
2652{
2653 struct mlx4_priv *priv = mlx4_priv(dev);
2654 struct mlx4_resource_tracker *tracker =
2655 &priv->mfunc.master.res_tracker;
2656 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
2657 struct res_common *r;
2658 struct res_common *tmp;
2659 int busy;
2660
2661 busy = 0;
2662 spin_lock_irq(mlx4_tlock(dev));
2663 list_for_each_entry_safe(r, tmp, rlist, list) {
2664 if (r->owner == slave) {
2665 if (!r->removing) {
2666 if (r->state == RES_ANY_BUSY) {
2667 if (print)
2668 mlx4_dbg(dev,
2669 "%s id 0x%x is busy\n",
2670 ResourceType(type),
2671 r->res_id);
2672 ++busy;
2673 } else {
2674 r->from_state = r->state;
2675 r->state = RES_ANY_BUSY;
2676 r->removing = 1;
2677 }
2678 }
2679 }
2680 }
2681 spin_unlock_irq(mlx4_tlock(dev));
2682
2683 return busy;
2684}
2685
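/*
 * Retry _move_all_busy() for up to five seconds, yielding between
 * passes; a final pass with printing enabled reports whatever is still
 * stuck.
 */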
2686static int move_all_busy(struct mlx4_dev *dev, int slave,
2687 enum mlx4_resource type)
2688{
2689 unsigned long begin;
2690 int busy;
2691
2692 begin = jiffies;
2693 do {
2694 busy = _move_all_busy(dev, slave, type, 0);
2695 if (time_after(jiffies, begin + 5 * HZ))
2696 break;
2697 if (busy)
2698 cond_resched();
2699 } while (busy);
2700
2701 if (busy)
2702 busy = _move_all_busy(dev, slave, type, 1);
2703
2704 return busy;
2705}
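
/*
 * Force-free all QPs of a dead slave, unwinding each through the state
 * machine: RES_QP_HW -> 2RST_QP command and drop CQ/SRQ/MTT references,
 * RES_QP_MAPPED -> free the ICM, RES_QP_RESERVED -> drop the tracker
 * entry.
 */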
2706static void rem_slave_qps(struct mlx4_dev *dev, int slave)
2707{
2708 struct mlx4_priv *priv = mlx4_priv(dev);
2709 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2710 struct list_head *qp_list =
2711 &tracker->slave_list[slave].res_list[RES_QP];
2712 struct res_qp *qp;
2713 struct res_qp *tmp;
2714 int state;
2715 u64 in_param;
2716 int qpn;
2717 int err;
2718
2719 err = move_all_busy(dev, slave, RES_QP);
2720 if (err)
2721 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
2722 "for slave %d\n", slave);
2723
2724 spin_lock_irq(mlx4_tlock(dev));
2725 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
2726 spin_unlock_irq(mlx4_tlock(dev));
2727 if (qp->com.owner == slave) {
2728 qpn = qp->com.res_id;
2729 detach_qp(dev, slave, qp);
2730 state = qp->com.from_state;
2731 while (state != 0) {
2732 switch (state) {
2733 case RES_QP_RESERVED:
2734 spin_lock_irq(mlx4_tlock(dev));
2735 radix_tree_delete(&tracker->res_tree[RES_QP],
2736 qp->com.res_id);
2737 list_del(&qp->com.list);
2738 spin_unlock_irq(mlx4_tlock(dev));
2739 kfree(qp);
2740 state = 0;
2741 break;
2742 case RES_QP_MAPPED:
2743 if (!valid_reserved(dev, slave, qpn))
2744 __mlx4_qp_free_icm(dev, qpn);
2745 state = RES_QP_RESERVED;
2746 break;
2747 case RES_QP_HW:
2748 in_param = slave;
2749 err = mlx4_cmd(dev, in_param,
2750 qp->local_qpn, 2,
2751 MLX4_CMD_2RST_QP,
2752 MLX4_CMD_TIME_CLASS_A,
2753 MLX4_CMD_NATIVE);
2754 if (err)
2755 mlx4_dbg(dev, "rem_slave_qps: failed"
2756 " to move slave %d qpn %d to"
2757 " reset\n", slave,
2758 qp->local_qpn);
2759 atomic_dec(&qp->rcq->ref_count);
2760 atomic_dec(&qp->scq->ref_count);
2761 atomic_dec(&qp->mtt->ref_count);
2762 if (qp->srq)
2763 atomic_dec(&qp->srq->ref_count);
2764 state = RES_QP_MAPPED;
2765 break;
2766 default:
2767 state = 0;
2768 }
2769 }
2770 }
2771 spin_lock_irq(mlx4_tlock(dev));
2772 }
2773 spin_unlock_irq(mlx4_tlock(dev));
2774}
2775
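/*
 * Force-free a dead slave's SRQs: HW2SW_SRQ while firmware still owns
 * them (dropping the MTT and CQ references taken at creation), then
 * free the ICM and the tracker entry.
 */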
2776static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
2777{
2778 struct mlx4_priv *priv = mlx4_priv(dev);
2779 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2780 struct list_head *srq_list =
2781 &tracker->slave_list[slave].res_list[RES_SRQ];
2782 struct res_srq *srq;
2783 struct res_srq *tmp;
2784 int state;
2785 u64 in_param;
2787 int srqn;
2788 int err;
2789
2790 err = move_all_busy(dev, slave, RES_SRQ);
2791 if (err)
2792 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
2793 "busy for slave %d\n", slave);
2794
2795 spin_lock_irq(mlx4_tlock(dev));
2796 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
2797 spin_unlock_irq(mlx4_tlock(dev));
2798 if (srq->com.owner == slave) {
2799 srqn = srq->com.res_id;
2800 state = srq->com.from_state;
2801 while (state != 0) {
2802 switch (state) {
2803 case RES_SRQ_ALLOCATED:
2804 __mlx4_srq_free_icm(dev, srqn);
2805 spin_lock_irq(mlx4_tlock(dev));
2806 radix_tree_delete(&tracker->res_tree[RES_SRQ],
2807 srqn);
2808 list_del(&srq->com.list);
2809 spin_unlock_irq(mlx4_tlock(dev));
2810 kfree(srq);
2811 state = 0;
2812 break;
2813
2814 case RES_SRQ_HW:
2815 in_param = slave;
2816 err = mlx4_cmd(dev, in_param, srqn, 1,
2817 MLX4_CMD_HW2SW_SRQ,
2818 MLX4_CMD_TIME_CLASS_A,
2819 MLX4_CMD_NATIVE);
2820 if (err)
2821 mlx4_dbg(dev, "rem_slave_srqs: failed"
2822 " to move slave %d srq %d to"
2823 " SW ownership\n",
2824 slave, srqn);
2825
2826 atomic_dec(&srq->mtt->ref_count);
2827 if (srq->cq)
2828 atomic_dec(&srq->cq->ref_count);
2829 state = RES_SRQ_ALLOCATED;
2830 break;
2831
2832 default:
2833 state = 0;
2834 }
2835 }
2836 }
2837 spin_lock_irq(mlx4_tlock(dev));
2838 }
2839 spin_unlock_irq(mlx4_tlock(dev));
2840}
2841
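/*
 * Force-free a dead slave's CQs. CQs with a non-zero ref_count are
 * skipped; rem_slave_qps() and rem_slave_srqs() run first so those
 * references should already be gone.
 */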
2842static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
2843{
2844 struct mlx4_priv *priv = mlx4_priv(dev);
2845 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2846 struct list_head *cq_list =
2847 &tracker->slave_list[slave].res_list[RES_CQ];
2848 struct res_cq *cq;
2849 struct res_cq *tmp;
2850 int state;
2851 u64 in_param;
2853 int cqn;
2854 int err;
2855
2856 err = move_all_busy(dev, slave, RES_CQ);
2857 if (err)
2858 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
2859 "busy for slave %d\n", slave);
2860
2861 spin_lock_irq(mlx4_tlock(dev));
2862 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
2863 spin_unlock_irq(mlx4_tlock(dev));
2864 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
2865 cqn = cq->com.res_id;
2866 state = cq->com.from_state;
2867 while (state != 0) {
2868 switch (state) {
2869 case RES_CQ_ALLOCATED:
2870 __mlx4_cq_free_icm(dev, cqn);
2871 spin_lock_irq(mlx4_tlock(dev));
2872 radix_tree_delete(&tracker->res_tree[RES_CQ],
2873 cqn);
2874 list_del(&cq->com.list);
2875 spin_unlock_irq(mlx4_tlock(dev));
2876 kfree(cq);
2877 state = 0;
2878 break;
2879
2880 case RES_CQ_HW:
2881 in_param = slave;
2882 err = mlx4_cmd(dev, in_param, cqn, 1,
2883 MLX4_CMD_HW2SW_CQ,
2884 MLX4_CMD_TIME_CLASS_A,
2885 MLX4_CMD_NATIVE);
2886 if (err)
2887 mlx4_dbg(dev, "rem_slave_cqs: failed"
2888 " to move slave %d cq %d to"
2889 " SW ownership\n",
2890 slave, cqn);
2891 atomic_dec(&cq->mtt->ref_count);
2892 state = RES_CQ_ALLOCATED;
2893 break;
2894
2895 default:
2896 state = 0;
2897 }
2898 }
2899 }
2900 spin_lock_irq(mlx4_tlock(dev));
2901 }
2902 spin_unlock_irq(mlx4_tlock(dev));
2903}
2904
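/*
 * Force-free a dead slave's memory regions: HW2SW_MPT while firmware
 * owns the MPT, then free its ICM, then release the key reservation.
 */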
2905static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
2906{
2907 struct mlx4_priv *priv = mlx4_priv(dev);
2908 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2909 struct list_head *mpt_list =
2910 &tracker->slave_list[slave].res_list[RES_MPT];
2911 struct res_mpt *mpt;
2912 struct res_mpt *tmp;
2913 int state;
2914 u64 in_param;
2916 int mptn;
2917 int err;
2918
2919 err = move_all_busy(dev, slave, RES_MPT);
2920 if (err)
2921 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
2922 "busy for slave %d\n", slave);
2923
2924 spin_lock_irq(mlx4_tlock(dev));
2925 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
2926 spin_unlock_irq(mlx4_tlock(dev));
2927 if (mpt->com.owner == slave) {
2928 mptn = mpt->com.res_id;
2929 state = mpt->com.from_state;
2930 while (state != 0) {
2931 switch (state) {
2932 case RES_MPT_RESERVED:
2933 __mlx4_mr_release(dev, mpt->key);
2934 spin_lock_irq(mlx4_tlock(dev));
2935 radix_tree_delete(&tracker->res_tree[RES_MPT],
2936 mptn);
2937 list_del(&mpt->com.list);
2938 spin_unlock_irq(mlx4_tlock(dev));
2939 kfree(mpt);
2940 state = 0;
2941 break;
2942
2943 case RES_MPT_MAPPED:
2944 __mlx4_mr_free_icm(dev, mpt->key);
2945 state = RES_MPT_RESERVED;
2946 break;
2947
2948 case RES_MPT_HW:
2949 in_param = slave;
2950 err = mlx4_cmd(dev, in_param, mptn, 0,
2951 MLX4_CMD_HW2SW_MPT,
2952 MLX4_CMD_TIME_CLASS_A,
2953 MLX4_CMD_NATIVE);
2954 if (err)
2955 mlx4_dbg(dev, "rem_slave_mrs: failed"
2956 " to move slave %d mpt %d to"
2957 " SW ownership\n",
2958 slave, mptn);
2959 if (mpt->mtt)
2960 atomic_dec(&mpt->mtt->ref_count);
2961 state = RES_MPT_MAPPED;
2962 break;
2963 default:
2964 state = 0;
2965 }
2966 }
2967 }
2968 spin_lock_irq(mlx4_tlock(dev));
2969 }
2970 spin_unlock_irq(mlx4_tlock(dev));
2971}
2972
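/*
 * Force-free a dead slave's MTT ranges. This runs last in
 * mlx4_delete_all_resources_for_slave(), after everything that could
 * hold a reference on an MTT has been torn down.
 */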
2973static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
2974{
2975 struct mlx4_priv *priv = mlx4_priv(dev);
2976 struct mlx4_resource_tracker *tracker =
2977 &priv->mfunc.master.res_tracker;
2978 struct list_head *mtt_list =
2979 &tracker->slave_list[slave].res_list[RES_MTT];
2980 struct res_mtt *mtt;
2981 struct res_mtt *tmp;
2982 int state;
2984 int base;
2985 int err;
2986
2987 err = move_all_busy(dev, slave, RES_MTT);
2988 if (err)
2989 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
2990 "busy for slave %d\n", slave);
2991
2992 spin_lock_irq(mlx4_tlock(dev));
2993 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
2994 spin_unlock_irq(mlx4_tlock(dev));
2995 if (mtt->com.owner == slave) {
2996 base = mtt->com.res_id;
2997 state = mtt->com.from_state;
2998 while (state != 0) {
2999 switch (state) {
3000 case RES_MTT_ALLOCATED:
3001 __mlx4_free_mtt_range(dev, base,
3002 mtt->order);
3003 spin_lock_irq(mlx4_tlock(dev));
3004 radix_tree_delete(&tracker->res_tree[RES_MTT],
3005 base);
3006 list_del(&mtt->com.list);
3007 spin_unlock_irq(mlx4_tlock(dev));
3008 kfree(mtt);
3009 state = 0;
3010 break;
3011
3012 default:
3013 state = 0;
3014 }
3015 }
3016 }
3017 spin_lock_irq(mlx4_tlock(dev));
3018 }
3019 spin_unlock_irq(mlx4_tlock(dev));
3020}
3021
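/*
 * Force-free a dead slave's EQs. If the command mailbox cannot be
 * allocated, the same state is retried after rescheduling rather than
 * leaking the EQ.
 */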
3022static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3023{
3024 struct mlx4_priv *priv = mlx4_priv(dev);
3025 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3026 struct list_head *eq_list =
3027 &tracker->slave_list[slave].res_list[RES_EQ];
3028 struct res_eq *eq;
3029 struct res_eq *tmp;
3030 int err;
3031 int state;
3033 int eqn;
3034 struct mlx4_cmd_mailbox *mailbox;
3035
3036 err = move_all_busy(dev, slave, RES_EQ);
3037 if (err)
3038 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3039 "busy for slave %d\n", slave);
3040
3041 spin_lock_irq(mlx4_tlock(dev));
3042 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3043 spin_unlock_irq(mlx4_tlock(dev));
3044 if (eq->com.owner == slave) {
3045 eqn = eq->com.res_id;
3046 state = eq->com.from_state;
3047 while (state != 0) {
3048 switch (state) {
3049 case RES_EQ_RESERVED:
3050 spin_lock_irq(mlx4_tlock(dev));
3051 radix_tree_delete(&tracker->res_tree[RES_EQ],
3052 eqn);
3053 list_del(&eq->com.list);
3054 spin_unlock_irq(mlx4_tlock(dev));
3055 kfree(eq);
3056 state = 0;
3057 break;
3058
3059 case RES_EQ_HW:
3060 mailbox = mlx4_alloc_cmd_mailbox(dev);
3061 if (IS_ERR(mailbox)) {
3062 cond_resched();
3063 continue;
3064 }
3065 err = mlx4_cmd_box(dev, slave, 0,
3066 eqn & 0xff, 0,
3067 MLX4_CMD_HW2SW_EQ,
3068 MLX4_CMD_TIME_CLASS_A,
3069 MLX4_CMD_NATIVE);
3070 if (err)
3071 mlx4_dbg(dev, "rem_slave_eqs: failed to move"
3072 " slave %d eqs %d to SW ownership\n", slave, eqn);
3073 mlx4_free_cmd_mailbox(dev, mailbox);
3074 if (!err) {
3075 atomic_dec(&eq->mtt->ref_count);
3076 state = RES_EQ_RESERVED;
3077 }
3078 break;
3079
3080 default:
3081 state = 0;
3082 }
3083 }
3084 }
3085 spin_lock_irq(mlx4_tlock(dev));
3086 }
3087 spin_unlock_irq(mlx4_tlock(dev));
3088}
3089
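/*
 * Top-level teardown for a dead slave, under its resource-tracker
 * mutex. The order matters: QPs and SRQs are released before the CQs
 * they reference, and MTT ranges are freed last once nothing pins them.
 */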
3090void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3091{
3092 struct mlx4_priv *priv = mlx4_priv(dev);
3093
3094 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3095 /* VLAN resources are not tracked yet; only MAC registrations are released here */
3096 rem_slave_macs(dev, slave);
3097 rem_slave_qps(dev, slave);
3098 rem_slave_srqs(dev, slave);
3099 rem_slave_cqs(dev, slave);
3100 rem_slave_mrs(dev, slave);
3101 rem_slave_eqs(dev, slave);
3102 rem_slave_mtts(dev, slave);
3103 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3104}