/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)
#define MLX4_MAC_MASK		0x7fffffffffffffffULL
#define ETH_ALEN		6

struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

struct res_common {
	struct list_head list;
	u32 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
	int port;
};

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	default: return "Unknown resource type !!!";
	}
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
				GFP_ATOMIC|__GFP_NOWARN);

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		for (i = 0; i < dev->num_slaves; i++)
			mlx4_delete_all_resources_for_slave(dev, i);

		kfree(priv->mfunc.master.res_tracker.slave_list);
	}
}

static void update_ud_gid(struct mlx4_dev *dev,
			  struct mlx4_qp_context *qp_ctx, u8 slave)
{
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x\n",
		 slave, qp_ctx->pri_path.mgid_index);
}
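
/*
 * Editor's note (not in the original source): 0x80 is the
 * paravirtualization marker in the mgid_index field, so e.g. slave 3
 * always transmits UD packets with gid index 0x80 | 3 == 0x83, no matter
 * what index the guest programmed into its QP context.
 */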

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}
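
/*
 * Worked example (editor's annotation): num_mpts is a power of two, so
 * mpt_mask() yields a low-bit mask.  With dev->caps.num_mpts == 0x10000,
 * an MPT key of 0x3a0042 maps to tracker id 0x3a0042 & 0xffff == 0x0042;
 * the high bits carry the key's "magic" and are not part of the index.
 */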

static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				 res_id);
}

static int get_res(struct mlx4_dev *dev, int slave, int res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENOENT;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	mlx4_dbg(dev, "res %s id 0x%x to busy\n",
		 ResourceType(type), r->res_id);

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    int res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, int res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
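
/*
 * Usage sketch (editor's annotation, not in the original source): callers
 * bracket accesses with get_res()/put_res(), e.g.
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (err)
 *		return err;
 *	... inspect mpt, issue a firmware command ...
 *	put_res(dev, slave, id, RES_MPT);
 *
 * get_res() parks the entry in RES_ANY_BUSY and remembers from_state;
 * put_res() restores it, so a concurrent wrapper sees -EBUSY instead of a
 * half-updated resource.
 */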

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;

	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct radix_tree_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = radix_tree_insert(root, base + i, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i) {
		radix_tree_delete(&tracker->res_tree[type], base + i);
		list_del(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
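
/*
 * Editor's note: add_res_range() is all-or-nothing.  For example, tracking
 * a range of 8 QPs inserts ids base..base+7 into the RES_QP radix tree and
 * onto the slave's list; if any id already exists or an insert fails, every
 * entry added so far is rolled back and the underlying allocation is then
 * released by the caller (see qp_alloc_res() below).
 */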

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	default:
		return -EINVAL;
	}
}

static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		radix_tree_delete(&tracker->res_tree[type], i);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = (struct res_qp *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = (struct res_mpt *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
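
/*
 * Editor's sketch of the move protocol implemented above (illustrative
 * only).  A command wrapper transitions a resource in three steps:
 *
 *	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
 *	if (err)
 *		return err;
 *	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
 *	if (err)
 *		res_abort_move(dev, slave, RES_CQ, cqn);   back to from_state
 *	else
 *		res_end_move(dev, slave, RES_CQ, cqn);     commit to_state
 *
 * While the command is in flight the resource sits in its BUSY state, so
 * any other wrapper touching the same id fails fast with -EBUSY.
 */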

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}

static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	return 0;
}

int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
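
/*
 * Worked example (editor's annotation, assumes the ALLOC_RES command
 * semantics described by the helpers above): a VF asking for 8 QPs aligned
 * to 4 issues an ALLOC_RES command with vhcr->in_modifier == RES_QP,
 * vhcr->op_modifier == RES_OP_RESERVE and vhcr->in_param packing count=8
 * in the low dword and align=4 in the high dword; the reserved base QPN
 * comes back to the guest in the low dword of vhcr->out_param.
 */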

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	return 0;
}

int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
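
/*
 * Worked example (editor's annotation): with log_sq_size = 6 and
 * log_sq_stride = 2 the send queue occupies 1 << (6 + 2 + 4) = 4 KB; if
 * the QP also has a 4 KB receive queue, page_shift = 12 and
 * page_offset = 0, then total_pages = roundup_pow_of_two(8 KB >> 12) = 2
 * MTT entries.
 */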

static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
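
/*
 * Example (editor's annotation): an MTT resource with res_id 0x100 and
 * order 4 spans entries [0x100, 0x110).  A request for 8 entries starting
 * at 0x108 fits; 9 entries starting there would cross 0x110 and is
 * rejected with -EPERM.
 */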

int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}
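
/*
 * Worked example (editor's annotation): EQ entries are 32 bytes, hence the
 * "+ 5".  An EQ with log_eq_size = 10 holds 1024 * 32 = 32 KB; with 4 KB
 * pages (page_shift = 12) that is 1 << (10 + 5 - 12) = 8 MTT entries.  The
 * CQ variant below is identical because CQEs are also 32 bytes.
 */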

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}

int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
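
/*
 * Editor's note with an example: EQ numbers are per slave, so the tracker
 * key packs both, res_id = (slave << 8) | eqn.  Slave 2 creating EQ 5 is
 * tracked as id 0x205, which cannot collide with slave 3's EQ 5 (0x305).
 */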

static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00001931 mtt.offset = 0; /* TBD this is broken but I don't handle it since
1932 we don't really use it */
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001933 mtt.order = 0;
1934 mtt.page_shift = 0;
1935 for (i = 0; i < npages; ++i)
1936 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
1937
1938 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
1939 ((u64 *)page_list + 2));
1940
1941 if (rmtt)
1942 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
1943
1944 return err;
1945}
1946
1947int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
1948 struct mlx4_vhcr *vhcr,
1949 struct mlx4_cmd_mailbox *inbox,
1950 struct mlx4_cmd_mailbox *outbox,
1951 struct mlx4_cmd_info *cmd)
1952{
1953 int eqn = vhcr->in_modifier;
1954 int res_id = eqn | (slave << 8);
1955 struct res_eq *eq;
1956 int err;
1957
1958 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
1959 if (err)
1960 return err;
1961
1962 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
1963 if (err)
1964 goto ex_abort;
1965
1966 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1967 if (err)
1968 goto ex_put;
1969
1970 atomic_dec(&eq->mtt->ref_count);
1971 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
1972 res_end_move(dev, slave, RES_EQ, res_id);
1973 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
1974
1975 return 0;
1976
1977ex_put:
1978 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
1979ex_abort:
1980 res_abort_move(dev, slave, RES_EQ, res_id);
1981
1982 return err;
1983}
1984
1985int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
1986{
1987 struct mlx4_priv *priv = mlx4_priv(dev);
1988 struct mlx4_slave_event_eq_info *event_eq;
1989 struct mlx4_cmd_mailbox *mailbox;
1990 u32 in_modifier = 0;
1991 int err;
1992 int res_id;
1993 struct res_eq *req;
1994
1995 if (!priv->mfunc.master.slave_state)
1996 return -EINVAL;
1997
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00001998 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001999
2000 /* Create the event only if the slave is registered */
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00002001 if (event_eq->eqn < 0)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002002 return 0;
2003
2004 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2005 res_id = (slave << 8) | event_eq->eqn;
2006 err = get_res(dev, slave, res_id, RES_EQ, &req);
2007 if (err)
2008 goto unlock;
2009
2010 if (req->com.from_state != RES_EQ_HW) {
2011 err = -EINVAL;
2012 goto put;
2013 }
2014
2015 mailbox = mlx4_alloc_cmd_mailbox(dev);
2016 if (IS_ERR(mailbox)) {
2017 err = PTR_ERR(mailbox);
2018 goto put;
2019 }
2020
2021 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2022 ++event_eq->token;
2023 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2024 }
2025
2026 memcpy(mailbox->buf, (u8 *) eqe, 28);
2027
2028 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2029
2030 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2031 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2032 MLX4_CMD_NATIVE);
2033
2034 put_res(dev, slave, res_id, RES_EQ);
2035 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2036 mlx4_free_cmd_mailbox(dev, mailbox);
2037 return err;
2038
2039put:
2040 put_res(dev, slave, res_id, RES_EQ);
2041
2042unlock:
2043 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2044 return err;
2045}
2046
2047int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2048 struct mlx4_vhcr *vhcr,
2049 struct mlx4_cmd_mailbox *inbox,
2050 struct mlx4_cmd_mailbox *outbox,
2051 struct mlx4_cmd_info *cmd)
2052{
2053 int eqn = vhcr->in_modifier;
2054 int res_id = eqn | (slave << 8);
2055 struct res_eq *eq;
2056 int err;
2057
2058 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2059 if (err)
2060 return err;
2061
2062 if (eq->com.from_state != RES_EQ_HW) {
2063 err = -EINVAL;
2064 goto ex_put;
2065 }
2066
2067 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2068
2069ex_put:
2070 put_res(dev, slave, res_id, RES_EQ);
2071 return err;
2072}
2073
2074int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2075 struct mlx4_vhcr *vhcr,
2076 struct mlx4_cmd_mailbox *inbox,
2077 struct mlx4_cmd_mailbox *outbox,
2078 struct mlx4_cmd_info *cmd)
2079{
2080 int err;
2081 int cqn = vhcr->in_modifier;
2082 struct mlx4_cq_context *cqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002083 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002084 struct res_cq *cq;
2085 struct res_mtt *mtt;
2086
2087 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2088 if (err)
2089 return err;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002090 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002091 if (err)
2092 goto out_move;
2093 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2094 if (err)
2095 goto out_put;
2096 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2097 if (err)
2098 goto out_put;
2099 atomic_inc(&mtt->ref_count);
2100 cq->mtt = mtt;
2101 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2102 res_end_move(dev, slave, RES_CQ, cqn);
2103 return 0;
2104
2105out_put:
2106 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2107out_move:
2108 res_abort_move(dev, slave, RES_CQ, cqn);
2109 return err;
2110}
2111
2112int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2113 struct mlx4_vhcr *vhcr,
2114 struct mlx4_cmd_mailbox *inbox,
2115 struct mlx4_cmd_mailbox *outbox,
2116 struct mlx4_cmd_info *cmd)
2117{
2118 int err;
2119 int cqn = vhcr->in_modifier;
2120 struct res_cq *cq;
2121
2122 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2123 if (err)
2124 return err;
2125 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2126 if (err)
2127 goto out_move;
2128 atomic_dec(&cq->mtt->ref_count);
2129 res_end_move(dev, slave, RES_CQ, cqn);
2130 return 0;
2131
2132out_move:
2133 res_abort_move(dev, slave, RES_CQ, cqn);
2134 return err;
2135}
2136
2137int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2138 struct mlx4_vhcr *vhcr,
2139 struct mlx4_cmd_mailbox *inbox,
2140 struct mlx4_cmd_mailbox *outbox,
2141 struct mlx4_cmd_info *cmd)
2142{
2143 int cqn = vhcr->in_modifier;
2144 struct res_cq *cq;
2145 int err;
2146
2147 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2148 if (err)
2149 return err;
2150
2151 if (cq->com.from_state != RES_CQ_HW)
2152 goto ex_put;
2153
2154 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2155ex_put:
2156 put_res(dev, slave, cqn, RES_CQ);
2157
2158 return err;
2159}
2160
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}

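/*
 * MODIFY_CQ with op_modifier 0 is a resize and needs MTT re-validation;
 * other modifications are passed straight through to firmware.
 */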
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}

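/* Number of MTT entries spanned by an SRQ buffer, per its context. */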
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}

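/*
 * SW2HW_SRQ: hand an SRQ to the hardware.  The SRQ number in the
 * context must match the command's, and the referenced MTT range must
 * be owned by the slave and large enough for the SRQ buffer.
 */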
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

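/*
 * HW2SW_SRQ: return an SRQ to software ownership, dropping its
 * references on the MTT range and, if one is set, on its CQ.
 */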
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

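/*
 * QUERY_SRQ and ARM_SRQ (below) are legal only while the SRQ is in
 * hardware ownership; otherwise report -EBUSY.
 */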
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

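/*
 * Generic QP command wrapper: check that the slave owns the QP and
 * that it is in hardware ownership before passing the command through.
 */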
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

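/*
 * The QP context starts 8 bytes into the INIT2RTR mailbox; fix up the
 * UD GID for this slave before forwarding the transition.
 */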
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;

	update_ud_gid(dev, qpc, (u8)slave);

	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

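/*
 * 2RST_QP: on success, drop the references the QP held on its MTT,
 * its CQs and, if present, its SRQ, and complete the move back to
 * RES_QP_MAPPED.
 */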
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

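/* Must be called with rqp->mcg_spl held. */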
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}

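/*
 * Track the multicast groups a QP attaches to so they can be detached
 * when the slave is cleaned up (see detach_qp()); rem_mcg_res() below
 * undoes this bookkeeping on an explicit detach.
 */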
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer) {
		err = -EINVAL;
	} else {
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

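/*
 * Attach or detach a slave's QP to/from a multicast group, as selected
 * by op_modifier.  The protocol lives in bits 28-30 of in_modifier,
 * loopback blocking in bit 31, and the steering type in bit 1 of
 * byte 7 of the GID.
 */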
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = add_mcg_res(dev, slave, rqp, gid, prot, type);
		if (err)
			goto ex_put;

		err = mlx4_qp_attach_common(dev, &qp, gid,
					    block_loopback, prot, type);
		if (err)
			goto ex_rem;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
		if (err)
			goto ex_put;
		err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
	}

	put_res(dev, slave, qpn, RES_QP);
	return err;

ex_rem:
	/* ignore error return below, already in error */
	(void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
ex_put:
	put_res(dev, slave, qpn, RES_QP);

	return err;
}

enum {
	BUSY_MAX_RETRIES = 10
};

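/* A slave may only query a counter index it owns. */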
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}

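/*
 * Detach a QP from every multicast group it is still attached to and
 * free the tracking entries; used when tearing down a slave.
 */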
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		qp.qpn = rqp->local_qpn;
		(void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
					     rgid->steer);
		list_del(&rgid->list);
		kfree(rgid);
	}
}

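/*
 * Claim every resource of the given type owned by the slave: mark it
 * busy and flag it for removal.  Returns how many resources could not
 * be claimed because they were already busy; move_all_busy() below
 * retries for up to five seconds before logging the stragglers.
 */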
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%x is busy\n",
							 ResourceType(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}

static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}

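/*
 * The rem_slave_*() functions below share one pattern: claim all of a
 * slave's resources of one type, then unwind each resource state by
 * state (e.g. RES_QP_HW -> RES_QP_MAPPED -> RES_QP_RESERVED) until it
 * is freed, issuing the needed firmware commands as native commands.
 */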
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_QP],
							  qp->com.res_id);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_SRQ],
							  srqn);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_CQ],
							  cqn);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mr_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_MPT],
							  mptn);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mr_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_MTT],
							  base);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_EQ],
							  eqn);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					if (!err) {
						atomic_dec(&eq->mtt->ref_count);
						state = RES_EQ_RESERVED;
					}
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

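/*
 * Tear down everything a slave owns.  QPs, SRQs, CQs, MRs and EQs go
 * before MTTs, since they hold references on the MTT ranges they use.
 */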
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	/*VLAN*/
	rem_slave_macs(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}