/*
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/random.h>

#include <rdma/ib_cache.h>
#include "sa.h"

static void mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device);

static struct ib_client mcast_client = {
	.name   = "ib_multicast",
	.add    = mcast_add_one,
	.remove = mcast_remove_one
};

static struct ib_sa_client	sa_client;
static struct workqueue_struct	*mcast_wq;
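/*
 * The all-zero MGID: a join request that specifies a zero MGID asks the SA
 * to create the group and assign an MGID, so such requests always allocate
 * a new group entry (see acquire_group() and join_handler()).
 */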
static union ib_gid mgid0;

struct mcast_device;

struct mcast_port {
	struct mcast_device	*dev;
	spinlock_t		lock;
	struct rb_root		table;
	atomic_t		refcount;
	struct completion	comp;
	u8			port_num;
};

struct mcast_device {
	struct ib_device	*device;
	struct ib_event_handler	event_handler;
	int			start_port;
	int			end_port;
	struct mcast_port	port[0];
};

enum mcast_state {
	MCAST_JOINING,
	MCAST_MEMBER,
	MCAST_ERROR,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_BUSY,
	MCAST_GROUP_ERROR,
	MCAST_PKEY_EVENT
};

enum {
	MCAST_INVALID_PKEY_INDEX = 0xFFFF
};

struct mcast_member;

struct mcast_group {
	struct ib_sa_mcmember_rec rec;
	struct rb_node		node;
	struct mcast_port	*port;
	spinlock_t		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	struct list_head	active_list;
	struct mcast_member	*last_join;
	int			members[3];
	atomic_t		refcount;
	enum mcast_group_state	state;
	struct ib_sa_query	*query;
	int			query_id;
	u16			pkey_index;
	u8			leave_state;
	int			retries;
};

struct mcast_member {
	struct ib_sa_multicast	multicast;
	struct ib_sa_client	*client;
	struct mcast_group	*group;
	struct list_head	list;
	enum mcast_state	state;
	atomic_t		refcount;
	struct completion	comp;
};

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context);
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context);

static struct mcast_group *mcast_find(struct mcast_port *port,
				      union ib_gid *mgid)
{
	struct rb_node *node = port->table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mcast_port *port,
					struct mcast_group *group,
					int allow_duplicates)
{
	struct rb_node **link = &port->table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else if (allow_duplicates)
			link = &(*link)->rb_left;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &port->table);
	return NULL;
}

static void deref_port(struct mcast_port *port)
{
	if (atomic_dec_and_test(&port->refcount))
		complete(&port->comp);
}

static void release_group(struct mcast_group *group)
{
	struct mcast_port *port = group->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	if (atomic_dec_and_test(&group->refcount)) {
		rb_erase(&group->node, &port->table);
		spin_unlock_irqrestore(&port->lock, flags);
		kfree(group);
		deref_port(port);
	} else
		spin_unlock_irqrestore(&port->lock, flags);
}

static void deref_member(struct mcast_member *member)
{
	if (atomic_dec_and_test(&member->refcount))
		complete(&member->comp);
}

static void queue_join(struct mcast_member *member)
{
	struct mcast_group *group = member->group;
	unsigned long flags;

	spin_lock_irqsave(&group->lock, flags);
	list_add_tail(&member->list, &group->pending_list);
	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		atomic_inc(&group->refcount);
		queue_work(mcast_wq, &group->work);
	}
	spin_unlock_irqrestore(&group->lock, flags);
}

/*
 * A multicast group has three types of members: full member, non member, and
 * send only member.  We need to keep track of the number of members of each
 * type based on their join state.  Adjust the number of members that belong
 * to the specified join states.
 */
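/*
 * For reference, the MCMemberRecord JoinState bits tracked in members[] are,
 * per the InfiniBand spec: 0x1 full member, 0x2 non member, 0x4 send-only
 * non member.
 */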
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}

/*
 * If a multicast group has zero members left for a particular join state, but
 * the group is still a member with the SA, we need to leave that join state.
 * Determine which join states we still belong to, but that do not have any
 * active members.
 */
static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (0x1 << i);

	return leave_state & group->rec.join_state;
}

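/*
 * Returns nonzero if the group's existing value (src_value) does not satisfy
 * the selector/value pair requested by the joining member (dst_value).  A
 * selector is only enforced when both of its mask bits are set in comp_mask.
 */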
static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 selector, u8 src_value, u8 dst_value)
{
	int err;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}

static int cmp_rec(struct ib_sa_mcmember_rec *src,
		   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
{
	/* MGID must already match */

	if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
	    memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
			   src->mtu, dst->mtu))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->traffic_class != dst->traffic_class)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE, dst->rate_selector,
			   src->rate, dst->rate))
		return -EINVAL;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   dst->packet_life_time_selector,
			   src->packet_life_time, dst->packet_life_time))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    src->flow_label != dst->flow_label)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    src->hop_limit != dst->hop_limit)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
		return -EINVAL;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}

static int send_join(struct mcast_group *group, struct mcast_member *member)
{
	struct mcast_port *port = group->port;
	int ret;

	group->last_join = member;
	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_MGMT_METHOD_SET,
				       &member->multicast.rec,
				       member->multicast.comp_mask,
				       3000, GFP_KERNEL, join_handler, group,
				       &group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}

static int send_leave(struct mcast_group *group, u8 leave_state)
{
	struct mcast_port *port = group->port;
	struct ib_sa_mcmember_rec rec;
	int ret;

	rec = group->rec;
	rec.join_state = leave_state;
	group->leave_state = leave_state;

	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_SA_METHOD_DELETE, &rec,
				       IB_SA_MCMEMBER_REC_MGID |
				       IB_SA_MCMEMBER_REC_PORT_GID |
				       IB_SA_MCMEMBER_REC_JOIN_STATE,
				       3000, GFP_KERNEL, leave_handler,
				       group, &group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}

static void join_group(struct mcast_group *group, struct mcast_member *member,
		       u8 join_state)
{
	member->state = MCAST_MEMBER;
	adjust_membership(group, join_state, 1);
	group->rec.join_state |= join_state;
	member->multicast.rec = group->rec;
	member->multicast.rec.join_state = join_state;
	list_move(&member->list, &group->active_list);
}

static int fail_join(struct mcast_group *group, struct mcast_member *member,
		     int status)
{
	spin_lock_irq(&group->lock);
	list_del_init(&member->list);
	spin_unlock_irq(&group->lock);
	return member->multicast.callback(status, &member->multicast);
}

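/*
 * A port event has invalidated the group's membership.  Unless a pkey change
 * merely left the group's pkey at the same index, report -ENETRESET to every
 * active member and clear the join state so the group can be rejoined.
 */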
static void process_group_error(struct mcast_group *group)
{
	struct mcast_member *member;
	int ret = 0;
	u16 pkey_index;

	if (group->state == MCAST_PKEY_EVENT)
		ret = ib_find_pkey(group->port->dev->device,
				   group->port->port_num,
				   be16_to_cpu(group->rec.pkey), &pkey_index);

	spin_lock_irq(&group->lock);
	if (group->state == MCAST_PKEY_EVENT && !ret &&
	    group->pkey_index == pkey_index)
		goto out;

	while (!list_empty(&group->active_list)) {
		member = list_entry(group->active_list.next,
				    struct mcast_member, list);
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		adjust_membership(group, member->multicast.rec.join_state, -1);
		member->state = MCAST_ERROR;
		spin_unlock_irq(&group->lock);

		ret = member->multicast.callback(-ENETRESET,
						 &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	group->rec.join_state = 0;
out:
	group->state = MCAST_BUSY;
	spin_unlock_irq(&group->lock);
}

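/*
 * Per-group work handler: processes queued join requests, handles group
 * error/pkey events, sends an SA leave for any join states that no longer
 * have members, and returns the group to the IDLE state when done.
 */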
static void mcast_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int status, ret;
	u8 join_state;

	group = container_of(work, typeof(*group), work);
retest:
	spin_lock_irq(&group->lock);
	while (!list_empty(&group->pending_list) ||
	       (group->state != MCAST_BUSY)) {

		if (group->state != MCAST_BUSY) {
			spin_unlock_irq(&group->lock);
			process_group_error(group);
			goto retest;
		}

		member = list_entry(group->pending_list.next,
				    struct mcast_member, list);
		multicast = &member->multicast;
		join_state = multicast->rec.join_state;
		atomic_inc(&member->refcount);

		if (join_state == (group->rec.join_state & join_state)) {
			status = cmp_rec(&group->rec, &multicast->rec,
					 multicast->comp_mask);
			if (!status)
				join_group(group, member, join_state);
			else
				list_del_init(&member->list);
			spin_unlock_irq(&group->lock);
			ret = multicast->callback(status, multicast);
		} else {
			spin_unlock_irq(&group->lock);
			status = send_join(group, member);
			if (!status) {
				deref_member(member);
				return;
			}
			ret = fail_join(group, member, status);
		}

		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	join_state = get_leave_state(group);
	if (join_state) {
		group->rec.join_state &= ~join_state;
		spin_unlock_irq(&group->lock);
		if (send_leave(group, join_state))
			goto retest;
	} else {
		group->state = MCAST_IDLE;
		spin_unlock_irq(&group->lock);
		release_group(group);
	}
}

/*
 * Fail a join request if it is still active - at the head of the pending queue.
 */
static void process_join_error(struct mcast_group *group, int status)
{
	struct mcast_member *member;
	int ret;

	spin_lock_irq(&group->lock);
	member = list_entry(group->pending_list.next,
			    struct mcast_member, list);
	if (group->last_join == member) {
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		spin_unlock_irq(&group->lock);
		ret = member->multicast.callback(status, &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
	} else
		spin_unlock_irq(&group->lock);
}

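/*
 * SA join completion: on success, record the SA-assigned member record and
 * pkey index; if the group was created with the zero MGID, re-insert it under
 * the MGID the SA assigned.  The work handler is then re-run to complete the
 * pending join.
 */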
static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context)
{
	struct mcast_group *group = context;
	u16 pkey_index = MCAST_INVALID_PKEY_INDEX;

	if (status)
		process_join_error(group, status);
	else {
		ib_find_pkey(group->port->dev->device, group->port->port_num,
			     be16_to_cpu(rec->pkey), &pkey_index);

		spin_lock_irq(&group->port->lock);
		group->rec = *rec;
		if (group->state == MCAST_BUSY &&
		    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
			group->pkey_index = pkey_index;
		if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
			rb_erase(&group->node, &group->port->table);
			mcast_insert(group->port, group, 1);
		}
		spin_unlock_irq(&group->port->lock);
	}
	mcast_work_handler(&group->work);
}

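/*
 * SA leave completion: retry a failed leave up to group->retries times before
 * letting the work handler continue.
 */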
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context)
{
	struct mcast_group *group = context;

	if (status && group->retries > 0 &&
	    !send_leave(group, group->leave_state))
		group->retries--;
	else
		mcast_work_handler(&group->work);
}

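/*
 * Find the group for the given MGID on this port, or allocate and insert a
 * new one.  Zero-MGID requests always allocate a new group so the SA can
 * assign the MGID.  Returns with a reference held on the group.
 */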
static struct mcast_group *acquire_group(struct mcast_port *port,
					 union ib_gid *mgid, gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	unsigned long flags;
	int is_mgid0;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		spin_lock_irqsave(&port->lock, flags);
		group = mcast_find(port, mgid);
		if (group)
			goto found;
		spin_unlock_irqrestore(&port->lock, flags);
	}

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return NULL;

	group->retries = 3;
	group->port = port;
	group->rec.mgid = *mgid;
	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->active_list);
	INIT_WORK(&group->work, mcast_work_handler);
	spin_lock_init(&group->lock);

	spin_lock_irqsave(&port->lock, flags);
	cur_group = mcast_insert(port, group, is_mgid0);
	if (cur_group) {
		kfree(group);
		group = cur_group;
	} else
		atomic_inc(&port->refcount);
found:
	atomic_inc(&group->refcount);
	spin_unlock_irqrestore(&port->lock, flags);
	return group;
}

/*
 * We serialize all join requests to a single group to make our lives much
 * easier.  Otherwise, two users could try to join the same group
 * simultaneously, with different configurations, one could leave while the
 * join is in progress, etc., which makes locking around error recovery
 * difficult.
 */
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
		     struct ib_device *device, u8 port_num,
		     struct ib_sa_mcmember_rec *rec,
		     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
		     int (*callback)(int status,
				     struct ib_sa_multicast *multicast),
		     void *context)
{
	struct mcast_device *dev;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int ret;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return ERR_PTR(-ENODEV);

	member = kmalloc(sizeof *member, gfp_mask);
	if (!member)
		return ERR_PTR(-ENOMEM);

	ib_sa_client_get(client);
	member->client = client;
	member->multicast.rec = *rec;
	member->multicast.comp_mask = comp_mask;
	member->multicast.callback = callback;
	member->multicast.context = context;
	init_completion(&member->comp);
	atomic_set(&member->refcount, 1);
	member->state = MCAST_JOINING;

	member->group = acquire_group(&dev->port[port_num - dev->start_port],
				      &rec->mgid, gfp_mask);
	if (!member->group) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * The user will get the multicast structure in their callback.  They
	 * could then free the multicast structure before we can return from
	 * this routine.  So we save the pointer to return before queuing
	 * any callback.
	 */
	multicast = &member->multicast;
	queue_join(member);
	return multicast;

err:
	ib_sa_client_put(client);
	kfree(member);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_join_multicast);
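/*
 * Illustrative caller sketch (hypothetical names my_client, my_ctx and
 * my_join_cb; not part of this file).  A full-member join supplies at least
 * the MGID, port GID and join state; a nonzero return from the callback makes
 * the core free the multicast on the caller's behalf:
 *
 *	rec.mgid       = mgid;
 *	rec.port_gid   = port_gid;
 *	rec.join_state = 1;	(full member)
 *	mc = ib_sa_join_multicast(&my_client, device, port_num, &rec,
 *				  IB_SA_MCMEMBER_REC_MGID |
 *				  IB_SA_MCMEMBER_REC_PORT_GID |
 *				  IB_SA_MCMEMBER_REC_JOIN_STATE,
 *				  GFP_KERNEL, my_join_cb, my_ctx);
 *	if (IS_ERR(mc))
 *		return PTR_ERR(mc);
 */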

void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
{
	struct mcast_member *member;
	struct mcast_group *group;

	member = container_of(multicast, struct mcast_member, multicast);
	group = member->group;

	spin_lock_irq(&group->lock);
	if (member->state == MCAST_MEMBER)
		adjust_membership(group, multicast->rec.join_state, -1);

	list_del_init(&member->list);

	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		spin_unlock_irq(&group->lock);
		/* Continue to hold reference on group until callback */
		queue_work(mcast_wq, &group->work);
	} else {
		spin_unlock_irq(&group->lock);
		release_group(group);
	}

	deref_member(member);
	wait_for_completion(&member->comp);
	ib_sa_client_put(member->client);
	kfree(member);
}
EXPORT_SYMBOL(ib_sa_free_multicast);

int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
			   union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	struct mcast_group *group;
	unsigned long flags;
	int ret = 0;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return -ENODEV;

	port = &dev->port[port_num - dev->start_port];
	spin_lock_irqsave(&port->lock, flags);
	group = mcast_find(port, mgid);
	if (group)
		*rec = group->rec;
	else
		ret = -EADDRNOTAVAIL;
	spin_unlock_irqrestore(&port->lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);

int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
			     struct ib_sa_mcmember_rec *rec,
			     struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	u8 p;

	ret = ib_find_cached_gid(device, &rec->port_gid, &p, &gid_index);
	if (ret)
		return ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->mlid);
	ah_attr->sl = rec->sl;
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	ah_attr->ah_flags = IB_AH_GRH;
	ah_attr->grh.dgid = rec->mgid;

	ah_attr->grh.sgid_index = (u8) gid_index;
	ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
	ah_attr->grh.hop_limit = rec->hop_limit;
	ah_attr->grh.traffic_class = rec->traffic_class;

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);

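/*
 * Port-level events (LID/SM/pkey changes, port errors) invalidate existing
 * memberships: mark every group on the port with the event state and queue
 * its work so its members are notified via process_group_error().
 */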
static void mcast_groups_event(struct mcast_port *port,
			       enum mcast_group_state state)
{
	struct mcast_group *group;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	for (node = rb_first(&port->table); node; node = rb_next(node)) {
		group = rb_entry(node, struct mcast_group, node);
		spin_lock(&group->lock);
		if (group->state == MCAST_IDLE) {
			atomic_inc(&group->refcount);
			queue_work(mcast_wq, &group->work);
		}
		if (group->state != MCAST_GROUP_ERROR)
			group->state = state;
		spin_unlock(&group->lock);
	}
	spin_unlock_irqrestore(&port->lock, flags);
}

static void mcast_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	struct mcast_device *dev;
	int index;

	dev = container_of(handler, struct mcast_device, event_handler);
	index = event->element.port_num - dev->start_port;

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
		break;
	case IB_EVENT_PKEY_CHANGE:
		mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
		break;
	default:
		break;
	}
}

static void mcast_add_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
		      GFP_KERNEL);
	if (!dev)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		dev->start_port = dev->end_port = 0;
	else {
		dev->start_port = 1;
		dev->end_port = device->phys_port_cnt;
	}

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		port = &dev->port[i];
		port->dev = dev;
		port->port_num = dev->start_port + i;
		spin_lock_init(&port->lock);
		port->table = RB_ROOT;
		init_completion(&port->comp);
		atomic_set(&port->refcount, 1);
	}

	dev->device = device;
	ib_set_client_data(device, &mcast_client, dev);

	INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
	ib_register_event_handler(&dev->event_handler);
}

static void mcast_remove_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return;

	ib_unregister_event_handler(&dev->event_handler);
	flush_workqueue(mcast_wq);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		port = &dev->port[i];
		deref_port(port);
		wait_for_completion(&port->comp);
	}

	kfree(dev);
}

int mcast_init(void)
{
	int ret;

	mcast_wq = create_singlethread_workqueue("ib_mcast");
	if (!mcast_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);

	ret = ib_register_client(&mcast_client);
	if (ret)
		goto err;
	return 0;

err:
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
	return ret;
}

void mcast_cleanup(void)
{
	ib_unregister_client(&mcast_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
}