/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_multicast.c 1362 2004-12-18 15:56:29Z roland $
 */
36
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>

#include <net/dst.h>

#include "ipoib.h"
49
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
/* Debug verbosity knob for multicast tracing; writable at runtime via sysfs. */
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

/* Serializes queueing/cancelling of mcast_task against the RUN flag. */
static DEFINE_MUTEX(mcast_mutex);
60/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
61struct ipoib_mcast {
62 struct ib_sa_mcmember_rec mcmember;
63 struct ipoib_ah *ah;
64
65 struct rb_node rb_node;
66 struct list_head list;
67 struct completion done;
68
69 int query_id;
70 struct ib_sa_query *query;
71
72 unsigned long created;
73 unsigned long backoff;
74
75 unsigned long flags;
76 unsigned char logcount;
77
78 struct list_head neigh_list;
79
80 struct sk_buff_head pkt_queue;
81
82 struct net_device *dev;
83};
84
85struct ipoib_mcast_iter {
86 struct net_device *dev;
87 union ib_gid mgid;
88 unsigned long created;
89 unsigned int queuelen;
90 unsigned int complete;
91 unsigned int send_only;
92};
93
94static void ipoib_mcast_free(struct ipoib_mcast *mcast)
95{
96 struct net_device *dev = mcast->dev;
97 struct ipoib_dev_priv *priv = netdev_priv(dev);
98 struct ipoib_neigh *neigh, *tmp;
99 unsigned long flags;
Michael S. Tsirkinb36f1702006-01-17 12:19:40 -0800100 int tx_dropped = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101
102 ipoib_dbg_mcast(netdev_priv(dev),
103 "deleting multicast group " IPOIB_GID_FMT "\n",
104 IPOIB_GID_ARG(mcast->mcmember.mgid));
105
106 spin_lock_irqsave(&priv->lock, flags);
107
108 list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
Eli Cohen97460df2006-01-10 07:43:02 -0800109 /*
110 * It's safe to call ipoib_put_ah() inside priv->lock
111 * here, because we know that mcast->ah will always
112 * hold one more reference, so ipoib_put_ah() will
113 * never do more than decrement the ref count.
114 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115 if (neigh->ah)
Eli Cohen97460df2006-01-10 07:43:02 -0800116 ipoib_put_ah(neigh->ah);
Michael S. Tsirkind2e06552006-04-04 19:59:40 +0300117 ipoib_neigh_free(neigh);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118 }
119
120 spin_unlock_irqrestore(&priv->lock, flags);
121
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122 if (mcast->ah)
123 ipoib_put_ah(mcast->ah);
124
Michael S. Tsirkinb36f1702006-01-17 12:19:40 -0800125 while (!skb_queue_empty(&mcast->pkt_queue)) {
126 ++tx_dropped;
Roland Dreier8c608a32005-11-07 10:49:38 -0800127 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
Michael S. Tsirkinb36f1702006-01-17 12:19:40 -0800128 }
129
130 spin_lock_irqsave(&priv->tx_lock, flags);
131 priv->stats.tx_dropped += tx_dropped;
132 spin_unlock_irqrestore(&priv->tx_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700133
134 kfree(mcast);
135}
136
137static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
138 int can_sleep)
139{
140 struct ipoib_mcast *mcast;
141
Roland Dreierde6eb662005-11-02 07:23:14 -0800142 mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143 if (!mcast)
144 return NULL;
145
Linus Torvalds1da177e2005-04-16 15:20:36 -0700146 mcast->dev = dev;
147 mcast->created = jiffies;
Hal Rosenstockce5b65c2005-09-18 13:47:53 -0700148 mcast->backoff = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700149
150 INIT_LIST_HEAD(&mcast->list);
151 INIT_LIST_HEAD(&mcast->neigh_list);
152 skb_queue_head_init(&mcast->pkt_queue);
153
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154 return mcast;
155}
156
157static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, union ib_gid *mgid)
158{
159 struct ipoib_dev_priv *priv = netdev_priv(dev);
160 struct rb_node *n = priv->multicast_tree.rb_node;
161
162 while (n) {
163 struct ipoib_mcast *mcast;
164 int ret;
165
166 mcast = rb_entry(n, struct ipoib_mcast, rb_node);
167
168 ret = memcmp(mgid->raw, mcast->mcmember.mgid.raw,
169 sizeof (union ib_gid));
170 if (ret < 0)
171 n = n->rb_left;
172 else if (ret > 0)
173 n = n->rb_right;
174 else
175 return mcast;
176 }
177
178 return NULL;
179}
180
181static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
182{
183 struct ipoib_dev_priv *priv = netdev_priv(dev);
184 struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;
185
186 while (*n) {
187 struct ipoib_mcast *tmcast;
188 int ret;
189
190 pn = *n;
191 tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);
192
193 ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
194 sizeof (union ib_gid));
195 if (ret < 0)
196 n = &pn->rb_left;
197 else if (ret > 0)
198 n = &pn->rb_right;
199 else
200 return -EEXIST;
201 }
202
203 rb_link_node(&mcast->rb_node, pn, n);
204 rb_insert_color(&mcast->rb_node, &priv->multicast_tree);
205
206 return 0;
207}
208
209static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
210 struct ib_sa_mcmember_rec *mcmember)
211{
212 struct net_device *dev = mcast->dev;
213 struct ipoib_dev_priv *priv = netdev_priv(dev);
Eli Cohen7343b232006-02-27 20:47:43 -0800214 struct ipoib_ah *ah;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215 int ret;
216
217 mcast->mcmember = *mcmember;
218
219 /* Set the cached Q_Key before we attach if it's the broadcast group */
220 if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
221 sizeof (union ib_gid))) {
222 priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
223 priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
224 }
225
226 if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
227 if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
228 ipoib_warn(priv, "multicast group " IPOIB_GID_FMT
229 " already attached\n",
230 IPOIB_GID_ARG(mcast->mcmember.mgid));
231
232 return 0;
233 }
234
235 ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
236 &mcast->mcmember.mgid);
237 if (ret < 0) {
238 ipoib_warn(priv, "couldn't attach QP to multicast group "
239 IPOIB_GID_FMT "\n",
240 IPOIB_GID_ARG(mcast->mcmember.mgid));
241
242 clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
243 return ret;
244 }
245 }
246
247 {
248 struct ib_ah_attr av = {
249 .dlid = be16_to_cpu(mcast->mcmember.mlid),
250 .port_num = priv->port,
251 .sl = mcast->mcmember.sl,
252 .ah_flags = IB_AH_GRH,
253 .grh = {
254 .flow_label = be32_to_cpu(mcast->mcmember.flow_label),
255 .hop_limit = mcast->mcmember.hop_limit,
256 .sgid_index = 0,
257 .traffic_class = mcast->mcmember.traffic_class
258 }
259 };
Roland Dreiere6ded992005-04-16 15:26:06 -0700260 int path_rate = ib_sa_rate_enum_to_int(mcast->mcmember.rate);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261
262 av.grh.dgid = mcast->mcmember.mgid;
263
Roland Dreiere6ded992005-04-16 15:26:06 -0700264 if (path_rate > 0 && priv->local_rate > path_rate)
265 av.static_rate = (priv->local_rate - 1) / path_rate;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266
267 ipoib_dbg_mcast(priv, "static_rate %d for local port %dX, mcmember %dX\n",
268 av.static_rate, priv->local_rate,
269 ib_sa_rate_enum_to_int(mcast->mcmember.rate));
270
Eli Cohen7343b232006-02-27 20:47:43 -0800271 ah = ipoib_create_ah(dev, priv->pd, &av);
272 if (!ah) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700273 ipoib_warn(priv, "ib_address_create failed\n");
274 } else {
275 ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT
276 " AV %p, LID 0x%04x, SL %d\n",
277 IPOIB_GID_ARG(mcast->mcmember.mgid),
278 mcast->ah->ah,
279 be16_to_cpu(mcast->mcmember.mlid),
280 mcast->mcmember.sl);
281 }
Eli Cohen7343b232006-02-27 20:47:43 -0800282
283 spin_lock_irq(&priv->lock);
284 mcast->ah = ah;
285 spin_unlock_irq(&priv->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286 }
287
288 /* actually send any queued packets */
Michael S. Tsirkinb36f1702006-01-17 12:19:40 -0800289 spin_lock_irq(&priv->tx_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290 while (!skb_queue_empty(&mcast->pkt_queue)) {
291 struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
Michael S. Tsirkinb36f1702006-01-17 12:19:40 -0800292 spin_unlock_irq(&priv->tx_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293
294 skb->dev = dev;
295
296 if (!skb->dst || !skb->dst->neighbour) {
297 /* put pseudoheader back on for next time */
298 skb_push(skb, sizeof (struct ipoib_pseudoheader));
299 }
300
301 if (dev_queue_xmit(skb))
302 ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
Michael S. Tsirkinb36f1702006-01-17 12:19:40 -0800303 spin_lock_irq(&priv->tx_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700304 }
Michael S. Tsirkinb36f1702006-01-17 12:19:40 -0800305 spin_unlock_irq(&priv->tx_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700306
307 return 0;
308}
309
310static void
311ipoib_mcast_sendonly_join_complete(int status,
312 struct ib_sa_mcmember_rec *mcmember,
313 void *mcast_ptr)
314{
315 struct ipoib_mcast *mcast = mcast_ptr;
316 struct net_device *dev = mcast->dev;
Michael S. Tsirkinb36f1702006-01-17 12:19:40 -0800317 struct ipoib_dev_priv *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318
319 if (!status)
320 ipoib_mcast_join_finish(mcast, mcmember);
321 else {
322 if (mcast->logcount++ < 20)
323 ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for "
324 IPOIB_GID_FMT ", status %d\n",
325 IPOIB_GID_ARG(mcast->mcmember.mgid), status);
326
327 /* Flush out any queued packets */
Michael S. Tsirkinb36f1702006-01-17 12:19:40 -0800328 spin_lock_irq(&priv->tx_lock);
329 while (!skb_queue_empty(&mcast->pkt_queue)) {
330 ++priv->stats.tx_dropped;
Roland Dreier8c608a32005-11-07 10:49:38 -0800331 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
Michael S. Tsirkinb36f1702006-01-17 12:19:40 -0800332 }
333 spin_unlock_irq(&priv->tx_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334
335 /* Clear the busy flag so we try again */
336 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
337 }
338
339 complete(&mcast->done);
340}
341
342static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
343{
344 struct net_device *dev = mcast->dev;
345 struct ipoib_dev_priv *priv = netdev_priv(dev);
346 struct ib_sa_mcmember_rec rec = {
347#if 0 /* Some SMs don't support send-only yet */
348 .join_state = 4
349#else
350 .join_state = 1
351#endif
352 };
353 int ret = 0;
354
355 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
356 ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
357 return -ENODEV;
358 }
359
360 if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
361 ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
362 return -EBUSY;
363 }
364
365 rec.mgid = mcast->mcmember.mgid;
366 rec.port_gid = priv->local_gid;
Sean Hefty97f52eb2005-08-13 21:05:57 -0700367 rec.pkey = cpu_to_be16(priv->pkey);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700368
Michael S. Tsirkinde922482005-11-29 10:18:45 -0800369 init_completion(&mcast->done);
370
Linus Torvalds1da177e2005-04-16 15:20:36 -0700371 ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
372 IB_SA_MCMEMBER_REC_MGID |
373 IB_SA_MCMEMBER_REC_PORT_GID |
374 IB_SA_MCMEMBER_REC_PKEY |
375 IB_SA_MCMEMBER_REC_JOIN_STATE,
376 1000, GFP_ATOMIC,
377 ipoib_mcast_sendonly_join_complete,
378 mcast, &mcast->query);
379 if (ret < 0) {
380 ipoib_warn(priv, "ib_sa_mcmember_rec_set failed (ret = %d)\n",
381 ret);
382 } else {
383 ipoib_dbg_mcast(priv, "no multicast record for " IPOIB_GID_FMT
384 ", starting join\n",
385 IPOIB_GID_ARG(mcast->mcmember.mgid));
386
387 mcast->query_id = ret;
388 }
389
390 return ret;
391}
392
393static void ipoib_mcast_join_complete(int status,
394 struct ib_sa_mcmember_rec *mcmember,
395 void *mcast_ptr)
396{
397 struct ipoib_mcast *mcast = mcast_ptr;
398 struct net_device *dev = mcast->dev;
399 struct ipoib_dev_priv *priv = netdev_priv(dev);
400
401 ipoib_dbg_mcast(priv, "join completion for " IPOIB_GID_FMT
402 " (status %d)\n",
403 IPOIB_GID_ARG(mcast->mcmember.mgid), status);
404
405 if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
Hal Rosenstockce5b65c2005-09-18 13:47:53 -0700406 mcast->backoff = 1;
Ingo Molnar95ed6442006-01-13 14:51:39 -0800407 mutex_lock(&mcast_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
409 queue_work(ipoib_workqueue, &priv->mcast_task);
Ingo Molnar95ed6442006-01-13 14:51:39 -0800410 mutex_unlock(&mcast_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 complete(&mcast->done);
412 return;
413 }
414
415 if (status == -EINTR) {
416 complete(&mcast->done);
417 return;
418 }
419
420 if (status && mcast->logcount++ < 20) {
421 if (status == -ETIMEDOUT || status == -EINTR) {
422 ipoib_dbg_mcast(priv, "multicast join failed for " IPOIB_GID_FMT
423 ", status %d\n",
424 IPOIB_GID_ARG(mcast->mcmember.mgid),
425 status);
426 } else {
427 ipoib_warn(priv, "multicast join failed for "
428 IPOIB_GID_FMT ", status %d\n",
429 IPOIB_GID_ARG(mcast->mcmember.mgid),
430 status);
431 }
432 }
433
434 mcast->backoff *= 2;
435 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
436 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
437
Michael S. Tsirkin9acf6a82006-03-02 11:07:47 -0800438 mutex_lock(&mcast_mutex);
439
440 spin_lock_irq(&priv->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700441 mcast->query = NULL;
442
Linus Torvalds1da177e2005-04-16 15:20:36 -0700443 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
444 if (status == -ETIMEDOUT)
445 queue_work(ipoib_workqueue, &priv->mcast_task);
446 else
447 queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
448 mcast->backoff * HZ);
449 } else
450 complete(&mcast->done);
Michael S. Tsirkin9acf6a82006-03-02 11:07:47 -0800451 spin_unlock_irq(&priv->lock);
Ingo Molnar95ed6442006-01-13 14:51:39 -0800452 mutex_unlock(&mcast_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700453
454 return;
455}
456
457static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
458 int create)
459{
460 struct ipoib_dev_priv *priv = netdev_priv(dev);
461 struct ib_sa_mcmember_rec rec = {
462 .join_state = 1
463 };
464 ib_sa_comp_mask comp_mask;
465 int ret = 0;
466
467 ipoib_dbg_mcast(priv, "joining MGID " IPOIB_GID_FMT "\n",
468 IPOIB_GID_ARG(mcast->mcmember.mgid));
469
470 rec.mgid = mcast->mcmember.mgid;
471 rec.port_gid = priv->local_gid;
Sean Hefty97f52eb2005-08-13 21:05:57 -0700472 rec.pkey = cpu_to_be16(priv->pkey);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473
474 comp_mask =
475 IB_SA_MCMEMBER_REC_MGID |
476 IB_SA_MCMEMBER_REC_PORT_GID |
477 IB_SA_MCMEMBER_REC_PKEY |
478 IB_SA_MCMEMBER_REC_JOIN_STATE;
479
480 if (create) {
481 comp_mask |=
482 IB_SA_MCMEMBER_REC_QKEY |
483 IB_SA_MCMEMBER_REC_SL |
484 IB_SA_MCMEMBER_REC_FLOW_LABEL |
485 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
486
487 rec.qkey = priv->broadcast->mcmember.qkey;
488 rec.sl = priv->broadcast->mcmember.sl;
489 rec.flow_label = priv->broadcast->mcmember.flow_label;
490 rec.traffic_class = priv->broadcast->mcmember.traffic_class;
491 }
492
Michael S. Tsirkinde922482005-11-29 10:18:45 -0800493 init_completion(&mcast->done);
494
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495 ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, comp_mask,
496 mcast->backoff * 1000, GFP_ATOMIC,
497 ipoib_mcast_join_complete,
498 mcast, &mcast->query);
499
500 if (ret < 0) {
501 ipoib_warn(priv, "ib_sa_mcmember_rec_set failed, status %d\n", ret);
502
503 mcast->backoff *= 2;
504 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
505 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
506
Ingo Molnar95ed6442006-01-13 14:51:39 -0800507 mutex_lock(&mcast_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700508 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
509 queue_delayed_work(ipoib_workqueue,
510 &priv->mcast_task,
Hal Rosenstockce5b65c2005-09-18 13:47:53 -0700511 mcast->backoff * HZ);
Ingo Molnar95ed6442006-01-13 14:51:39 -0800512 mutex_unlock(&mcast_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700513 } else
514 mcast->query_id = ret;
515}
516
517void ipoib_mcast_join_task(void *dev_ptr)
518{
519 struct net_device *dev = dev_ptr;
520 struct ipoib_dev_priv *priv = netdev_priv(dev);
521
522 if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
523 return;
524
525 if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
526 ipoib_warn(priv, "ib_gid_entry_get() failed\n");
527 else
528 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
529
530 {
531 struct ib_port_attr attr;
532
533 if (!ib_query_port(priv->ca, priv->port, &attr)) {
534 priv->local_lid = attr.lid;
535 priv->local_rate = attr.active_speed *
536 ib_width_enum_to_int(attr.active_width);
537 } else
538 ipoib_warn(priv, "ib_query_port failed\n");
539 }
540
541 if (!priv->broadcast) {
Roland Dreier20b83382006-02-11 12:22:12 -0800542 struct ipoib_mcast *broadcast;
543
544 broadcast = ipoib_mcast_alloc(dev, 1);
545 if (!broadcast) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 ipoib_warn(priv, "failed to allocate broadcast group\n");
Ingo Molnar95ed6442006-01-13 14:51:39 -0800547 mutex_lock(&mcast_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
549 queue_delayed_work(ipoib_workqueue,
550 &priv->mcast_task, HZ);
Ingo Molnar95ed6442006-01-13 14:51:39 -0800551 mutex_unlock(&mcast_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700552 return;
553 }
554
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555 spin_lock_irq(&priv->lock);
Roland Dreier20b83382006-02-11 12:22:12 -0800556 memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
557 sizeof (union ib_gid));
558 priv->broadcast = broadcast;
559
Linus Torvalds1da177e2005-04-16 15:20:36 -0700560 __ipoib_mcast_add(dev, priv->broadcast);
561 spin_unlock_irq(&priv->lock);
562 }
563
564 if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
565 ipoib_mcast_join(dev, priv->broadcast, 0);
566 return;
567 }
568
569 while (1) {
570 struct ipoib_mcast *mcast = NULL;
571
572 spin_lock_irq(&priv->lock);
573 list_for_each_entry(mcast, &priv->multicast_list, list) {
574 if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
575 && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
576 && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
577 /* Found the next unjoined group */
578 break;
579 }
580 }
581 spin_unlock_irq(&priv->lock);
582
583 if (&mcast->list == &priv->multicast_list) {
584 /* All done */
585 break;
586 }
587
588 ipoib_mcast_join(dev, mcast, 1);
589 return;
590 }
591
592 priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
593 IPOIB_ENCAP_LEN;
594 dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
595
596 ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");
597
598 clear_bit(IPOIB_MCAST_RUN, &priv->flags);
599 netif_carrier_on(dev);
600}
601
602int ipoib_mcast_start_thread(struct net_device *dev)
603{
604 struct ipoib_dev_priv *priv = netdev_priv(dev);
605
606 ipoib_dbg_mcast(priv, "starting multicast thread\n");
607
Ingo Molnar95ed6442006-01-13 14:51:39 -0800608 mutex_lock(&mcast_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
610 queue_work(ipoib_workqueue, &priv->mcast_task);
Ingo Molnar95ed6442006-01-13 14:51:39 -0800611 mutex_unlock(&mcast_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700612
Michael S. Tsirkin479a0792006-02-07 16:37:08 -0800613 spin_lock_irq(&priv->lock);
614 set_bit(IPOIB_MCAST_STARTED, &priv->flags);
615 spin_unlock_irq(&priv->lock);
616
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617 return 0;
618}
619
Roland Dreier8d2cae02005-09-20 10:52:04 -0700620int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700621{
622 struct ipoib_dev_priv *priv = netdev_priv(dev);
623 struct ipoib_mcast *mcast;
624
625 ipoib_dbg_mcast(priv, "stopping multicast thread\n");
626
Michael S. Tsirkin479a0792006-02-07 16:37:08 -0800627 spin_lock_irq(&priv->lock);
628 clear_bit(IPOIB_MCAST_STARTED, &priv->flags);
629 spin_unlock_irq(&priv->lock);
630
Ingo Molnar95ed6442006-01-13 14:51:39 -0800631 mutex_lock(&mcast_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700632 clear_bit(IPOIB_MCAST_RUN, &priv->flags);
633 cancel_delayed_work(&priv->mcast_task);
Ingo Molnar95ed6442006-01-13 14:51:39 -0800634 mutex_unlock(&mcast_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635
Roland Dreier8d2cae02005-09-20 10:52:04 -0700636 if (flush)
637 flush_workqueue(ipoib_workqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700638
Michael S. Tsirkin9acf6a82006-03-02 11:07:47 -0800639 spin_lock_irq(&priv->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700640 if (priv->broadcast && priv->broadcast->query) {
641 ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query);
642 priv->broadcast->query = NULL;
Michael S. Tsirkin9acf6a82006-03-02 11:07:47 -0800643 spin_unlock_irq(&priv->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700644 ipoib_dbg_mcast(priv, "waiting for bcast\n");
645 wait_for_completion(&priv->broadcast->done);
Michael S. Tsirkin9acf6a82006-03-02 11:07:47 -0800646 } else
647 spin_unlock_irq(&priv->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700648
649 list_for_each_entry(mcast, &priv->multicast_list, list) {
Michael S. Tsirkin9acf6a82006-03-02 11:07:47 -0800650 spin_lock_irq(&priv->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700651 if (mcast->query) {
652 ib_sa_cancel_query(mcast->query_id, mcast->query);
653 mcast->query = NULL;
Michael S. Tsirkin9acf6a82006-03-02 11:07:47 -0800654 spin_unlock_irq(&priv->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700655 ipoib_dbg_mcast(priv, "waiting for MGID " IPOIB_GID_FMT "\n",
656 IPOIB_GID_ARG(mcast->mcmember.mgid));
657 wait_for_completion(&mcast->done);
Michael S. Tsirkin9acf6a82006-03-02 11:07:47 -0800658 } else
659 spin_unlock_irq(&priv->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660 }
661
662 return 0;
663}
664
665static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
666{
667 struct ipoib_dev_priv *priv = netdev_priv(dev);
668 struct ib_sa_mcmember_rec rec = {
669 .join_state = 1
670 };
671 int ret = 0;
672
673 if (!test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags))
674 return 0;
675
676 ipoib_dbg_mcast(priv, "leaving MGID " IPOIB_GID_FMT "\n",
677 IPOIB_GID_ARG(mcast->mcmember.mgid));
678
679 rec.mgid = mcast->mcmember.mgid;
680 rec.port_gid = priv->local_gid;
Sean Hefty97f52eb2005-08-13 21:05:57 -0700681 rec.pkey = cpu_to_be16(priv->pkey);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682
683 /* Remove ourselves from the multicast group */
684 ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid),
685 &mcast->mcmember.mgid);
686 if (ret)
687 ipoib_warn(priv, "ipoib_mcast_detach failed (result = %d)\n", ret);
688
689 /*
690 * Just make one shot at leaving and don't wait for a reply;
691 * if we fail, too bad.
692 */
693 ret = ib_sa_mcmember_rec_delete(priv->ca, priv->port, &rec,
694 IB_SA_MCMEMBER_REC_MGID |
695 IB_SA_MCMEMBER_REC_PORT_GID |
696 IB_SA_MCMEMBER_REC_PKEY |
697 IB_SA_MCMEMBER_REC_JOIN_STATE,
698 0, GFP_ATOMIC, NULL,
699 mcast, &mcast->query);
700 if (ret < 0)
701 ipoib_warn(priv, "ib_sa_mcmember_rec_delete failed "
702 "for leave (result = %d)\n", ret);
703
704 return 0;
705}
706
707void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
708 struct sk_buff *skb)
709{
710 struct ipoib_dev_priv *priv = netdev_priv(dev);
711 struct ipoib_mcast *mcast;
712
713 /*
714 * We can only be called from ipoib_start_xmit, so we're
715 * inside tx_lock -- no need to save/restore flags.
716 */
717 spin_lock(&priv->lock);
718
Roland Dreier20b83382006-02-11 12:22:12 -0800719 if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) ||
720 !priv->broadcast ||
721 !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
Michael S. Tsirkin479a0792006-02-07 16:37:08 -0800722 ++priv->stats.tx_dropped;
723 dev_kfree_skb_any(skb);
724 goto unlock;
725 }
726
Linus Torvalds1da177e2005-04-16 15:20:36 -0700727 mcast = __ipoib_mcast_find(dev, mgid);
728 if (!mcast) {
729 /* Let's create a new send only group now */
730 ipoib_dbg_mcast(priv, "setting up send only multicast group for "
731 IPOIB_GID_FMT "\n", IPOIB_GID_ARG(*mgid));
732
733 mcast = ipoib_mcast_alloc(dev, 0);
734 if (!mcast) {
735 ipoib_warn(priv, "unable to allocate memory for "
736 "multicast structure\n");
Michael S. Tsirkinb36f1702006-01-17 12:19:40 -0800737 ++priv->stats.tx_dropped;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700738 dev_kfree_skb_any(skb);
739 goto out;
740 }
741
742 set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
743 mcast->mcmember.mgid = *mgid;
744 __ipoib_mcast_add(dev, mcast);
745 list_add_tail(&mcast->list, &priv->multicast_list);
746 }
747
748 if (!mcast->ah) {
749 if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
750 skb_queue_tail(&mcast->pkt_queue, skb);
Michael S. Tsirkinb36f1702006-01-17 12:19:40 -0800751 else {
752 ++priv->stats.tx_dropped;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700753 dev_kfree_skb_any(skb);
Michael S. Tsirkinb36f1702006-01-17 12:19:40 -0800754 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755
756 if (mcast->query)
757 ipoib_dbg_mcast(priv, "no address vector, "
758 "but multicast join already started\n");
759 else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
760 ipoib_mcast_sendonly_join(mcast);
761
762 /*
763 * If lookup completes between here and out:, don't
764 * want to send packet twice.
765 */
766 mcast = NULL;
767 }
768
769out:
770 if (mcast && mcast->ah) {
771 if (skb->dst &&
772 skb->dst->neighbour &&
773 !*to_ipoib_neigh(skb->dst->neighbour)) {
Michael S. Tsirkind2e06552006-04-04 19:59:40 +0300774 struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775
776 if (neigh) {
777 kref_get(&mcast->ah->ref);
778 neigh->ah = mcast->ah;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700779 list_add_tail(&neigh->list, &mcast->neigh_list);
780 }
781 }
782
783 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
784 }
785
Michael S. Tsirkin479a0792006-02-07 16:37:08 -0800786unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700787 spin_unlock(&priv->lock);
788}
789
790void ipoib_mcast_dev_flush(struct net_device *dev)
791{
792 struct ipoib_dev_priv *priv = netdev_priv(dev);
793 LIST_HEAD(remove_list);
Eli Cohen988bd502006-01-12 14:32:20 -0800794 struct ipoib_mcast *mcast, *tmcast;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700795 unsigned long flags;
796
797 ipoib_dbg_mcast(priv, "flushing multicast list\n");
798
799 spin_lock_irqsave(&priv->lock, flags);
Eli Cohen988bd502006-01-12 14:32:20 -0800800
Linus Torvalds1da177e2005-04-16 15:20:36 -0700801 list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
Eli Cohen988bd502006-01-12 14:32:20 -0800802 list_del(&mcast->list);
803 rb_erase(&mcast->rb_node, &priv->multicast_tree);
804 list_add_tail(&mcast->list, &remove_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700805 }
806
807 if (priv->broadcast) {
Eli Cohen988bd502006-01-12 14:32:20 -0800808 rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
809 list_add_tail(&priv->broadcast->list, &remove_list);
810 priv->broadcast = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700811 }
812
813 spin_unlock_irqrestore(&priv->lock, flags);
814
815 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
816 ipoib_mcast_leave(dev, mcast);
817 ipoib_mcast_free(mcast);
818 }
819}
820
Linus Torvalds1da177e2005-04-16 15:20:36 -0700821void ipoib_mcast_restart_task(void *dev_ptr)
822{
823 struct net_device *dev = dev_ptr;
824 struct ipoib_dev_priv *priv = netdev_priv(dev);
825 struct dev_mc_list *mclist;
826 struct ipoib_mcast *mcast, *tmcast;
827 LIST_HEAD(remove_list);
828 unsigned long flags;
829
830 ipoib_dbg_mcast(priv, "restarting multicast task\n");
831
Roland Dreier8d2cae02005-09-20 10:52:04 -0700832 ipoib_mcast_stop_thread(dev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833
Michael S. Tsirkin78bfe0b2006-01-11 11:47:34 -0800834 spin_lock_irqsave(&dev->xmit_lock, flags);
835 spin_lock(&priv->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700836
837 /*
838 * Unfortunately, the networking core only gives us a list of all of
839 * the multicast hardware addresses. We need to figure out which ones
840 * are new and which ones have been removed
841 */
842
843 /* Clear out the found flag */
844 list_for_each_entry(mcast, &priv->multicast_list, list)
845 clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
846
847 /* Mark all of the entries that are found or don't exist */
848 for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
849 union ib_gid mgid;
850
851 memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
852
853 /* Add in the P_Key */
854 mgid.raw[4] = (priv->pkey >> 8) & 0xff;
855 mgid.raw[5] = priv->pkey & 0xff;
856
857 mcast = __ipoib_mcast_find(dev, &mgid);
858 if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
859 struct ipoib_mcast *nmcast;
860
861 /* Not found or send-only group, let's add a new entry */
862 ipoib_dbg_mcast(priv, "adding multicast entry for mgid "
863 IPOIB_GID_FMT "\n", IPOIB_GID_ARG(mgid));
864
865 nmcast = ipoib_mcast_alloc(dev, 0);
866 if (!nmcast) {
867 ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
868 continue;
869 }
870
871 set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);
872
873 nmcast->mcmember.mgid = mgid;
874
875 if (mcast) {
876 /* Destroy the send only entry */
877 list_del(&mcast->list);
878 list_add_tail(&mcast->list, &remove_list);
879
880 rb_replace_node(&mcast->rb_node,
881 &nmcast->rb_node,
882 &priv->multicast_tree);
883 } else
884 __ipoib_mcast_add(dev, nmcast);
885
886 list_add_tail(&nmcast->list, &priv->multicast_list);
887 }
888
889 if (mcast)
890 set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
891 }
892
893 /* Remove all of the entries don't exist anymore */
894 list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
895 if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
896 !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
897 ipoib_dbg_mcast(priv, "deleting multicast group " IPOIB_GID_FMT "\n",
898 IPOIB_GID_ARG(mcast->mcmember.mgid));
899
900 rb_erase(&mcast->rb_node, &priv->multicast_tree);
901
902 /* Move to the remove list */
903 list_del(&mcast->list);
904 list_add_tail(&mcast->list, &remove_list);
905 }
906 }
Michael S. Tsirkin78bfe0b2006-01-11 11:47:34 -0800907
908 spin_unlock(&priv->lock);
909 spin_unlock_irqrestore(&dev->xmit_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700910
911 /* We have to cancel outside of the spinlock */
912 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
913 ipoib_mcast_leave(mcast->dev, mcast);
914 ipoib_mcast_free(mcast);
915 }
916
917 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
918 ipoib_mcast_start_thread(dev);
919}
920
Roland Dreier8ae5a8a2005-11-02 20:51:01 -0800921#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
922
Linus Torvalds1da177e2005-04-16 15:20:36 -0700923struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
924{
925 struct ipoib_mcast_iter *iter;
926
927 iter = kmalloc(sizeof *iter, GFP_KERNEL);
928 if (!iter)
929 return NULL;
930
931 iter->dev = dev;
Roland Dreier1732b0e2005-11-07 10:33:11 -0800932 memset(iter->mgid.raw, 0, 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700933
934 if (ipoib_mcast_iter_next(iter)) {
Roland Dreier1732b0e2005-11-07 10:33:11 -0800935 kfree(iter);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700936 return NULL;
937 }
938
939 return iter;
940}
941
Linus Torvalds1da177e2005-04-16 15:20:36 -0700942int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
943{
944 struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
945 struct rb_node *n;
946 struct ipoib_mcast *mcast;
947 int ret = 1;
948
949 spin_lock_irq(&priv->lock);
950
951 n = rb_first(&priv->multicast_tree);
952
953 while (n) {
954 mcast = rb_entry(n, struct ipoib_mcast, rb_node);
955
956 if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
957 sizeof (union ib_gid)) < 0) {
958 iter->mgid = mcast->mcmember.mgid;
959 iter->created = mcast->created;
960 iter->queuelen = skb_queue_len(&mcast->pkt_queue);
961 iter->complete = !!mcast->ah;
962 iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));
963
964 ret = 0;
965
966 break;
967 }
968
969 n = rb_next(n);
970 }
971
972 spin_unlock_irq(&priv->lock);
973
974 return ret;
975}
976
977void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
978 union ib_gid *mgid,
979 unsigned long *created,
980 unsigned int *queuelen,
981 unsigned int *complete,
982 unsigned int *send_only)
983{
984 *mgid = iter->mgid;
985 *created = iter->created;
986 *queuelen = iter->queuelen;
987 *complete = iter->complete;
988 *send_only = iter->send_only;
989}
Roland Dreier8ae5a8a2005-11-02 20:51:01 -0800990
991#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */