/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_cache.h>
#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);

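/*
 * Address handle (AH) helpers.  An ipoib_ah wraps an ib_ah together with
 * a kref and the tx_head value of the last send posted with it, so that
 * dead AHs are only destroyed once that send has completed (see
 * __ipoib_reap_ah() below).
 */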
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->dev = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	ah->ah = ib_create_ah(pd, attr);
	if (IS_ERR(ah->ah)) {
		kfree(ah);
		ah = NULL;
	} else
		ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);

	return ah;
}

void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

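/*
 * Unmap a UD receive buffer.  With the two-buffer layout used for 4K IB
 * MTUs (ipoib_ud_need_sg()), the skb head and the attached page are
 * unmapped separately; otherwise there is a single linear buffer.
 */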
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
				    DMA_FROM_DEVICE);
		ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
				  DMA_FROM_DEVICE);
	} else
		ib_dma_unmap_single(priv->ca, mapping[0],
				    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
				    DMA_FROM_DEVICE);
}

static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
				   struct sk_buff *skb,
				   unsigned int length)
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
		unsigned int size;
		/*
		 * Only two buffers are needed for max_payload = 4K;
		 * the first buffer's size is IPOIB_UD_HEAD_SIZE.
		 */
		skb->tail += IPOIB_UD_HEAD_SIZE;
		skb->len  += length;

		size = length - IPOIB_UD_HEAD_SIZE;

		frag->size = size;
		skb->data_len += size;
		skb->truesize += size;
	} else
		skb_put(skb, length);
}

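/*
 * Repost receive buffer 'id': refresh the scatter list addresses from the
 * ring entry's DMA mapping and hand the work request back to the QP.
 */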
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int ret;

	priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

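/*
 * Allocate and DMA-map a fresh receive skb for ring slot 'id'.  When the
 * two-buffer layout is in use, only IPOIB_UD_HEAD_SIZE bytes are linear
 * and the remainder of the payload lands in an attached page.
 */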
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	if (ipoib_ud_need_sg(priv->max_ib_mtu))
		buf_size = IPOIB_UD_HEAD_SIZE;
	else
		buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + 4);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
	 * header.  So we need 4 more bytes to get to 48 and align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 4);

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		struct page *page = alloc_page(GFP_ATOMIC);
		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
		mapping[1] =
			ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
					0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
			goto partial_error;
	}

	priv->rx_ring[id].skb = skb;
	return skb;

partial_error:
	ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
error:
	dev_kfree_skb_any(skb);
	return NULL;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

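/*
 * Handle a single datagram receive completion: drop copies of our own
 * multicast sends, swap in a fresh ring buffer, strip the GRH and IPoIB
 * encapsulation header, and hand the packet to the network stack.
 */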
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	/*
	 * Drop packets that this interface sent, i.e. multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
		goto repost;

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);
	ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	dev->last_rx = jiffies;
	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;

	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netif_receive_skb(skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}

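/*
 * DMA-map a transmit skb: the linear head (if any) followed by each page
 * fragment.  On a mapping failure everything mapped so far is unwound.
 */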
static int ipoib_dma_map_tx(struct ib_device *ca,
			    struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + off] = ib_dma_map_page(ca, frag->page,
						   frag->page_offset, frag->size,
						   DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	for (; i > 0; --i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
		ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}

static void ipoib_dma_unmap_tx(struct ib_device *ca,
			       struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		ib_dma_unmap_page(ca, mapping[i + off], frag->size,
				  DMA_TO_DEVICE);
	}
}

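/*
 * Handle a single datagram send completion: unmap and free the skb,
 * update counters, and wake the netif queue once the ring has drained to
 * half full.
 */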
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR)
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
}

static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i)
		ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

	return n == MAX_SEND_CQE;
}

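/*
 * NAPI poll routine for the receive CQ.  Receive completions count
 * against the budget; connected-mode send completions that share this CQ
 * are handled as they are encountered.
 */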
int ipoib_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else
				ipoib_cm_handle_tx_wc(priv->dev, wc);
		}

		if (n != t)
			break;
	}

	if (done < budget) {
		netif_rx_complete(dev, napi);
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    netif_rx_reschedule(dev, napi))
			goto poll_more;
	}

	return done;
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	netif_rx_schedule(dev, &priv->napi);
}

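/*
 * Drain the send CQ outside of NAPI (completion handler or timer
 * context).  If the queue is still stopped afterwards, re-arm the poll
 * timer so draining continues until the queue can be woken.
 */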
static void drain_tx_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->tx_lock, flags);
	while (poll_tx(priv))
		; /* nothing */

	if (netif_queue_stopped(dev))
		mod_timer(&priv->poll_timer, jiffies + 1);

	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	drain_tx_cq((struct net_device *)dev_ptr);
}

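/*
 * Build the gather list for one send (linear head plus any page
 * fragments) and post it.  A non-NULL 'head' means an LSO work request
 * with the header passed separately.
 */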
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct ib_send_wr *bad_wr;
	int i, off;
	struct sk_buff *skb = tx_req->skb;
	skb_frag_t *frags = skb_shinfo(skb)->frags;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	u64 *mapping = tx_req->mapping;

	if (skb_headlen(skb)) {
		priv->tx_sge[0].addr = mapping[0];
		priv->tx_sge[0].length = skb_headlen(skb);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < nr_frags; ++i) {
		priv->tx_sge[i + off].addr = mapping[i + off];
		priv->tx_sge[i + off].length = frags[i].size;
	}
	priv->tx_wr.num_sge = nr_frags + off;
	priv->tx_wr.wr_id = wr_id;
	priv->tx_wr.wr.ud.remote_qpn = qpn;
	priv->tx_wr.wr.ud.ah = address;

	if (head) {
		priv->tx_wr.wr.ud.mss = skb_shinfo(skb)->gso_size;
		priv->tx_wr.wr.ud.header = head;
		priv->tx_wr.wr.ud.hlen = hlen;
		priv->tx_wr.opcode = IB_WR_LSO;
	} else
		priv->tx_wr.opcode = IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}

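/*
 * Transmit one skb on the UD QP.  GSO skbs are posted as LSO work
 * requests with the TCP/IP headers pulled out of the linear data; all
 * other skbs must fit within the multicast MTU (plus the encapsulation
 * header).
 */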
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen;
	void *phead;

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return;
		}
		phead = NULL;
		hlen = 0;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		netif_stop_queue(dev);
	}

	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
			       address->ah, qpn, tx_req, phead, hlen))) {
		ipoib_warn(priv, "post_send failed\n");
		++dev->stats.tx_errors;
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv->ca, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else {
		dev->trans_start = jiffies;

		address->last_send = priv->tx_head;
		++priv->tx_head;
		skb_orphan(skb);
	}

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (poll_tx(priv))
			; /* nothing */
}

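/*
 * Destroy any dead address handles whose last send has already completed
 * (i.e. tx_tail has caught up with ah->last_send).
 */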
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			ib_destroy_ah(ah->ah);
			kfree(ah);
		}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}

static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct net_device *)ctx);
}

int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		return -1;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	init_timer(&priv->poll_timer);
	priv->poll_timer.function = ipoib_ib_tx_timer_func;
	priv->poll_timer.data = (unsigned long)dev;

	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	return 0;
}

static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	u16 pkey_index = 0;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(dev);
}

int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	/* Shut down the P_Key thread if it is still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_poll_task);
		mutex_unlock(&pkey_mutex);
		if (flush)
			flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(dev, flush);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);

	return 0;
}

static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

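/*
 * Poll the receive CQ until it is empty, converting successful
 * completions to flush errors so that no packets are passed up the stack
 * while the device is going down, then drain the send CQ as well.
 */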
void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, n;
	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else
				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);

	while (poll_tx(priv))
		; /* nothing */
}

int ipoib_ib_dev_stop(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize it
	 * when all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * Assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv->ca, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	if (flush)
		flush_workqueue(ipoib_workqueue);

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(dev);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(dev, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			ipoib_transport_dev_cleanup(dev);
			return -ENODEV;
		}
	}

	return 0;
}

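/*
 * Flush this interface and any child interfaces after a port or P_Key
 * event.  For P_Key events the QP is also stopped and reopened when the
 * P_Key index has changed or the P_Key has disappeared from the table.
 */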
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	u16 new_index;

	mutex_lock(&priv->vlan_mutex);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, pkey_event);

	mutex_unlock(&priv->vlan_mutex);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (pkey_event) {
		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
			ipoib_ib_dev_down(dev, 0);
			ipoib_ib_dev_stop(dev, 0);
			if (ipoib_pkey_dev_delay_open(dev))
				return;
		}

		/* restart the QP only if the P_Key index has changed */
		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
		    new_index == priv->pkey_index) {
			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
			return;
		}
		priv->pkey_index = new_index;
	}

	ipoib_dbg(priv, "flushing\n");

	ipoib_ib_dev_down(dev, 0);

	if (pkey_event) {
		ipoib_ib_dev_stop(dev, 0);
		ipoib_ib_dev_open(dev);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here; don't bring it back up if it's not configured up.
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_ib_dev_up(dev);
		ipoib_mcast_restart_task(&priv->restart_task);
	}
}

void ipoib_ib_dev_flush(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_task);

	ipoib_dbg(priv, "Flushing %s\n", priv->dev->name);
	__ipoib_ib_dev_flush(priv, 0);
}

void ipoib_pkey_event(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_event_task);

	ipoib_dbg(priv, "Flushing %s and restarting its QP\n", priv->dev->name);
	__ipoib_ib_dev_flush(priv, 1);
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(dev, 1);
	ipoib_mcast_dev_flush(dev);

	ipoib_transport_dev_cleanup(dev);
}

/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be
 * removed when the P_Key change async notification is available.
 */

void ipoib_pkey_poll(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
	struct net_device *dev = priv->dev;

	ipoib_pkey_dev_check_presence(dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(dev);
	else {
		mutex_lock(&pkey_mutex);
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->pkey_poll_task,
					   HZ);
		mutex_unlock(&pkey_mutex);
	}
}

int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/*
	 * Look for the interface's P_Key value in the IB port's P_Key
	 * table and set the interface's P_Key-assigned flag accordingly.
	 */
	ipoib_pkey_dev_check_presence(dev);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
				   &priv->pkey_poll_task,
				   HZ);
		mutex_unlock(&pkey_mutex);
		return 1;
	}

	return 0;
}