/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <asm/page.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/tcp.h>
#include <linux/moduleparam.h>

#include "mlx4_en.h"

enum {
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
	MAX_BF = 256,
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

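/*
 * Allocate the software and hardware resources for one TX ring: the
 * per-descriptor tx_info array, a bounce buffer for descriptors that
 * wrap past the end of the ring, the HW queue memory, the QP used to
 * post sends, and, when the device supports it, a BlueFlame register
 * for low-latency doorbells.  On failure, everything allocated so far
 * is unwound.
 */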
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring *ring, int qpn, u32 size,
			   u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int tmp;
	int err;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;

	inline_thold = min(inline_thold, MAX_INLINE);

	spin_lock_init(&ring->comp_lock);

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc(tmp);
	if (!ring->tx_info)
		return -ENOMEM;

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
	if (!ring->bounce_buf) {
		err = -ENOMEM;
		goto err_tx;
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	ring->qpn = qpn;
	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_map;
	}
	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;

	return 0;

err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_tx:
	vfree(ring->tx_info);
	ring->tx_info = NULL;
	return err;
}

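/* Release the QP, queue memory and software state owned by this TX ring. */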
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	if (ring->bf_enabled)
		mlx4_bf_free(mdev->dev, &ring->bf);
	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
	vfree(ring->tx_info);
	ring->tx_info = NULL;
}

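/*
 * Reset the ring counters, attach the ring to its completion queue and
 * bring the send QP from RESET to the ready-to-send state.
 */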
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	ring->blocked = 0;
	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = ring->qp.qpn << 8;

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
				ring->cqn, &ring->context);
	if (ring->bf_enabled)
		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
			       &ring->qp, &ring->qp_state);

	return err;
}

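/* Quiesce the ring by moving its send QP back to the RESET state. */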
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}

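/*
 * Release one completed (or cancelled) descriptor: unmap its DMA
 * buffers unless the data was sent inline, stamp the freed TXBBs with
 * the software ownership pattern, and free the skb.  The second branch
 * handles descriptors that wrap around the end of the ring buffer.
 * Returns the number of TXBBs the descriptor occupied.
 */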
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, u8 owner)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	struct sk_buff *skb = tx_info->skb;
	struct skb_frag_struct *frag;
	void *end = ring->buf + ring->buf_size;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;
	__be32 *ptr = (__be32 *)tx_desc;
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (!tx_info->inl) {
			if (tx_info->linear) {
				dma_unmap_single(priv->ddev,
					(dma_addr_t) be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				frag = &skb_shinfo(skb)->frags[i];
				dma_unmap_page(priv->ddev,
					(dma_addr_t) be64_to_cpu(data[i].addr),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}

	} else {
		if (!tx_info->inl) {
			if ((void *) data >= end) {
				data = ring->buf + ((void *)data - end);
			}

			if (tx_info->linear) {
				dma_unmap_single(priv->ddev,
					(dma_addr_t) be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				/* Check for wraparound before unmapping */
				if ((void *) data >= end)
					data = ring->buf;
				frag = &skb_shinfo(skb)->frags[i];
				dma_unmap_page(priv->ddev,
					(dma_addr_t) be64_to_cpu(data->addr),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
				++data;
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *) ptr >= end) {
				ptr = ring->buf;
				stamp ^= cpu_to_be32(0x80000000);
			}
		}
	}
	dev_kfree_skb_any(skb);
	return tx_info->nr_txbb;
}

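/*
 * Drop every descriptor still outstanding on the ring, presumably while
 * bringing the interface down.  Returns the number of descriptors freed.
 */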
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
	       ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
					ring->cons & ring->size_mask,
					!!(ring->cons & ring->size));
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

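/*
 * Reap TX completions: walk the CQ while the ownership bit indicates
 * software-owned CQEs, free every descriptor up to the wqe_index each
 * CQE reports, update the CQ consumer index before the ring consumer
 * (in that order, to avoid CQ overflow), and wake the netdev queue if
 * this ring had stopped it and enough room was freed.
 */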
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	u16 index;
	u16 new_index, ring_index;
	u32 txbbs_skipped = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;

	if (!priv->port_up)
		return;

	index = cons_index & size_mask;
	cqe = &buf[index];
	ring_index = ring->cons & size_mask;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size)) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		rmb();

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += ring->last_nr_txbb;
			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
			/* free next descriptor */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
					priv, ring, ring_index,
					!!((ring->cons + txbbs_skipped) &
					   ring->size));
		} while (ring_index != new_index);

		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[index];
	}

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if this ring stopped it */
	if (unlikely(ring->blocked)) {
		if ((u32) (ring->prod - ring->cons) <=
		    ring->size - HEADROOM - MAX_DESC_TXBBS) {
			ring->blocked = 0;
			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
			priv->port_stats.wake_queue++;
		}
	}
}

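/*
 * Completion event callback for a TX CQ.  Uses a trylock so that a
 * concurrent poller already holding comp_lock simply absorbs the work.
 */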
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];

	if (!spin_trylock(&ring->comp_lock))
		return;
	mlx4_en_process_tx_cq(cq->dev, cq);
	mod_timer(&cq->timer, jiffies + 1);
	spin_unlock(&ring->comp_lock);
}

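/*
 * Timer callback that backstops completion processing: re-arm the
 * timer whenever the lock is contended or packets are still in flight,
 * so completions are not left unprocessed when the interface goes idle.
 */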
void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (!spin_trylock_irq(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock_irq(&ring->comp_lock);
}

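/*
 * Copy a descriptor built in the bounce buffer back to its real place
 * in the ring.  The copy runs backwards and skips the first dword,
 * which holds the ownership/opcode field; the caller writes that field
 * last, once the rest of the descriptor is in memory.
 */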
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      u32 index,
						      unsigned int desc_size)
{
	u32 copy = (ring->size - index) * TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4 ; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + index * TXBB_SIZE;
}

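/*
 * Opportunistic completion reaping from the transmit path: make sure a
 * poll timer is pending, and every MLX4_EN_TX_POLL_MODER packets try
 * to process the CQ inline if the completion lock is free.
 */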
static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
	unsigned long flags;

	/* If we don't have a pending timer, set one up to catch our recent
	   post in case the interface becomes idle */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
			mlx4_en_process_tx_cq(priv->dev, cq);
			spin_unlock_irqrestore(&ring->comp_lock, flags);
		}
}

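/*
 * Return 1 if the skb is small enough to be copied inline into the
 * descriptor instead of being DMA-mapped.  For a single-fragment skb
 * the fragment address is passed back through *pfrag so the caller
 * can copy it.
 */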
static int is_inline(struct sk_buff *skb, void **pfrag)
{
	void *ptr;

	if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
		if (skb_shinfo(skb)->nr_frags == 1) {
			ptr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]);
			if (unlikely(!ptr))
				return 0;

			if (pfrag)
				*pfrag = ptr;

			return 1;
		} else if (unlikely(skb_shinfo(skb)->nr_frags))
			return 0;
		else
			return 1;
	}

	return 0;
}

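/*
 * Bytes needed to hold the skb inline, 16-byte aligned.  Data that
 * spills past one MLX4_INLINE_ALIGN chunk needs a second inline
 * segment header, hence the extra mlx4_wqe_inline_seg.
 */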
static int inline_size(struct sk_buff *skb)
{
	if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
	    <= MLX4_INLINE_ALIGN)
		return ALIGN(skb->len + CTRL_SIZE +
			     sizeof(struct mlx4_wqe_inline_seg), 16);
	else
		return ALIGN(skb->len + CTRL_SIZE + 2 *
			     sizeof(struct mlx4_wqe_inline_seg), 16);
}

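/*
 * Compute the descriptor size for this skb: control segment plus LSO
 * header (for TSO) or inline data, plus one data segment per buffer
 * that will be DMA-mapped.  Returns 0 if the packet cannot be sent,
 * i.e. a TSO skb whose headers spill out of the linear part.
 */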
static int get_real_size(struct sk_buff *skb, struct net_device *dev,
			 int *lso_header_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int real_size;

	if (skb_is_gso(skb)) {
		*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
		real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
			ALIGN(*lso_header_size + 4, DS_SIZE);
		if (unlikely(*lso_header_size != skb_headlen(skb))) {
			/* We add a segment for the skb linear buffer only if
			 * it contains data */
			if (*lso_header_size < skb_headlen(skb))
				real_size += DS_SIZE;
			else {
				if (netif_msg_tx_err(priv))
					en_warn(priv, "Non-linear headers\n");
				return 0;
			}
		}
	} else {
		*lso_header_size = 0;
		if (!is_inline(skb, NULL))
			real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
		else
			real_size = inline_size(skb);
	}

	return real_size;
}

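/*
 * Copy the whole packet into the descriptor as one or two inline
 * segments (bit 31 of byte_count marks a segment as inline).  In the
 * two-segment case the second byte_count is written only after a
 * wmb(), so the HW cannot see a valid count before the data itself.
 */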
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
			     int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

	if (skb->len <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
		if (skb_shinfo(skb)->nr_frags)
			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
			       skb_frag_size(&skb_shinfo(skb)->frags[0]));

	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (skb_headlen(skb) <= spc) {
			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
			if (skb_headlen(skb) < spc) {
				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
				       fragptr, spc - skb_headlen(skb));
				fragptr += spc - skb_headlen(skb);
			}
			inl = (void *) (inl + 1) + spc;
			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
		} else {
			skb_copy_from_linear_data(skb, inl + 1, spc);
			inl = (void *) (inl + 1) + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
							 skb_headlen(skb) - spc);
			if (skb_shinfo(skb)->nr_frags)
				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
				       fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0]));
		}

		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
		(!!vlan_tx_tag_present(skb));
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}

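/*
 * Queue selection: with per-priority flow control enabled, vlan-tagged
 * packets are steered to a dedicated ring per priority (the top three
 * bits of the tag); everything else is hashed across the regular rings.
 */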
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 vlan_tag = 0;

	/* If we support per priority flow control and the packet contains
	 * a vlan tag, send the packet to the TX ring assigned to that priority
	 */
	if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
	}

	return skb_tx_hash(dev, skb);
}

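/* Copy a descriptor into the BlueFlame register area in 64-bit chunks. */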
static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
{
	__iowrite64_copy(dst, src, bytecnt / 8);
}

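/*
 * Main transmit routine: build the WQE (control segment, optional LSO
 * header, then inline data or DMA-mapped data segments) and ring the
 * doorbell, either by a BlueFlame copy of the whole descriptor for
 * small untagged packets or by a write to the send doorbell register.
 */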
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_wqe_data_seg *data;
	struct skb_frag_struct *frag;
	struct mlx4_en_tx_info *tx_info;
	struct ethhdr *ethh;
	int tx_ind = 0;
	int nr_txbb;
	int desc_size;
	int real_size;
	dma_addr_t dma;
	u32 index, bf_index;
	__be32 op_own;
	u16 vlan_tag = 0;
	int i;
	int lso_header_size;
	void *fragptr;
	bool bounce = false;

	if (!priv->port_up)
		goto tx_drop;

	real_size = get_real_size(skb, dev, &lso_header_size);
	if (unlikely(!real_size))
		goto tx_drop;

	/* Align descriptor to TXBB size */
	desc_size = ALIGN(real_size, TXBB_SIZE);
	nr_txbb = desc_size / TXBB_SIZE;
	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Oversized header or SG list\n");
		goto tx_drop;
	}

	tx_ind = skb->queue_mapping;
	ring = &priv->tx_ring[tx_ind];
	if (vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);

	/* Check available TXBBs and 2K spare for prefetch */
	if (unlikely(((int)(ring->prod - ring->cons)) >
		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
		/* Every full Tx ring stops its queue */
		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
		ring->blocked = 1;
		priv->port_stats.queue_stopped++;

		/* Use interrupts to find out when queue opened */
		cq = &priv->tx_cq[tx_ind];
		mlx4_en_arm_cq(priv, cq);
		return NETDEV_TX_BUSY;
	}

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
			 (u32) (ring->prod - ring->cons - 1));

	/* Packet is good - grab an index and transmit it */
	index = ring->prod & ring->size_mask;
	bf_index = ring->prod;

	/* See if we have enough space for whole descriptor TXBB for setting
	 * SW ownership on next descriptor; if not, use a bounce buffer. */
	if (likely(index + nr_txbb <= ring->size))
		tx_desc = ring->buf + index * TXBB_SIZE;
	else {
		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
		bounce = true;
	}

	/* Save skb in tx_info ring */
	tx_info = &ring->tx_info[index];
	tx_info->skb = skb;
	tx_info->nr_txbb = nr_txbb;

	/* Prepare the ctrl segment, apart from opcode+ownership which
	 * depends on whether LSO is used */
	tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
		!!vlan_tx_tag_present(skb);
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
	tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
		ring->tx_csum++;
	}

	/* Copy dst mac address to wqe */
	ethh = (struct ethhdr *)skb->data;
	tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
	tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
	/* Handle LSO (TSO) packets */
	if (lso_header_size) {
		/* Mark opcode as LSO */
		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

		/* Fill in the LSO prefix */
		tx_desc->lso.mss_hdr_size = cpu_to_be32(
			skb_shinfo(skb)->gso_size << 16 | lso_header_size);

		/* Copy headers;
		 * note that we already verified that it is linear */
		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
		data = ((void *) &tx_desc->lso +
			ALIGN(lso_header_size + 4, DS_SIZE));

		priv->port_stats.tso_packets++;
		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
			!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
		ring->bytes += skb->len + (i - 1) * lso_header_size;
		ring->packets += i;
	} else {
		/* Normal (Non LSO) packet */
		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
		data = &tx_desc->data;
		ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
		ring->packets++;
	}
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);

	/* Valid only for non-inline segments */
	tx_info->data_offset = (void *) data - (void *) tx_desc;

	tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
	data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;

	if (!is_inline(skb, &fragptr)) {
		/* Map fragments */
		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
			frag = &skb_shinfo(skb)->frags[i];
			dma = skb_frag_dma_map(priv->ddev, frag,
					       0, skb_frag_size(frag),
					       DMA_TO_DEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(skb_frag_size(frag));
			--data;
		}

		/* Map linear part */
		if (tx_info->linear) {
			dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
					     skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
		}
		tx_info->inl = 0;
	} else {
		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
		tx_info->inl = 1;
	}

	ring->prod += nr_txbb;

	/* If we used a bounce buffer then copy descriptor back into place */
	if (bounce)
		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

	/* Run destructor before passing skb to HW */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
		*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
		op_own |= htonl((bf_index & 0xffff) << 8);
		/* Ensure new descriptor hits memory
		 * before setting ownership of this descriptor to HW */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;

		wmb();

		mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
			     desc_size);

		wmb();

		ring->bf.offset ^= ring->bf.buf_size;
	} else {
		/* Ensure new descriptor hits memory
		 * before setting ownership of this descriptor to HW */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;
		wmb();
		iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
	}

	/* Poll CQ here */
	mlx4_en_xmit_poll(priv, tx_ind);

	return NETDEV_TX_OK;

tx_drop:
	dev_kfree_skb_any(skb);
	priv->stats.tx_dropped++;
	return NETDEV_TX_OK;
}