/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <asm/page.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/tcp.h>
#include <linux/moduleparam.h>

#include "mlx4_en.h"

enum {
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
	MAX_BF = 256,
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

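/*
 * Allocate all per-ring transmit resources: the tx_info array that
 * tracks in-flight skbs, a bounce buffer for descriptors that wrap
 * past the end of the ring, the HW queue memory, the ring's QP, and
 * a BlueFlame register (falling back to the regular UAR doorbell
 * when no BlueFlame register is available).
 */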
56int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
Yevgeny Petrilin87a5c382011-03-22 22:38:52 +000057 struct mlx4_en_tx_ring *ring, int qpn, u32 size,
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -070058 u16 stride)
59{
60 struct mlx4_en_dev *mdev = priv->mdev;
61 int tmp;
62 int err;
63
64 ring->size = size;
65 ring->size_mask = size - 1;
66 ring->stride = stride;
67
68 inline_thold = min(inline_thold, MAX_INLINE);
69
70 spin_lock_init(&ring->comp_lock);
71
72 tmp = size * sizeof(struct mlx4_en_tx_info);
73 ring->tx_info = vmalloc(tmp);
Joe Perchese404dec2012-01-29 12:56:23 +000074 if (!ring->tx_info)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -070075 return -ENOMEM;
Joe Perchese404dec2012-01-29 12:56:23 +000076
Yevgeny Petrilin453a6082009-06-01 20:27:13 +000077 en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -070078 ring->tx_info, tmp);
79
80 ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
81 if (!ring->bounce_buf) {
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -070082 err = -ENOMEM;
83 goto err_tx;
84 }
85 ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
86
87 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
88 2 * PAGE_SIZE);
89 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +000090 en_err(priv, "Failed allocating hwq resources\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -070091 goto err_bounce;
92 }
93
94 err = mlx4_en_map_buffer(&ring->wqres.buf);
95 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +000096 en_err(priv, "Failed to map TX buffer\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -070097 goto err_hwq_res;
98 }
99
100 ring->buf = ring->wqres.buf.direct.buf;
101
Yevgeny Petrilin453a6082009-06-01 20:27:13 +0000102 en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
103 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
104 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -0700105
Yevgeny Petrilin87a5c382011-03-22 22:38:52 +0000106 ring->qpn = qpn;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -0700107 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
108 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +0000109 en_err(priv, "Failed allocating qp %d\n", ring->qpn);
Yevgeny Petrilin87a5c382011-03-22 22:38:52 +0000110 goto err_map;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -0700111 }
Yevgeny Petrilin966508f2009-04-20 04:30:03 +0000112 ring->qp.event = mlx4_en_sqp_event;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -0700113
Yevgeny Petrilin87a5c382011-03-22 22:38:52 +0000114 err = mlx4_bf_alloc(mdev->dev, &ring->bf);
115 if (err) {
116 en_dbg(DRV, priv, "working without blueflame (%d)", err);
117 ring->bf.uar = &mdev->priv_uar;
118 ring->bf.uar->map = mdev->uar_map;
119 ring->bf_enabled = false;
120 } else
121 ring->bf_enabled = true;
122
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -0700123 return 0;
124
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -0700125err_map:
126 mlx4_en_unmap_buffer(&ring->wqres.buf);
127err_hwq_res:
128 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
129err_bounce:
130 kfree(ring->bounce_buf);
131 ring->bounce_buf = NULL;
132err_tx:
133 vfree(ring->tx_info);
134 ring->tx_info = NULL;
135 return err;
136}

void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	if (ring->bf_enabled)
		mlx4_bf_free(mdev->dev, &ring->bf);
	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
	vfree(ring->tx_info);
	ring->tx_info = NULL;
}

int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	ring->blocked = 0;
	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = ring->qp.qpn << 8;

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
				ring->cqn, &ring->context);
	if (ring->bf_enabled)
		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
			       &ring->qp, &ring->qp_state);

	return err;
}

void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}
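/*
 * Release one TX descriptor: unmap the DMA mappings for the linear
 * part and each page fragment (skipped entirely for inlined packets),
 * then stamp every TXBB of the descriptor with the current ownership
 * value so a stale entry is never mistaken for a fresh one on the
 * next lap around the ring. Both the data segments and the stamping
 * may wrap past the end of the ring buffer. Returns the number of
 * TXBBs the descriptor occupied.
 */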
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, u8 owner)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	struct sk_buff *skb = tx_info->skb;
	struct skb_frag_struct *frag;
	void *end = ring->buf + ring->buf_size;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;
	__be32 *ptr = (__be32 *)tx_desc;
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (!tx_info->inl) {
			if (tx_info->linear) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					 be32_to_cpu(data->byte_count),
					 PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				frag = &skb_shinfo(skb)->frags[i];
				pci_unmap_page(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data[i].addr),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}

	} else {
		if (!tx_info->inl) {
			if ((void *) data >= end) {
				data = ring->buf + ((void *)data - end);
			}

			if (tx_info->linear) {
				pci_unmap_single(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					 be32_to_cpu(data->byte_count),
					 PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				/* Check for wraparound before unmapping */
				if ((void *) data >= end)
					data = ring->buf;
				frag = &skb_shinfo(skb)->frags[i];
				pci_unmap_page(mdev->pdev,
					(dma_addr_t) be64_to_cpu(data->addr),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
				++data;
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *) ptr >= end) {
				ptr = ring->buf;
				stamp ^= cpu_to_be32(0x80000000);
			}
		}

	}
	dev_kfree_skb_any(skb);
	return tx_info->nr_txbb;
}
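/*
 * Drain the ring of any descriptors that never completed (cons is
 * walked up to prod, freeing as it goes); returns how many were freed.
 */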
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
	       ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
						ring->cons & ring->size_mask,
						!!(ring->cons & ring->size));
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}
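/*
 * Reap TX completions: walk the CQ while the ownership bit matches the
 * current lap (the XNOR test), free every descriptor up to the
 * wqe_index reported by each CQE, and only then advance the ring
 * consumer. The CQ consumer index is updated first to prevent CQ
 * overflow. Finally, wake the TX queue if this ring had stopped it.
 */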
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	u16 index;
	u16 new_index, ring_index;
	u32 txbbs_skipped = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;

	if (!priv->port_up)
		return;

	index = cons_index & size_mask;
	cqe = &buf[index];
	ring_index = ring->cons & size_mask;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size)) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		rmb();

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += ring->last_nr_txbb;
			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
			/* free next descriptor */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
					priv, ring, ring_index,
					!!((ring->cons + txbbs_skipped) &
					ring->size));
		} while (ring_index != new_index);

		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[index];
	}

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if this ring stopped it */
	if (unlikely(ring->blocked)) {
		if ((u32) (ring->prod - ring->cons) <=
		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
			ring->blocked = 0;
			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
			priv->port_stats.wake_queue++;
		}
	}
}
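/*
 * Completion interrupt handler: process the CQ if the completion lock
 * is uncontended, otherwise let whoever holds it (or the polling
 * timer) reap the completions.
 */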
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];

	if (!spin_trylock(&ring->comp_lock))
		return;
	mlx4_en_process_tx_cq(cq->dev, cq);
	mod_timer(&cq->timer, jiffies + 1);
	spin_unlock(&ring->comp_lock);
}
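/*
 * Timer callback that backstops completion processing: poll the CQ and
 * re-arm the timer while packets are still in flight, so completions
 * are reaped even if the interface goes idle.
 */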
void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (!spin_trylock_irq(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock_irq(&ring->comp_lock);
}
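/*
 * Copy a descriptor that was built in the bounce buffer back into the
 * ring: first the part that wrapped to the start of the ring, then the
 * part at the end, copying high-to-low with a barrier at each TXBB
 * boundary. The first dword of the descriptor (which holds the
 * ownership bit) is deliberately skipped; the caller writes it last.
 */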
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      u32 index,
						      unsigned int desc_size)
{
	u32 copy = (ring->size - index) * TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4 ; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + index * TXBB_SIZE;
}

static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
	unsigned long flags;

	/* If we don't have a pending timer, set one up to catch our recent
	   post in case the interface becomes idle */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
			mlx4_en_process_tx_cq(priv->dev, cq);
			spin_unlock_irqrestore(&ring->comp_lock, flags);
		}
}
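/*
 * Decide whether a packet is small enough (and simple enough: not GSO,
 * at most one page fragment) to be copied inline into the descriptor
 * instead of being DMA-mapped; optionally returns the fragment address
 * through *pfrag.
 */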
static int is_inline(struct sk_buff *skb, void **pfrag)
{
	void *ptr;

	if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
		if (skb_shinfo(skb)->nr_frags == 1) {
			ptr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]);
			if (unlikely(!ptr))
				return 0;

			if (pfrag)
				*pfrag = ptr;

			return 1;
		} else if (unlikely(skb_shinfo(skb)->nr_frags))
			return 0;
		else
			return 1;
	}

	return 0;
}
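/*
 * Size of an inline descriptor, padded to 16 bytes; packets that cross
 * the MLX4_INLINE_ALIGN boundary need a second inline segment header.
 */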
static int inline_size(struct sk_buff *skb)
{
	if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
	    <= MLX4_INLINE_ALIGN)
		return ALIGN(skb->len + CTRL_SIZE +
			     sizeof(struct mlx4_wqe_inline_seg), 16);
	else
		return ALIGN(skb->len + CTRL_SIZE + 2 *
			     sizeof(struct mlx4_wqe_inline_seg), 16);
}
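/*
 * Compute the exact descriptor size in bytes: for LSO packets, a
 * control segment plus the aligned LSO header plus one data segment
 * per fragment (and one more for any linear payload beyond the
 * header); otherwise either the inline size or one data segment per
 * fragment plus one for the linear part. Returns 0 if the headers are
 * not linear, which the caller treats as a drop.
 */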
static int get_real_size(struct sk_buff *skb, struct net_device *dev,
			 int *lso_header_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int real_size;

	if (skb_is_gso(skb)) {
		*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
		real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
			ALIGN(*lso_header_size + 4, DS_SIZE);
		if (unlikely(*lso_header_size != skb_headlen(skb))) {
			/* We add a segment for the skb linear buffer only if
			 * it contains data */
			if (*lso_header_size < skb_headlen(skb))
				real_size += DS_SIZE;
			else {
				if (netif_msg_tx_err(priv))
					en_warn(priv, "Non-linear headers\n");
				return 0;
			}
		}
	} else {
		*lso_header_size = 0;
		if (!is_inline(skb, NULL))
			real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
		else
			real_size = inline_size(skb);
	}

	return real_size;
}
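/*
 * Copy the whole packet into the descriptor's inline segment(s). When
 * the data does not fit in one MLX4_INLINE_ALIGN chunk it is split in
 * two, and the second segment's byte_count is written only after a
 * barrier so the HW cannot see a valid count before the data itself.
 */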
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
			     int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

	if (skb->len <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
		if (skb_shinfo(skb)->nr_frags)
			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
			       skb_frag_size(&skb_shinfo(skb)->frags[0]));

	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (skb_headlen(skb) <= spc) {
			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
			if (skb_headlen(skb) < spc) {
				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
					fragptr, spc - skb_headlen(skb));
				fragptr += spc - skb_headlen(skb);
			}
			inl = (void *) (inl + 1) + spc;
			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
		} else {
			skb_copy_from_linear_data(skb, inl + 1, spc);
			inl = (void *) (inl + 1) + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
					skb_headlen(skb) - spc);
			if (skb_shinfo(skb)->nr_frags)
				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
					fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0]));
		}

		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
		(!!vlan_tx_tag_present(skb));
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
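/*
 * Queue selection: with per-priority flow control enabled, vlan-tagged
 * packets go to the ring reserved for their 802.1p priority (the top
 * three bits of the vlan tag); everything else is spread by
 * skb_tx_hash().
 */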
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 vlan_tag = 0;

	/* If we support per priority flow control and the packet contains
	 * a vlan tag, send the packet to the TX ring assigned to that priority
	 */
	if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
	}

	return skb_tx_hash(dev, skb);
}
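/*
 * Copy a descriptor into the (write-combining) BlueFlame register
 * using 64-bit writes, so the HW receives the descriptor directly
 * instead of fetching it by DMA after a doorbell.
 */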
static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
{
	__iowrite64_copy(dst, src, bytecnt / 8);
}
592
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_wqe_data_seg *data;
	struct skb_frag_struct *frag;
	struct mlx4_en_tx_info *tx_info;
	struct ethhdr *ethh;
	u64 mac;
	u32 mac_l, mac_h;
	int tx_ind = 0;
	int nr_txbb;
	int desc_size;
	int real_size;
	dma_addr_t dma;
	u32 index, bf_index;
	__be32 op_own;
	u16 vlan_tag = 0;
	int i;
	int lso_header_size;
	void *fragptr;
	bool bounce = false;

	if (!priv->port_up)
		goto tx_drop;

	real_size = get_real_size(skb, dev, &lso_header_size);
	if (unlikely(!real_size))
		goto tx_drop;

	/* Align descriptor to TXBB size */
	desc_size = ALIGN(real_size, TXBB_SIZE);
	nr_txbb = desc_size / TXBB_SIZE;
	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Oversized header or SG list\n");
		goto tx_drop;
	}

	tx_ind = skb->queue_mapping;
	ring = &priv->tx_ring[tx_ind];
	if (vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);

	/* Check available TXBBs and 2K spare for prefetch */
	if (unlikely(((int)(ring->prod - ring->cons)) >
		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
		/* every full Tx ring stops queue */
		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
		ring->blocked = 1;
		priv->port_stats.queue_stopped++;

		/* Use interrupts to find out when queue opened */
		cq = &priv->tx_cq[tx_ind];
		mlx4_en_arm_cq(priv, cq);
		return NETDEV_TX_BUSY;
	}

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
			 (u32) (ring->prod - ring->cons - 1));

	/* Packet is good - grab an index and transmit it */
	index = ring->prod & ring->size_mask;
	bf_index = ring->prod;

	/* See if we have enough space for whole descriptor TXBB for setting
	 * SW ownership on next descriptor; if not, use a bounce buffer. */
	if (likely(index + nr_txbb <= ring->size))
		tx_desc = ring->buf + index * TXBB_SIZE;
	else {
		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
		bounce = true;
	}

	/* Save skb in tx_info ring */
	tx_info = &ring->tx_info[index];
	tx_info->skb = skb;
	tx_info->nr_txbb = nr_txbb;

	/* Prepare ctrl segment apart from opcode+ownership, which depends
	 * on whether LSO is used */
	tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
		!!vlan_tx_tag_present(skb);
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
	tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
		ring->tx_csum++;
	}

	/* Copy dst mac address to wqe */
	skb_reset_mac_header(skb);
	ethh = eth_hdr(skb);
	if (ethh && ethh->h_dest) {
		mac = mlx4_en_mac_to_u64(ethh->h_dest);
		mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
		mac_l = (u32) (mac & 0xffffffff);
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
		tx_desc->ctrl.imm = cpu_to_be32(mac_l);
	}

	/* Handle LSO (TSO) packets */
	if (lso_header_size) {
		/* Mark opcode as LSO */
		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

		/* Fill in the LSO prefix */
		tx_desc->lso.mss_hdr_size = cpu_to_be32(
			skb_shinfo(skb)->gso_size << 16 | lso_header_size);

		/* Copy headers;
		 * note that we already verified that it is linear */
		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
		data = ((void *) &tx_desc->lso +
			ALIGN(lso_header_size + 4, DS_SIZE));

		priv->port_stats.tso_packets++;
		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
			!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
		ring->bytes += skb->len + (i - 1) * lso_header_size;
		ring->packets += i;
	} else {
		/* Normal (Non LSO) packet */
		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
		data = &tx_desc->data;
		ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
		ring->packets++;

	}
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);

	/* valid only for non-inline segments */
	tx_info->data_offset = (void *) data - (void *) tx_desc;

	tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
	data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;

	if (!is_inline(skb, &fragptr)) {
		/* Map fragments */
		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
			frag = &skb_shinfo(skb)->frags[i];
			dma = skb_frag_dma_map(&mdev->dev->pdev->dev, frag,
					       0, skb_frag_size(frag),
					       DMA_TO_DEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(skb_frag_size(frag));
			--data;
		}

		/* Map linear part */
		if (tx_info->linear) {
			dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
					     skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
		}
		tx_info->inl = 0;
	} else {
		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
		tx_info->inl = 1;
	}

	ring->prod += nr_txbb;

	/* If we used a bounce buffer then copy descriptor back into place */
	if (bounce)
		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

	/* Run destructor before passing skb to HW */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

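	/* Small descriptors without a vlan tag can be pushed through the
	 * BlueFlame register, writing the whole descriptor to the HCA and
	 * skipping the doorbell; everything else sets ownership in memory
	 * and rings the doorbell. */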
	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
		*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
		op_own |= htonl((bf_index & 0xffff) << 8);
		/* Ensure new descriptor hits memory
		 * before setting ownership of this descriptor to HW */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;

		wmb();

		mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
			     desc_size);

		wmb();

		ring->bf.offset ^= ring->bf.buf_size;
	} else {
		/* Ensure new descriptor hits memory
		 * before setting ownership of this descriptor to HW */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;
		wmb();
		iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
	}

	/* Poll CQ here */
	mlx4_en_xmit_poll(priv, tx_ind);

	return NETDEV_TX_OK;

tx_drop:
	dev_kfree_skb_any(skb);
	priv->stats.tx_dropped++;
	return NETDEV_TX_OK;
}