/*****************************************************************************
 *                                                                           *
 * File: sge.c                                                               *
 * $Revision: 1.26 $                                                         *
 * $Date: 2005/06/21 18:29:48 $                                              *
 * Description:                                                              *
 *  DMA engine.                                                              *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, write to the Free Software Foundation, Inc.,  *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                  *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/slab.h>

#include "cpl5_cmd.h"
#include "sge.h"
#include "regs.h"
#include "espi.h"

/* This belongs in if_ether.h */
#define ETH_P_CPL5 0xf

#define SGE_CMDQ_N              2
#define SGE_FREELQ_N            2
#define SGE_CMDQ0_E_N           1024
#define SGE_CMDQ1_E_N           128
#define SGE_FREEL_SIZE          4096
#define SGE_JUMBO_FREEL_SIZE    512
#define SGE_FREEL_REFILL_THRESH 16
#define SGE_RESPQ_E_N           1024
#define SGE_INTRTIMER_NRES      1000
#define SGE_RX_SM_BUF_SIZE      1536
#define SGE_TX_DESC_MAX_PLEN    16384

#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)

/*
 * Period of the TX buffer reclaim timer.  This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

#define M_CMD_LEN       0x7fffffff
#define V_CMD_LEN(v)    (v)
#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)   ((v) << 31)
#define V_CMD_GEN2(v)   (v)
#define F_CMD_DATAVALID (1 << 1)
#define F_CMD_SOP       (1 << 2)
#define V_CMD_EOP(v)    ((v) << 3)
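
/*
 * Example encoding (plain arithmetic on the macros above, not taken from
 * the hardware docs): a 1500-byte buffer with generation bit 1 gives
 *   len_gen = V_CMD_LEN(1500) | V_CMD_GEN1(1) = 0x5dc | 0x80000000
 *           = 0x800005dc
 * while V_CMD_GEN2() places the same generation bit in the last-written
 * word, letting the SGE recognize a completely written descriptor.
 */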

/*
 * Command queue, receive buffer list, and response queue descriptors.
 */
#if defined(__BIG_ENDIAN_BITFIELD)
struct cmdQ_e {
        u32 addr_lo;
        u32 len_gen;
        u32 flags;
        u32 addr_hi;
};

struct freelQ_e {
        u32 addr_lo;
        u32 len_gen;
        u32 gen2;
        u32 addr_hi;
};

struct respQ_e {
        u32 Qsleeping           : 4;
        u32 Cmdq1CreditReturn   : 5;
        u32 Cmdq1DmaComplete    : 5;
        u32 Cmdq0CreditReturn   : 5;
        u32 Cmdq0DmaComplete    : 5;
        u32 FreelistQid         : 2;
        u32 CreditValid         : 1;
        u32 DataValid           : 1;
        u32 Offload             : 1;
        u32 Eop                 : 1;
        u32 Sop                 : 1;
        u32 GenerationBit       : 1;
        u32 BufferLength;
};
#elif defined(__LITTLE_ENDIAN_BITFIELD)
struct cmdQ_e {
        u32 len_gen;
        u32 addr_lo;
        u32 addr_hi;
        u32 flags;
};

struct freelQ_e {
        u32 len_gen;
        u32 addr_lo;
        u32 addr_hi;
        u32 gen2;
};

struct respQ_e {
        u32 BufferLength;
        u32 GenerationBit       : 1;
        u32 Sop                 : 1;
        u32 Eop                 : 1;
        u32 Offload             : 1;
        u32 DataValid           : 1;
        u32 CreditValid         : 1;
        u32 FreelistQid         : 2;
        u32 Cmdq0DmaComplete    : 5;
        u32 Cmdq0CreditReturn   : 5;
        u32 Cmdq1DmaComplete    : 5;
        u32 Cmdq1CreditReturn   : 5;
        u32 Qsleeping           : 4;
};
#endif
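
/*
 * The two sets of definitions above describe the same hardware layout;
 * C bitfield allocation order depends on the target's endianness, so the
 * field order is mirrored to keep each field at the same bit position
 * within the descriptor words.
 */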

/*
 * SW Context Command and Freelist Queue Descriptors
 */
struct cmdQ_ce {
        struct sk_buff *skb;
        DEFINE_DMA_UNMAP_ADDR(dma_addr);
        DEFINE_DMA_UNMAP_LEN(dma_len);
};

struct freelQ_ce {
        struct sk_buff *skb;
        DEFINE_DMA_UNMAP_ADDR(dma_addr);
        DEFINE_DMA_UNMAP_LEN(dma_len);
};

/*
 * SW command, freelist and response rings
 */
struct cmdQ {
        unsigned long   status;         /* HW DMA fetch status */
        unsigned int    in_use;         /* # of in-use command descriptors */
        unsigned int    size;           /* # of descriptors */
        unsigned int    processed;      /* total # of descs HW has processed */
        unsigned int    cleaned;        /* total # of descs SW has reclaimed */
        unsigned int    stop_thres;     /* SW TX queue suspend threshold */
        u16             pidx;           /* producer index (SW) */
        u16             cidx;           /* consumer index (HW) */
        u8              genbit;         /* current generation (=valid) bit */
        u8              sop;            /* is next entry start of packet? */
        struct cmdQ_e  *entries;        /* HW command descriptor Q */
        struct cmdQ_ce *centries;       /* SW command context descriptor Q */
        dma_addr_t      dma_addr;       /* DMA addr HW command descriptor Q */
        spinlock_t      lock;           /* Lock to protect cmdQ enqueuing */
};

struct freelQ {
        unsigned int    credits;        /* # of available RX buffers */
        unsigned int    size;           /* free list capacity */
        u16             pidx;           /* producer index (SW) */
        u16             cidx;           /* consumer index (HW) */
        u16             rx_buffer_size; /* Buffer size on this free list */
        u16             dma_offset;     /* DMA offset to align IP headers */
        u16             recycleq_idx;   /* skb recycle q to use */
        u8              genbit;         /* current generation (=valid) bit */
        struct freelQ_e *entries;       /* HW freelist descriptor Q */
        struct freelQ_ce *centries;     /* SW freelist context descriptor Q */
        dma_addr_t      dma_addr;       /* DMA addr HW freelist descriptor Q */
};

struct respQ {
        unsigned int    credits;        /* credits to be returned to SGE */
        unsigned int    size;           /* # of response Q descriptors */
        u16             cidx;           /* consumer index (SW) */
        u8              genbit;         /* current generation(=valid) bit */
        struct respQ_e *entries;        /* HW response descriptor Q */
        dma_addr_t      dma_addr;       /* DMA addr HW response descriptor Q */
};

/* Bit flags for cmdQ.status */
enum {
        CMDQ_STAT_RUNNING = 1,          /* fetch engine is running */
        CMDQ_STAT_LAST_PKT_DB = 2       /* last packet rung the doorbell */
};
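
/*
 * Note on the status bits (see sched_skb() and update_tx_info() below):
 * SW sets CMDQ_STAT_RUNNING before ringing the doorbell and sets
 * CMDQ_STAT_LAST_PKT_DB when the most recently queued packet was
 * accompanied by a doorbell, so the queue is only kicked again when the
 * fetch engine may actually have stopped.
 */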

/* T204 TX SW scheduler */

/* Per T204 TX port */
struct sched_port {
        unsigned int    avail;          /* available bits - quota */
        unsigned int    drain_bits_per_1024ns; /* drain rate */
        unsigned int    speed;          /* drain rate, mbps */
        unsigned int    mtu;            /* mtu size */
        struct sk_buff_head skbq;       /* pending skbs */
};

/* Per T204 device */
struct sched {
        ktime_t         last_updated;   /* last time quotas were computed */
        unsigned int    max_avail;      /* max bits to be sent to any port */
        unsigned int    port;           /* port index (round robin ports) */
        unsigned int    num;            /* num skbs in per port queues */
        struct sched_port p[MAX_NPORTS];
        struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
};
static void restart_sched(unsigned long);


/*
 * Main SGE data structure
 *
 * Interrupts are handled by a single CPU and it is likely that on a MP system
 * the application is migrated to another CPU. In that scenario, we try to
 * separate the RX(in irq context) and TX state in order to decrease memory
 * contention.
 */
struct sge {
        struct adapter *adapter;        /* adapter backpointer */
        struct net_device *netdev;      /* netdevice backpointer */
        struct freelQ   freelQ[SGE_FREELQ_N]; /* buffer free lists */
        struct respQ    respQ;          /* response Q */
        unsigned long   stopped_tx_queues; /* bitmap of suspended Tx queues */
        unsigned int    rx_pkt_pad;     /* RX padding for L2 packets */
        unsigned int    jumbo_fl;       /* jumbo freelist Q index */
        unsigned int    intrtimer_nres; /* no-resource interrupt timer */
        unsigned int    fixed_intrtimer;/* non-adaptive interrupt timer */
        struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
        struct timer_list espibug_timer;
        unsigned long   espibug_timeout;
        struct sk_buff  *espibug_skb[MAX_NPORTS];
        u32             sge_control;    /* shadow value of sge control reg */
        struct sge_intr_counts stats;
        struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
        struct sched    *tx_sched;
        struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};

/*
 * stop tasklet and free all pending skb's
 */
static void tx_sched_stop(struct sge *sge)
{
        struct sched *s = sge->tx_sched;
        int i;

        tasklet_kill(&s->sched_tsk);

        for (i = 0; i < MAX_NPORTS; i++)
                __skb_queue_purge(&s->p[i].skbq);
}

/*
 * t1_sched_update_parms() is called when the MTU or link speed changes. It
 * re-computes scheduler parameters to cope with the change.
 */
unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
                                   unsigned int mtu, unsigned int speed)
{
        struct sched *s = sge->tx_sched;
        struct sched_port *p = &s->p[port];
        unsigned int max_avail_segs;

        pr_debug("t1_sched_update_parms mtu=%d speed=%d\n", mtu, speed);
        if (speed)
                p->speed = speed;
        if (mtu)
                p->mtu = mtu;

        if (speed || mtu) {
                unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
                do_div(drain, (p->mtu + 50) * 1000);
                p->drain_bits_per_1024ns = (unsigned int) drain;

                if (p->speed < 1000)
                        p->drain_bits_per_1024ns =
                                90 * p->drain_bits_per_1024ns / 100;
        }

        if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
                p->drain_bits_per_1024ns -= 16;
                s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
                max_avail_segs = max(1U, 4096 / (p->mtu - 40));
        } else {
                s->max_avail = 16384;
                max_avail_segs = max(1U, 9000 / (p->mtu - 40));
        }

        pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
                 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
                 p->speed, s->max_avail, max_avail_segs,
                 p->drain_bits_per_1024ns);

        return max_avail_segs * (p->mtu - 40);
}
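
/*
 * Worked example (plain arithmetic on the code above, not from the
 * hardware docs): at mtu 1500 and speed 1000,
 *   drain = 1024 * 1000 * (1500 - 40) / ((1500 + 50) * 1000) ~= 964
 * units per 1024 ns, i.e. roughly 941 Mb/s of drain once the -40 (header
 * bytes not counted as payload) and +50 (presumably per-packet wire
 * overhead) adjustments are applied.
 */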

#if 0

/*
 * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of
 * data that can be pushed per port.
 */
void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
{
        struct sched *s = sge->tx_sched;
        unsigned int i;

        s->max_avail = val;
        for (i = 0; i < MAX_NPORTS; i++)
                t1_sched_update_parms(sge, i, 0, 0);
}

/*
 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
 * is draining.
 */
void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
                                    unsigned int val)
{
        struct sched *s = sge->tx_sched;
        struct sched_port *p = &s->p[port];
        p->drain_bits_per_1024ns = val * 1024 / 1000;
        t1_sched_update_parms(sge, port, 0, 0);
}

#endif  /*  0  */


/*
 * get_clock() implements a ns clock (see ktime_get)
 */
static inline ktime_t get_clock(void)
{
        struct timespec ts;

        ktime_get_ts(&ts);
        return timespec_to_ktime(ts);
}

/*
 * tx_sched_init() allocates resources and does basic initialization.
 */
static int tx_sched_init(struct sge *sge)
{
        struct sched *s;
        int i;

        s = kzalloc(sizeof (struct sched), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        pr_debug("tx_sched_init\n");
        tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
        sge->tx_sched = s;

        for (i = 0; i < MAX_NPORTS; i++) {
                skb_queue_head_init(&s->p[i].skbq);
                t1_sched_update_parms(sge, i, 1500, 1000);
        }

        return 0;
}

/*
 * sched_update_avail() computes the delta since the last time it was called
 * and updates the per port quota (number of bits that can be sent to any
 * port).
 */
static inline int sched_update_avail(struct sge *sge)
{
        struct sched *s = sge->tx_sched;
        ktime_t now = get_clock();
        unsigned int i;
        long long delta_time_ns;

        delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));

        pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
        if (delta_time_ns < 15000)
                return 0;

        for (i = 0; i < MAX_NPORTS; i++) {
                struct sched_port *p = &s->p[i];
                unsigned int delta_avail;

                delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
                p->avail = min(p->avail + delta_avail, s->max_avail);
        }

        s->last_updated = now;

        return 1;
}
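
/*
 * Example of the quota update: with drain_bits_per_1024ns = 964 and a
 * 15000 ns delta, delta_avail = (964 * 15000) >> 13 = 1765.  The shift
 * divides by 1024 (the rate unit) and by a further 8, which suggests the
 * quota is effectively tracked in bytes; sched_skb() indeed compares
 * skb->len (bytes) against p->avail.
 */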

/*
 * sched_skb() is called from two different places. In the tx path, any
 * packet generating load on an output port will call sched_skb()
 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
 * context (skb == NULL).
 * The scheduler only returns a skb (which will then be sent) if the
 * length of the skb is <= the current quota of the output port.
 */
static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
                                 unsigned int credits)
{
        struct sched *s = sge->tx_sched;
        struct sk_buff_head *skbq;
        unsigned int i, len, update = 1;

        pr_debug("sched_skb %p\n", skb);
        if (!skb) {
                if (!s->num)
                        return NULL;
        } else {
                skbq = &s->p[skb->dev->if_port].skbq;
                __skb_queue_tail(skbq, skb);
                s->num++;
                skb = NULL;
        }

        if (credits < MAX_SKB_FRAGS + 1)
                goto out;

again:
        for (i = 0; i < MAX_NPORTS; i++) {
                s->port = (s->port + 1) & (MAX_NPORTS - 1);
                skbq = &s->p[s->port].skbq;

                skb = skb_peek(skbq);

                if (!skb)
                        continue;

                len = skb->len;
                if (len <= s->p[s->port].avail) {
                        s->p[s->port].avail -= len;
                        s->num--;
                        __skb_unlink(skb, skbq);
                        goto out;
                }
                skb = NULL;
        }

        if (update-- && sched_update_avail(sge))
                goto again;

out:
        /* If there are more pending skbs, we use the hardware to schedule us
         * again.
         */
        if (s->num && !skb) {
                struct cmdQ *q = &sge->cmdQ[0];
                clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
                if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
                        set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
                        writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
                }
        }
        pr_debug("sched_skb ret %p\n", skb);

        return skb;
}

/*
 * PIO to indicate that memory mapped Q contains valid descriptor(s).
 */
static inline void doorbell_pio(struct adapter *adapter, u32 val)
{
        wmb();
        writel(val, adapter->regs + A_SG_DOORBELL);
}

/*
 * Frees all RX buffers on the freelist Q. The caller must make sure that
 * the SGE is turned off before calling this function.
 */
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
{
        unsigned int cidx = q->cidx;

        while (q->credits--) {
                struct freelQ_ce *ce = &q->centries[cidx];

                pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
                                 dma_unmap_len(ce, dma_len),
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb(ce->skb);
                ce->skb = NULL;
                if (++cidx == q->size)
                        cidx = 0;
        }
}

/*
 * Free RX free list and response queue resources.
 */
static void free_rx_resources(struct sge *sge)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        if (sge->respQ.entries) {
                size = sizeof(struct respQ_e) * sge->respQ.size;
                pci_free_consistent(pdev, size, sge->respQ.entries,
                                    sge->respQ.dma_addr);
        }

        for (i = 0; i < SGE_FREELQ_N; i++) {
                struct freelQ *q = &sge->freelQ[i];

                if (q->centries) {
                        free_freelQ_buffers(pdev, q);
                        kfree(q->centries);
                }
                if (q->entries) {
                        size = sizeof(struct freelQ_e) * q->size;
                        pci_free_consistent(pdev, size, q->entries,
                                            q->dma_addr);
                }
        }
}

/*
 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
 * response queue.
 */
static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_FREELQ_N; i++) {
                struct freelQ *q = &sge->freelQ[i];

                q->genbit = 1;
                q->size = p->freelQ_size[i];
                q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
                size = sizeof(struct freelQ_e) * q->size;
                q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
                if (!q->entries)
                        goto err_no_mem;

                size = sizeof(struct freelQ_ce) * q->size;
                q->centries = kzalloc(size, GFP_KERNEL);
                if (!q->centries)
                        goto err_no_mem;
        }

        /*
         * Calculate the buffer sizes for the two free lists.  FL0 accommodates
         * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
         * including all the sk_buff overhead.
         *
         * Note: For T2 FL0 and FL1 are reversed.
         */
        sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
                sizeof(struct cpl_rx_data) +
                sge->freelQ[!sge->jumbo_fl].dma_offset;

        size = (16 * 1024) -
            SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
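
        /*
         * On 4KB-page systems SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
         * is a few hundred bytes, so the jumbo buffer lands just under 16KB
         * and the overall skb data allocation (buffer + shared info) stays
         * within the 16K bound mentioned in the comment above.
         */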

        /*
         * Setup which skb recycle Q should be used when recycling buffers from
         * each free list.
         */
        sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
        sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;

        sge->respQ.genbit = 1;
        sge->respQ.size = SGE_RESPQ_E_N;
        sge->respQ.credits = 0;
        size = sizeof(struct respQ_e) * sge->respQ.size;
        sge->respQ.entries =
                pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
        if (!sge->respQ.entries)
                goto err_no_mem;
        return 0;

err_no_mem:
        free_rx_resources(sge);
        return -ENOMEM;
}

/*
 * Reclaims n TX descriptors and frees the buffers associated with them.
 */
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
{
        struct cmdQ_ce *ce;
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int cidx = q->cidx;

        q->in_use -= n;
        ce = &q->centries[cidx];
        while (n--) {
                if (likely(dma_unmap_len(ce, dma_len))) {
                        pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
                                         dma_unmap_len(ce, dma_len),
                                         PCI_DMA_TODEVICE);
                        if (q->sop)
                                q->sop = 0;
                }
                if (ce->skb) {
                        dev_kfree_skb_any(ce->skb);
                        q->sop = 1;
                }
                ce++;
                if (++cidx == q->size) {
                        cidx = 0;
                        ce = q->centries;
                }
        }
        q->cidx = cidx;
}

/*
 * Free TX resources.
 *
 * Assumes that SGE is stopped and all interrupts are disabled.
 */
static void free_tx_resources(struct sge *sge)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_CMDQ_N; i++) {
                struct cmdQ *q = &sge->cmdQ[i];

                if (q->centries) {
                        if (q->in_use)
                                free_cmdQ_buffers(sge, q, q->in_use);
                        kfree(q->centries);
                }
                if (q->entries) {
                        size = sizeof(struct cmdQ_e) * q->size;
                        pci_free_consistent(pdev, size, q->entries,
                                            q->dma_addr);
                }
        }
}

/*
 * Allocates basic TX resources, consisting of memory mapped command Qs.
 */
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_CMDQ_N; i++) {
                struct cmdQ *q = &sge->cmdQ[i];

                q->genbit = 1;
                q->sop = 1;
                q->size = p->cmdQ_size[i];
                q->in_use = 0;
                q->status = 0;
                q->processed = q->cleaned = 0;
                q->stop_thres = 0;
                spin_lock_init(&q->lock);
                size = sizeof(struct cmdQ_e) * q->size;
                q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
                if (!q->entries)
                        goto err_no_mem;

                size = sizeof(struct cmdQ_ce) * q->size;
                q->centries = kzalloc(size, GFP_KERNEL);
                if (!q->centries)
                        goto err_no_mem;
        }

        /*
         * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
         * only. For queue 0 set the stop threshold so we can handle one more
         * packet from each port, plus reserve an additional 24 entries for
         * Ethernet packets only. Queue 1 never suspends nor do we reserve
         * space for Ethernet packets.
         */
        sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
                (MAX_SKB_FRAGS + 1);
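        /*
         * E.g. with two ports and 4KB pages (where MAX_SKB_FRAGS is
         * typically 17), stop_thres works out to 2 * 18 = 36 descriptors:
         * enough headroom for one maximally fragmented packet per port.
         */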
        return 0;

err_no_mem:
        free_tx_resources(sge);
        return -ENOMEM;
}

static inline void setup_ring_params(struct adapter *adapter, u64 addr,
                                     u32 size, int base_reg_lo,
                                     int base_reg_hi, int size_reg)
{
        writel((u32)addr, adapter->regs + base_reg_lo);
        writel(addr >> 32, adapter->regs + base_reg_hi);
        writel(size, adapter->regs + size_reg);
}

/*
 * Enable/disable VLAN acceleration.
 */
void t1_set_vlan_accel(struct adapter *adapter, int on_off)
{
        struct sge *sge = adapter->sge;

        sge->sge_control &= ~F_VLAN_XTRACT;
        if (on_off)
                sge->sge_control |= F_VLAN_XTRACT;
        if (adapter->open_device_map) {
                writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
                readl(adapter->regs + A_SG_CONTROL); /* flush */
        }
}

/*
 * Programs the various SGE registers. However, the engine is not yet enabled,
 * but sge->sge_control is setup and ready to go.
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
        struct adapter *ap = sge->adapter;

        writel(0, ap->regs + A_SG_CONTROL);
        setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
                          A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
        setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
                          A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
        setup_ring_params(ap, sge->freelQ[0].dma_addr,
                          sge->freelQ[0].size, A_SG_FL0BASELWR,
                          A_SG_FL0BASEUPR, A_SG_FL0SIZE);
        setup_ring_params(ap, sge->freelQ[1].dma_addr,
                          sge->freelQ[1].size, A_SG_FL1BASELWR,
                          A_SG_FL1BASEUPR, A_SG_FL1SIZE);

        /* The threshold comparison uses <. */
        writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

        setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
                          A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
        writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

        sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
                F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
                V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
                V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
        sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

        /* Initialize no-resource timer */
        sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

        t1_sge_set_coalesce_params(sge, p);
}

/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
        return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
                sge->freelQ[sge->jumbo_fl].dma_offset -
                sizeof(struct cpl_rx_data);
}

/*
 * Frees all SGE related resources and the sge structure itself
 */
void t1_sge_destroy(struct sge *sge)
{
        int i;

        for_each_port(sge->adapter, i)
                free_percpu(sge->port_stats[i]);

        kfree(sge->tx_sched);
        free_tx_resources(sge);
        free_rx_resources(sge);
        kfree(sge);
}

/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done. This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
 * aligned.
 */
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        struct freelQ_ce *ce = &q->centries[q->pidx];
        struct freelQ_e *e = &q->entries[q->pidx];
        unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

        while (q->credits < q->size) {
                struct sk_buff *skb;
                dma_addr_t mapping;

                skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
                if (!skb)
                        break;

                skb_reserve(skb, q->dma_offset);
                mapping = pci_map_single(pdev, skb->data, dma_len,
                                         PCI_DMA_FROMDEVICE);
                skb_reserve(skb, sge->rx_pkt_pad);

                ce->skb = skb;
                dma_unmap_addr_set(ce, dma_addr, mapping);
                dma_unmap_len_set(ce, dma_len, dma_len);
                e->addr_lo = (u32)mapping;
                e->addr_hi = (u64)mapping >> 32;
                e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
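                /*
                 * Write the generation bit last: the descriptor only becomes
                 * valid to the SGE once gen2 carries the current generation,
                 * so the barrier below keeps the address and length visible
                 * to hardware first.
                 */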
                wmb();
                e->gen2 = V_CMD_GEN2(q->genbit);

                e++;
                ce++;
                if (++q->pidx == q->size) {
                        q->pidx = 0;
                        q->genbit ^= 1;
                        ce = q->centries;
                        e = q->entries;
                }
                q->credits++;
        }
}

/*
 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
 */
static void freelQs_empty(struct sge *sge)
{
        struct adapter *adapter = sge->adapter;
        u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
        u32 irqholdoff_reg;

        refill_free_list(sge, &sge->freelQ[0]);
        refill_free_list(sge, &sge->freelQ[1]);

        if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
            sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
                irq_reg |= F_FL_EXHAUSTED;
                irqholdoff_reg = sge->fixed_intrtimer;
        } else {
                /* Clear the F_FL_EXHAUSTED interrupts for now */
                irq_reg &= ~F_FL_EXHAUSTED;
                irqholdoff_reg = sge->intrtimer_nres;
        }
        writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
        writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);

        /* We reenable the Qs to force a freelist GTS interrupt later */
        doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
}

#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
                        F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)

/*
 * Disable SGE Interrupts
 */
void t1_sge_intr_disable(struct sge *sge)
{
        u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

        writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
        writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
}

/*
 * Enable SGE interrupts.
 */
void t1_sge_intr_enable(struct sge *sge)
{
        u32 en = SGE_INT_ENABLE;
        u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

        if (sge->adapter->flags & TSO_CAPABLE)
                en &= ~F_PACKET_TOO_BIG;
        writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
        writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
}

/*
 * Clear SGE interrupts.
 */
void t1_sge_intr_clear(struct sge *sge)
{
        writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
        writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
}

/*
 * SGE 'Error' interrupt handler
 */
int t1_sge_intr_error_handler(struct sge *sge)
{
        struct adapter *adapter = sge->adapter;
        u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);

        if (adapter->flags & TSO_CAPABLE)
                cause &= ~F_PACKET_TOO_BIG;
        if (cause & F_RESPQ_EXHAUSTED)
                sge->stats.respQ_empty++;
        if (cause & F_RESPQ_OVERFLOW) {
                sge->stats.respQ_overflow++;
                pr_alert("%s: SGE response queue overflow\n",
                         adapter->name);
        }
        if (cause & F_FL_EXHAUSTED) {
                sge->stats.freelistQ_empty++;
                freelQs_empty(sge);
        }
        if (cause & F_PACKET_TOO_BIG) {
                sge->stats.pkt_too_big++;
                pr_alert("%s: SGE max packet size exceeded\n",
                         adapter->name);
        }
        if (cause & F_PACKET_MISMATCH) {
                sge->stats.pkt_mismatch++;
                pr_alert("%s: SGE packet mismatch\n", adapter->name);
        }
        if (cause & SGE_INT_FATAL)
                t1_fatal_err(adapter);

        writel(cause, adapter->regs + A_SG_INT_CAUSE);
        return 0;
}

const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
{
        return &sge->stats;
}

void t1_sge_get_port_stats(const struct sge *sge, int port,
                           struct sge_port_stats *ss)
{
        int cpu;

        memset(ss, 0, sizeof(*ss));
        for_each_possible_cpu(cpu) {
                struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);

                ss->rx_cso_good += st->rx_cso_good;
                ss->tx_cso += st->tx_cso;
                ss->tx_tso += st->tx_tso;
                ss->tx_need_hdrroom += st->tx_need_hdrroom;
                ss->vlan_xtract += st->vlan_xtract;
                ss->vlan_insert += st->vlan_insert;
        }
}

/**
 *	recycle_fl_buf - recycle a free list buffer
 *	@fl: the free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_fl_buf(struct freelQ *fl, int idx)
{
        struct freelQ_e *from = &fl->entries[idx];
        struct freelQ_e *to = &fl->entries[fl->pidx];

        fl->centries[fl->pidx] = fl->centries[idx];
        to->addr_lo = from->addr_lo;
        to->addr_hi = from->addr_hi;
        to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
        wmb();
        to->gen2 = V_CMD_GEN2(fl->genbit);
        fl->credits++;

        if (++fl->pidx == fl->size) {
                fl->pidx = 0;
                fl->genbit ^= 1;
        }
}

static int copybreak __read_mostly = 256;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
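
/*
 * Rationale (see get_packet() below): frames shorter than copybreak are
 * copied into a freshly allocated skb and the original DMA buffer is
 * recycled onto the free list, which is cheaper than unmapping and
 * refilling a large buffer for every small packet.
 */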

/**
 *	get_packet - return the next ingress packet buffer
 *	@pdev: the PCI device that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the actual packet length, excluding any SGE padding
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff. If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself. If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static inline struct sk_buff *get_packet(struct pci_dev *pdev,
                                         struct freelQ *fl, unsigned int len)
{
        struct sk_buff *skb;
        const struct freelQ_ce *ce = &fl->centries[fl->cidx];

        if (len < copybreak) {
                skb = alloc_skb(len + 2, GFP_ATOMIC);
                if (!skb)
                        goto use_orig_buf;

                skb_reserve(skb, 2);    /* align IP header */
                skb_put(skb, len);
                pci_dma_sync_single_for_cpu(pdev,
                                            dma_unmap_addr(ce, dma_addr),
                                            dma_unmap_len(ce, dma_len),
                                            PCI_DMA_FROMDEVICE);
                skb_copy_from_linear_data(ce->skb, skb->data, len);
                pci_dma_sync_single_for_device(pdev,
                                               dma_unmap_addr(ce, dma_addr),
                                               dma_unmap_len(ce, dma_len),
                                               PCI_DMA_FROMDEVICE);
                recycle_fl_buf(fl, fl->cidx);
                return skb;
        }

use_orig_buf:
        if (fl->credits < 2) {
                recycle_fl_buf(fl, fl->cidx);
                return NULL;
        }

        pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
                         dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
        skb = ce->skb;
        prefetch(skb->data);

        skb_put(skb, len);
        return skb;
}

/**
 *	unexpected_offload - handle an unexpected offload packet
 *	@adapter: the adapter
 *	@fl: the free list that received the packet
 *
 *	Called when we receive an unexpected offload packet (e.g., the TOE
 *	function is disabled or the card is a NIC). Prints a message and
 *	recycles the buffer.
 */
static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
{
        struct freelQ_ce *ce = &fl->centries[fl->cidx];
        struct sk_buff *skb = ce->skb;

        pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
                                    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
        pr_err("%s: unexpected offload packet, cmd %u\n",
               adapter->name, *skb->data);
        recycle_fl_buf(fl, fl->cidx);
}

/*
 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
 * Note that the *_large_page_tx_descs stuff will be optimized out when
 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
 *
 * compute_large_page_descs() computes how many additional descriptors are
 * required to break down the stack's request.
 */
static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
{
        unsigned int count = 0;

        if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
                unsigned int nfrags = skb_shinfo(skb)->nr_frags;
                unsigned int i, len = skb_headlen(skb);
                while (len > SGE_TX_DESC_MAX_PLEN) {
                        count++;
                        len -= SGE_TX_DESC_MAX_PLEN;
                }
                for (i = 0; nfrags--; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        len = frag->size;
                        while (len > SGE_TX_DESC_MAX_PLEN) {
                                count++;
                                len -= SGE_TX_DESC_MAX_PLEN;
                        }
                }
        }
        return count;
}
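
/*
 * Example (only relevant when PAGE_SIZE > 16KB, e.g. 64KB-page systems):
 * a 20000-byte linear area needs one extra descriptor, since 20000 =
 * 16384 + 3616 is split into a 16384-byte and a 3616-byte DMA segment.
 */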

/*
 * Write a cmdQ entry.
 *
 * Since this function writes the 'flags' field, it must not be used to
 * write the first cmdQ entry.
 */
static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
                                 unsigned int len, unsigned int gen,
                                 unsigned int eop)
{
        BUG_ON(len > SGE_TX_DESC_MAX_PLEN);

        e->addr_lo = (u32)mapping;
        e->addr_hi = (u64)mapping >> 32;
        e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
        e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
}

/*
 * See comment for previous function.
 *
 * write_tx_descs_large_page() writes additional SGE tx descriptors if
 * *desc_len exceeds HW's capability.
 */
static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
                                                     struct cmdQ_e **e,
                                                     struct cmdQ_ce **ce,
                                                     unsigned int *gen,
                                                     dma_addr_t *desc_mapping,
                                                     unsigned int *desc_len,
                                                     unsigned int nfrags,
                                                     struct cmdQ *q)
{
        if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
                struct cmdQ_e *e1 = *e;
                struct cmdQ_ce *ce1 = *ce;

                while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
                        *desc_len -= SGE_TX_DESC_MAX_PLEN;
                        write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
                                      *gen, nfrags == 0 && *desc_len == 0);
                        ce1->skb = NULL;
                        dma_unmap_len_set(ce1, dma_len, 0);
                        *desc_mapping += SGE_TX_DESC_MAX_PLEN;
                        if (*desc_len) {
                                ce1++;
                                e1++;
                                if (++pidx == q->size) {
                                        pidx = 0;
                                        *gen ^= 1;
                                        ce1 = q->centries;
                                        e1 = q->entries;
                                }
                        }
                }
                *e = e1;
                *ce = ce1;
        }
        return pidx;
}

/*
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.
 */
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
                                  unsigned int pidx, unsigned int gen,
                                  struct cmdQ *q)
{
        dma_addr_t mapping, desc_mapping;
        struct cmdQ_e *e, *e1;
        struct cmdQ_ce *ce;
        unsigned int i, flags, first_desc_len, desc_len,
            nfrags = skb_shinfo(skb)->nr_frags;

        e = e1 = &q->entries[pidx];
        ce = &q->centries[pidx];

        mapping = pci_map_single(adapter->pdev, skb->data,
                                 skb_headlen(skb), PCI_DMA_TODEVICE);

        desc_mapping = mapping;
        desc_len = skb_headlen(skb);

        flags = F_CMD_DATAVALID | F_CMD_SOP |
            V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
            V_CMD_GEN2(gen);
        first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
            desc_len : SGE_TX_DESC_MAX_PLEN;
        e->addr_lo = (u32)desc_mapping;
        e->addr_hi = (u64)desc_mapping >> 32;
        e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
        ce->skb = NULL;
        dma_unmap_len_set(ce, dma_len, 0);

        if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
            desc_len > SGE_TX_DESC_MAX_PLEN) {
                desc_mapping += first_desc_len;
                desc_len -= first_desc_len;
                e1++;
                ce++;
                if (++pidx == q->size) {
                        pidx = 0;
                        gen ^= 1;
                        e1 = q->entries;
                        ce = q->centries;
                }
                pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
                                                 &desc_mapping, &desc_len,
                                                 nfrags, q);

                if (likely(desc_len))
                        write_tx_desc(e1, desc_mapping, desc_len, gen,
                                      nfrags == 0);
        }

        ce->skb = NULL;
        dma_unmap_addr_set(ce, dma_addr, mapping);
        dma_unmap_len_set(ce, dma_len, skb_headlen(skb));

        for (i = 0; nfrags--; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                e1++;
                ce++;
                if (++pidx == q->size) {
                        pidx = 0;
                        gen ^= 1;
                        e1 = q->entries;
                        ce = q->centries;
                }

                mapping = pci_map_page(adapter->pdev, frag->page,
                                       frag->page_offset, frag->size,
                                       PCI_DMA_TODEVICE);
                desc_mapping = mapping;
                desc_len = frag->size;

                pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
                                                 &desc_mapping, &desc_len,
                                                 nfrags, q);
                if (likely(desc_len))
                        write_tx_desc(e1, desc_mapping, desc_len, gen,
                                      nfrags == 0);
                ce->skb = NULL;
                dma_unmap_addr_set(ce, dma_addr, mapping);
                dma_unmap_len_set(ce, dma_len, frag->size);
        }
        ce->skb = skb;
        wmb();
        e->flags = flags;
}

/*
 * Clean up completed Tx buffers.
 */
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
{
        unsigned int reclaim = q->processed - q->cleaned;

        if (reclaim) {
                pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
                         q->processed, q->cleaned);
                free_cmdQ_buffers(sge, q, reclaim);
                q->cleaned += reclaim;
        }
}

/*
 * Called from tasklet. Checks the scheduler for any
 * pending skbs that can be sent.
 */
static void restart_sched(unsigned long arg)
{
        struct sge *sge = (struct sge *) arg;
        struct adapter *adapter = sge->adapter;
        struct cmdQ *q = &sge->cmdQ[0];
        struct sk_buff *skb;
        unsigned int credits, queued_skb = 0;

        spin_lock(&q->lock);
        reclaim_completed_tx(sge, q);

        credits = q->size - q->in_use;
        pr_debug("restart_sched credits=%d\n", credits);
        while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
                unsigned int genbit, pidx, count;
                count = 1 + skb_shinfo(skb)->nr_frags;
                count += compute_large_page_tx_descs(skb);
                q->in_use += count;
                genbit = q->genbit;
                pidx = q->pidx;
                q->pidx += count;
                if (q->pidx >= q->size) {
                        q->pidx -= q->size;
                        q->genbit ^= 1;
                }
                write_tx_descs(adapter, skb, pidx, genbit, q);
                credits = q->size - q->in_use;
                queued_skb = 1;
        }

        if (queued_skb) {
                clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
                if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
                        set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
                        writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
                }
        }
        spin_unlock(&q->lock);
}

/**
 *	sge_rx - process an ingress ethernet packet
 *	@sge: the sge structure
 *	@fl: the free list that contains the packet buffer
 *	@len: the packet length
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001362static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
Scott Bardone559fb512005-06-23 01:40:19 -04001363{
1364 struct sk_buff *skb;
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001365 const struct cpl_rx_pkt *p;
Scott Bardone559fb512005-06-23 01:40:19 -04001366 struct adapter *adapter = sge->adapter;
Stephen Hemminger56f643c2006-12-01 16:36:21 -08001367 struct sge_port_stats *st;
Scott Bardone559fb512005-06-23 01:40:19 -04001368
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001369 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
Stephen Hemminger56f643c2006-12-01 16:36:21 -08001370 if (unlikely(!skb)) {
1371 sge->stats.rx_drops++;
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001372 return;
Scott Bardone559fb512005-06-23 01:40:19 -04001373 }
1374
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001375 p = (const struct cpl_rx_pkt *) skb->data;
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -08001376 if (p->iff >= adapter->params.nports) {
1377 kfree_skb(skb);
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001378 return;
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -08001379 }
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001380 __skb_pull(skb, sizeof(*p));
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -08001381
Christoph Lameterca0c9582009-10-03 19:48:22 +09001382 st = this_cpu_ptr(sge->port_stats[p->iff]);
Stephen Hemminger56f643c2006-12-01 16:36:21 -08001383
Arnaldo Carvalho de Melo4c13eb62007-04-25 17:40:23 -07001384 skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
Scott Bardone559fb512005-06-23 01:40:19 -04001385 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
1386 skb->protocol == htons(ETH_P_IP) &&
1387 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
Stephen Hemminger56f643c2006-12-01 16:36:21 -08001388 ++st->rx_cso_good;
Scott Bardone559fb512005-06-23 01:40:19 -04001389 skb->ip_summed = CHECKSUM_UNNECESSARY;
1390 } else
1391 skb->ip_summed = CHECKSUM_NONE;
1392
1393 if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
Stephen Hemminger56f643c2006-12-01 16:36:21 -08001394 st->vlan_xtract++;
Francois Romieu4422b002008-07-11 00:29:19 +02001395 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
1396 ntohs(p->vlan));
1397 } else
Scott Bardone559fb512005-06-23 01:40:19 -04001398 netif_receive_skb(skb);
Scott Bardone559fb512005-06-23 01:40:19 -04001399}
1400
1401/*
1402 * Returns true if a command queue has enough available descriptors that
1403 * we can resume Tx operation after temporarily disabling its packet queue.
1404 */
1405static inline int enough_free_Tx_descs(const struct cmdQ *q)
1406{
1407 unsigned int r = q->processed - q->cleaned;
1408
1409 return q->in_use - r < (q->size >> 1);
1410}

/*
 * Called when sufficient space has become available in the SGE command queues
 * after the Tx packet schedulers have been suspended to restart the Tx path.
 */
static void restart_tx_queues(struct sge *sge)
{
	struct adapter *adap = sge->adapter;
	int i;

	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
		return;

	for_each_port(adap, i) {
		struct net_device *nd = adap->port[i].dev;

		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
		    netif_running(nd)) {
			sge->stats.cmdQ_restarted[2]++;
			netif_wake_queue(nd);
		}
	}
}

/*
 * update_tx_info is called from the interrupt handler/NAPI to process the
 * cmdQ0 credit-return information carried in a response entry.
 */
static unsigned int update_tx_info(struct adapter *adapter,
				   unsigned int flags,
				   unsigned int pr0)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *cmdq = &sge->cmdQ[0];

	cmdq->processed += pr0;
	if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
		freelQs_empty(sge);
		flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
	}
	if (flags & F_CMDQ0_ENABLE) {
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);

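		/*
		 * The queue reported itself asleep. If descriptors are
		 * still outstanding, ring the doorbell so the SGE resumes
		 * fetching them.
		 */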
		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
		if (sge->tx_sched)
			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);

		flags &= ~F_CMDQ0_ENABLE;
	}

	if (unlikely(sge->stopped_tx_queues != 0))
		restart_tx_queues(sge);

	return flags;
}

/*
 * Process SGE responses, up to the supplied budget. Returns the number of
 * responses processed.
 */
static int process_responses(struct adapter *adapter, int budget)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	int done = 0;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	while (done < budget && e->GenerationBit == q->genbit) {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		/* We batch updates to the TX side to avoid cacheline
		 * ping-pong of TX state information on MP where the sender
		 * might run on a different CPU than this function...
		 */
		if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
			cmdq_processed[0] = 0;
		}

		if (unlikely(cmdq_processed[1] > 16)) {
			sge->cmdQ[1].processed += cmdq_processed[1];
			cmdq_processed[1] = 0;
		}

		if (likely(e->DataValid)) {
			struct freelQ *fl = &sge->freelQ[e->FreelistQid];

			BUG_ON(!e->Sop || !e->Eop);
			if (unlikely(e->Offload))
				unexpected_offload(adapter, fl);
			else
				sge_rx(sge, fl, e->BufferLength);

			++done;

			/*
			 * Note: this depends on each packet consuming a
			 * single free-list buffer; cf. the BUG above.
			 */
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
			prefetch(fl->centries[fl->cidx].skb);

			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
		} else
			sge->stats.pure_rsps++;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

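		/*
		 * Return accumulated response-queue credits to the
		 * hardware in batches rather than one register write
		 * per response entry.
		 */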
		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
	}

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return done;
}

static inline int responses_pending(const struct adapter *adapter)
{
	const struct respQ *Q = &adapter->sge->respQ;
	const struct respQ_e *e = &Q->entries[Q->cidx];

	return e->GenerationBit == Q->genbit;
}

/*
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses. Such responses are too lightweight to justify
 * calling a softirq when using NAPI, so we handle them specially in hard
 * interrupt context. The caller must ensure a response is pending; the
 * function returns 1 when it encounters a valid data-carrying response and
 * 0 otherwise.
 */
static int process_pure_responses(struct adapter *adapter)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	prefetch(fl->centries[fl->cidx].skb);
	if (e->DataValid)
		return 1;

	do {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return e->GenerationBit == q->genbit;
}

/*
 * Handler for new data events when using NAPI. This does not need any locking
 * or protection from interrupts as data interrupts are off at this point and
 * other adapter interrupts do not interfere.
 */
int t1_poll(struct napi_struct *napi, int budget)
{
	struct adapter *adapter = container_of(napi, struct adapter, napi);
	int work_done = process_responses(adapter, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);
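		/*
		 * Report how far we have consumed by writing the response
		 * queue index to A_SG_SLEEPING; this write is presumably
		 * also what re-arms the data interrupt (t1_interrupt
		 * issues the same write on its no-data path).
		 */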
		writel(adapter->sge->respQ.cidx,
		       adapter->regs + A_SG_SLEEPING);
	}
	return work_done;
}

irqreturn_t t1_interrupt(int irq, void *data)
{
	struct adapter *adapter = data;
	struct sge *sge = adapter->sge;
	int handled;

	if (likely(responses_pending(adapter))) {
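		/* Clear the SGE data interrupt cause before handling the
		 * pending responses.
		 */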
		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

		if (napi_schedule_prep(&adapter->napi)) {
			if (process_pure_responses(adapter))
				__napi_schedule(&adapter->napi);
			else {
				/* no data, no NAPI needed */
				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
				/* undo schedule_prep */
				napi_enable(&adapter->napi);
			}
		}
		return IRQ_HANDLED;
	}

	spin_lock(&adapter->async_lock);
	handled = t1_slow_intr_handler(adapter);
	spin_unlock(&adapter->async_lock);

	if (!handled)
		sge->stats.unhandled_irqs++;

	return IRQ_RETVAL(handled != 0);
}

/*
 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
 *
 * The code figures out how many entries the sk_buff will require in the
 * cmdQ and updates the cmdQ data structure with the state once the enqueue
 * is complete. After that it doesn't access the global structure anymore,
 * but uses the corresponding fields on the stack. In conjunction with a
 * spinlock around that code, we can make the function reentrant without
 * holding the lock when we actually enqueue (which might be expensive,
 * especially on architectures with IO MMUs).
 *
 * This runs with softirqs disabled.
 */
static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		     unsigned int qid, struct net_device *dev)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;

	if (!spin_trylock(&q->lock))
		return NETDEV_TX_LOCKED;

	reclaim_completed_tx(sge, q);

	pidx = q->pidx;
	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;
	count += compute_large_page_tx_descs(skb);

	/* Ethernet packet */
	if (unlikely(credits < count)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[2]++;
			pr_err("%s: Tx ring full while queue awake!\n",
			       adapter->name);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

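	/*
	 * This packet fits, but a later one might not: stop the queue
	 * once fewer than q->stop_thres descriptors would remain, which
	 * presumably leaves room for a worst-case fragmented packet.
	 */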
	if (unlikely(credits - count < q->stop_thres)) {
		netif_stop_queue(dev);
		set_bit(dev->if_port, &sge->stopped_tx_queues);
		sge->stats.cmdQ_full[2]++;
	}

	/* T204 cmdQ0 skbs that are destined for a certain port have to go
	 * through the scheduler.
	 */
	if (sge->tx_sched && !qid && skb->dev) {
use_sched:
		use_sched_skb = 1;
		/* Note that the scheduler might return a different skb than
		 * the one passed in.
		 */
		skb = sched_skb(sge, skb, credits);
		if (!skb) {
			spin_unlock(&q->lock);
			return NETDEV_TX_OK;
		}
		pidx = q->pidx;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
	}

	q->in_use += count;
	genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);

	write_tx_descs(adapter, skb, pidx, genbit, q);

	/*
	 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
	 * the doorbell if the Q is asleep. There is a natural race where
	 * the hardware goes to sleep just after we check; in that case the
	 * interrupt handler will detect the outstanding TX packet and ring
	 * the doorbell for us.
	 */
	if (qid)
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
	else {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}

	if (use_sched_skb) {
		if (spin_trylock(&q->lock)) {
			credits = q->size - q->in_use;
			skb = NULL;
			goto use_sched;
		}
	}
	return NETDEV_TX_OK;
}

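/*
 * Packs the Ethernet encapsulation type into the top two bits and the
 * MSS into the low 14 bits of the LSO header's eth_type_mss field.
 */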
#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))

/*
 * eth_hdr_len - return the length of an Ethernet header
 * @data: pointer to the start of the Ethernet header
 *
 * Returns the length of an Ethernet header, including optional VLAN tag.
 */
static inline int eth_hdr_len(const void *data)
{
	const struct ethhdr *e = data;

	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
}

/*
 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
 */
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct sge *sge = adapter->sge;
	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
	struct cpl_tx_pkt *cpl;
	struct sk_buff *orig_skb = skb;
	int ret;

	if (skb->protocol == htons(ETH_P_CPL5))
		goto send;

	/*
	 * We are using a non-standard hard_header_len.
	 * Allocate more header room in the rare cases it is not big enough.
	 */
	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
		++st->tx_need_hdrroom;
		dev_kfree_skb_any(orig_skb);
		if (!skb)
			return NETDEV_TX_OK;
	}

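	/*
	 * A non-zero gso_size means the stack handed us a TSO packet;
	 * prepend a CPL_TX_PKT_LSO header so the hardware can segment it.
	 */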
	if (skb_shinfo(skb)->gso_size) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr;

		++st->tx_tso;

		eth_type = skb_network_offset(skb) == ETH_HLEN ?
			CPL_ETH_II : CPL_ETH_II_VLAN;

		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
		hdr->opcode = CPL_TX_PKT_LSO;
		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
							  skb_shinfo(skb)->gso_size));
		hdr->len = htonl(skb->len - sizeof(*hdr));
		cpl = (struct cpl_tx_pkt *)hdr;
	} else {
		/*
		 * Packets shorter than ETH_HLEN can break the MAC, drop them
		 * early. Also, we may get oversized packets because some
		 * parts of the kernel don't handle our unusual hard_header_len
		 * right, drop those too.
		 */
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			pr_debug("%s: packet size %d hdr %d mtu %d\n", dev->name,
				 skb->len, eth_hdr_len(skb->data), dev->mtu);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
		    skb->ip_summed == CHECKSUM_PARTIAL &&
		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
			if (unlikely(skb_checksum_help(skb))) {
				pr_debug("%s: unable to do udp checksum\n", dev->name);
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}

		/* We assume this catches the gratuitous ARP and use it to
		 * flush out stuck ESPI packets...
		 */
		if (unlikely(!adapter->sge->espibug_skb[dev->if_port])) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb[dev->if_port] = skb;
				/* We want to re-use this skb later. We
				 * simply bump the reference count and it
				 * will not be freed...
				 */
				skb = skb_get(skb);
			}
		}

		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;	/* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
	}
	cpl->iff = dev->if_port;

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(vlan_tx_tag_get(skb));
		st->vlan_insert++;
	} else
#endif
		cpl->vlan_valid = 0;

send:
	ret = t1_sge_tx(skb, adapter, 0, dev);

	/* If transmit busy, and we reallocated skb's due to headroom limit,
	 * then silently discard to avoid leak.
	 */
	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}

/*
 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
 */
static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) {	/* flush pending credits */
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}

/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}
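/*
 * Example with illustrative numbers: at a hypothetical core clock of
 * 125 ticks per usec, rx_coalesce_usecs = 50 programs A_SG_INTRTIMER
 * with 50 * 125 = 6250 core ticks.
 */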

/*
 * Allocates both RX and TX resources and configures the SGE. However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists calculate the payload
	 * capacity of the large buffers. Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}

/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;

	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		kfree_skb(sge->espibug_skb[i]);
}

/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround.
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (!netif_running(adapter->port[i].dev) ||
			    netif_queue_stopped(adapter->port[i].dev) ||
			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
				continue;

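			/*
			 * On first use, stamp a Chelsio MAC address
			 * (OUI 00:07:43) into the cached ARP skb and mark
			 * it via cb[0] so this is done only once; the
			 * exact on-wire pattern is a detail of the ESPI
			 * hardware workaround.
			 */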
			if (!skb->cb[0]) {
				u8 ch_mac_addr[ETH_ALEN] = {
					0x0, 0x7, 0x43, 0x0, 0x0, 0x0
				};

				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr,
						ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr,
						ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of
			 * the skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

static void espibug_workaround(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb[0];
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				u8 ch_mac_addr[ETH_ALEN] =
					{0x0, 0x7, 0x43, 0x0, 0x0, 0x0};

				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr,
						ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr,
						ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge * __devinit t1_sge_create(struct adapter *adapter,
				     struct sge_params *p)
{
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	int i;

	if (!sge)
		return NULL;

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	for_each_port(adapter, i) {
		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
		if (!sge->port_stats[i])
			goto nomem_port;
	}

	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;

	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);

		if (adapter->params.nports > 1) {
			tx_sched_init(sge);
			sge->espibug_timer.function = espibug_workaround_t204;
		} else
			sge->espibug_timer.function = espibug_workaround;
		sge->espibug_timer.data = (unsigned long)sge->adapter;

		sge->espibug_timeout = 1;
		/* for T204, every 10ms */
		if (adapter->params.nports > 1)
			sge->espibug_timeout = HZ/100;
	}

	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	if (sge->tx_sched) {
		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
			p->rx_coalesce_usecs = 15;
		else
			p->rx_coalesce_usecs = 50;
	} else
		p->rx_coalesce_usecs = 50;

	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;

	return sge;

nomem_port:
	while (i >= 0) {
		free_percpu(sge->port_stats[i]);
		--i;
	}
	kfree(sge);
	return NULL;
}