/*****************************************************************************
 *                                                                           *
 * File: sge.c                                                               *
 * $Revision: 1.26 $                                                         *
 * $Date: 2005/06/21 18:29:48 $                                              *
 * Description:                                                              *
 *  DMA engine.                                                              *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, write to the Free Software Foundation, Inc.,  *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                  *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "cpl5_cmd.h"
#include "sge.h"
#include "regs.h"
#include "espi.h"

/* This belongs in if_ether.h */
#define ETH_P_CPL5 0xf

#define SGE_CMDQ_N		2
#define SGE_FREELQ_N		2
#define SGE_CMDQ0_E_N		1024
#define SGE_CMDQ1_E_N		128
#define SGE_FREEL_SIZE		4096
#define SGE_JUMBO_FREEL_SIZE	512
#define SGE_FREEL_REFILL_THRESH	16
#define SGE_RESPQ_E_N		1024
#define SGE_INTRTIMER_NRES	1000
#define SGE_RX_SM_BUF_SIZE	1536
#define SGE_TX_DESC_MAX_PLEN	16384

#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)

/*
 * Period of the TX buffer reclaim timer.  This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

#define M_CMD_LEN       0x7fffffff
#define V_CMD_LEN(v)    (v)
#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)   ((v) << 31)
#define V_CMD_GEN2(v)   (v)
#define F_CMD_DATAVALID (1 << 1)
#define F_CMD_SOP       (1 << 2)
#define V_CMD_EOP(v)    ((v) << 3)

/*
 * Command queue, receive buffer list, and response queue descriptors.
 */
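/*
 * Each ring entry carries a generation bit that SW flips every time the
 * producer index wraps; HW compares it against the queue's current
 * generation to tell freshly written (valid) descriptors from stale ones.
 */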
#if defined(__BIG_ENDIAN_BITFIELD)
struct cmdQ_e {
	u32 addr_lo;
	u32 len_gen;
	u32 flags;
	u32 addr_hi;
};

struct freelQ_e {
	u32 addr_lo;
	u32 len_gen;
	u32 gen2;
	u32 addr_hi;
};

struct respQ_e {
	u32 Qsleeping		: 4;
	u32 Cmdq1CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq0DmaComplete	: 5;
	u32 FreelistQid		: 2;
	u32 CreditValid		: 1;
	u32 DataValid		: 1;
	u32 Offload		: 1;
	u32 Eop			: 1;
	u32 Sop			: 1;
	u32 GenerationBit	: 1;
	u32 BufferLength;
};
#elif defined(__LITTLE_ENDIAN_BITFIELD)
struct cmdQ_e {
	u32 len_gen;
	u32 addr_lo;
	u32 addr_hi;
	u32 flags;
};

struct freelQ_e {
	u32 len_gen;
	u32 addr_lo;
	u32 addr_hi;
	u32 gen2;
};

struct respQ_e {
	u32 BufferLength;
	u32 GenerationBit	: 1;
	u32 Sop			: 1;
	u32 Eop			: 1;
	u32 Offload		: 1;
	u32 DataValid		: 1;
	u32 CreditValid		: 1;
	u32 FreelistQid		: 2;
	u32 Cmdq0DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq1CreditReturn	: 5;
	u32 Qsleeping		: 4;
};
#endif

/*
 * SW Context Command and Freelist Queue Descriptors
 */
struct cmdQ_ce {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

struct freelQ_ce {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

/*
 * SW command, freelist and response rings
 */
struct cmdQ {
	unsigned long	status;		/* HW DMA fetch status */
	unsigned int	in_use;		/* # of in-use command descriptors */
	unsigned int	size;		/* # of descriptors */
	unsigned int	processed;	/* total # of descs HW has processed */
	unsigned int	cleaned;	/* total # of descs SW has reclaimed */
	unsigned int	stop_thres;	/* SW TX queue suspend threshold */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u8		genbit;		/* current generation (=valid) bit */
	u8		sop;		/* is next entry start of packet? */
	struct cmdQ_e  *entries;	/* HW command descriptor Q */
	struct cmdQ_ce *centries;	/* SW command context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW command descriptor Q */
	spinlock_t	lock;		/* Lock to protect cmdQ enqueuing */
};

struct freelQ {
	unsigned int	credits;	/* # of available RX buffers */
	unsigned int	size;		/* free list capacity */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u16		rx_buffer_size;	/* Buffer size on this free list */
	u16		dma_offset;	/* DMA offset to align IP headers */
	u16		recycleq_idx;	/* skb recycle q to use */
	u8		genbit;		/* current generation (=valid) bit */
	struct freelQ_e	*entries;	/* HW freelist descriptor Q */
	struct freelQ_ce *centries;	/* SW freelist context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW freelist descriptor Q */
};

struct respQ {
	unsigned int	credits;	/* credits to be returned to SGE */
	unsigned int	size;		/* # of response Q descriptors */
	u16		cidx;		/* consumer index (SW) */
	u8		genbit;		/* current generation(=valid) bit */
	struct respQ_e *entries;	/* HW response descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW response descriptor Q */
};

/* Bit flags for cmdQ.status */
enum {
	CMDQ_STAT_RUNNING = 1,		/* fetch engine is running */
	CMDQ_STAT_LAST_PKT_DB = 2	/* last packet rung the doorbell */
};
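
/*
 * CMDQ_STAT_RUNNING tracks whether the HW fetch engine is believed to be
 * active; CMDQ_STAT_LAST_PKT_DB records that the doorbell has been rung
 * for the most recently queued packet, so the restart paths know when a
 * fresh F_CMDQ0_ENABLE doorbell write is required.
 */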
224
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -0800225/* T204 TX SW scheduler */
226
227/* Per T204 TX port */
228struct sched_port {
229 unsigned int avail; /* available bits - quota */
230 unsigned int drain_bits_per_1024ns; /* drain rate */
231 unsigned int speed; /* drain rate, mbps */
232 unsigned int mtu; /* mtu size */
233 struct sk_buff_head skbq; /* pending skbs */
234};
235
236/* Per T204 device */
237struct sched {
238 ktime_t last_updated; /* last time quotas were computed */
Francois Romieu356bd142006-12-11 23:47:00 +0100239 unsigned int max_avail; /* max bits to be sent to any port */
240 unsigned int port; /* port index (round robin ports) */
241 unsigned int num; /* num skbs in per port queues */
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -0800242 struct sched_port p[MAX_NPORTS];
243 struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
244};
245static void restart_sched(unsigned long);
246
247
Christoph Lameter8199d3a2005-03-30 13:34:31 -0800248/*
249 * Main SGE data structure
250 *
251 * Interrupts are handled by a single CPU and it is likely that on a MP system
252 * the application is migrated to another CPU. In that scenario, we try to
Daniel Mack3ad2f3f2010-02-03 08:01:28 +0800253 * separate the RX(in irq context) and TX state in order to decrease memory
Christoph Lameter8199d3a2005-03-30 13:34:31 -0800254 * contention.
255 */
256struct sge {
Francois Romieu356bd142006-12-11 23:47:00 +0100257 struct adapter *adapter; /* adapter backpointer */
Scott Bardone559fb512005-06-23 01:40:19 -0400258 struct net_device *netdev; /* netdevice backpointer */
Francois Romieu356bd142006-12-11 23:47:00 +0100259 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
260 struct respQ respQ; /* response Q */
Scott Bardone559fb512005-06-23 01:40:19 -0400261 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
Christoph Lameter8199d3a2005-03-30 13:34:31 -0800262 unsigned int rx_pkt_pad; /* RX padding for L2 packets */
263 unsigned int jumbo_fl; /* jumbo freelist Q index */
Scott Bardone559fb512005-06-23 01:40:19 -0400264 unsigned int intrtimer_nres; /* no-resource interrupt timer */
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -0800265 unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
Scott Bardone559fb512005-06-23 01:40:19 -0400266 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
267 struct timer_list espibug_timer;
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -0800268 unsigned long espibug_timeout;
269 struct sk_buff *espibug_skb[MAX_NPORTS];
Scott Bardone559fb512005-06-23 01:40:19 -0400270 u32 sge_control; /* shadow value of sge control reg */
271 struct sge_intr_counts stats;
Tejun Heo47d74272010-02-16 15:21:08 +0000272 struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -0800273 struct sched *tx_sched;
Scott Bardone559fb512005-06-23 01:40:19 -0400274 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
Christoph Lameter8199d3a2005-03-30 13:34:31 -0800275};
276
Joe Perches215faf92010-12-21 02:16:10 -0800277static const u8 ch_mac_addr[ETH_ALEN] = {
278 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
279};
280
Christoph Lameter8199d3a2005-03-30 13:34:31 -0800281/*
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -0800282 * stop tasklet and free all pending skb's
283 */
284static void tx_sched_stop(struct sge *sge)
285{
286 struct sched *s = sge->tx_sched;
287 int i;
288
289 tasklet_kill(&s->sched_tsk);
290
291 for (i = 0; i < MAX_NPORTS; i++)
292 __skb_queue_purge(&s->p[s->port].skbq);
293}
294
295/*
296 * t1_sched_update_parms() is called when the MTU or link speed changes. It
297 * re-computes scheduler parameters to scope with the change.
298 */
299unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
300 unsigned int mtu, unsigned int speed)
301{
302 struct sched *s = sge->tx_sched;
303 struct sched_port *p = &s->p[port];
304 unsigned int max_avail_segs;
305
306 pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
307 if (speed)
308 p->speed = speed;
309 if (mtu)
310 p->mtu = mtu;
311
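	/*
	 * Derive the drain rate in bits per 1024 ns: p->speed is in Mbps
	 * (i.e. bits per 1000 ns), scaled by the ratio of TCP payload per
	 * frame (mtu - 40 bytes of TCP/IP headers) to the assumed
	 * on-the-wire cost per frame (mtu + 50 bytes).
	 */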
	if (speed || mtu) {
		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
		do_div(drain, (p->mtu + 50) * 1000);
		p->drain_bits_per_1024ns = (unsigned int) drain;

		if (p->speed < 1000)
			p->drain_bits_per_1024ns =
				90 * p->drain_bits_per_1024ns / 100;
	}

	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
		p->drain_bits_per_1024ns -= 16;
		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
	} else {
		s->max_avail = 16384;
		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
	}

	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
		 p->speed, s->max_avail, max_avail_segs,
		 p->drain_bits_per_1024ns);

	return max_avail_segs * (p->mtu - 40);
}

#if 0

/*
 * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of
 * data that can be pushed per port.
 */
void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
{
	struct sched *s = sge->tx_sched;
	unsigned int i;

	s->max_avail = val;
	for (i = 0; i < MAX_NPORTS; i++)
		t1_sched_update_parms(sge, i, 0, 0);
}

/*
 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
 * is draining.
 */
void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
				    unsigned int val)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];

	p->drain_bits_per_1024ns = val * 1024 / 1000;
	t1_sched_update_parms(sge, port, 0, 0);
}

#endif /* 0 */


/*
 * get_clock() implements a ns clock (see ktime_get)
 */
static inline ktime_t get_clock(void)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	return timespec_to_ktime(ts);
}

/*
 * tx_sched_init() allocates resources and does basic initialization.
 */
static int tx_sched_init(struct sge *sge)
{
	struct sched *s;
	int i;

	s = kzalloc(sizeof(struct sched), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	pr_debug("tx_sched_init\n");
	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
	sge->tx_sched = s;

	for (i = 0; i < MAX_NPORTS; i++) {
		skb_queue_head_init(&s->p[i].skbq);
		t1_sched_update_parms(sge, i, 1500, 1000);
	}

	return 0;
}

/*
 * sched_update_avail() computes the delta since the last time it was called
 * and updates the per port quota (number of bits that can be sent to any
 * port).
 */
static inline int sched_update_avail(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	ktime_t now = get_clock();
	unsigned int i;
	long long delta_time_ns;

	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));

	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
	if (delta_time_ns < 15000)
		return 0;

	for (i = 0; i < MAX_NPORTS; i++) {
		struct sched_port *p = &s->p[i];
		unsigned int delta_avail;
		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
		p->avail = min(p->avail + delta_avail, s->max_avail);
	}

	s->last_updated = now;

	return 1;
}

/*
 * sched_skb() is called from two different places. In the tx path, any
 * packet generating load on an output port will call sched_skb()
 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
 * context (skb == NULL).
 * The scheduler only returns a skb (which will then be sent) if the
 * length of the skb is <= the current quota of the output port.
 */
static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
				 unsigned int credits)
{
	struct sched *s = sge->tx_sched;
	struct sk_buff_head *skbq;
	unsigned int i, len, update = 1;

	pr_debug("sched_skb %p\n", skb);
	if (!skb) {
		if (!s->num)
			return NULL;
	} else {
		skbq = &s->p[skb->dev->if_port].skbq;
		__skb_queue_tail(skbq, skb);
		s->num++;
		skb = NULL;
	}

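	/*
	 * A single packet may consume up to MAX_SKB_FRAGS + 1 descriptors
	 * (one per fragment plus the linear head), so don't dequeue anything
	 * when fewer credits than that remain.
	 */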
	if (credits < MAX_SKB_FRAGS + 1)
		goto out;

again:
	for (i = 0; i < MAX_NPORTS; i++) {
		s->port = (s->port + 1) & (MAX_NPORTS - 1);
		skbq = &s->p[s->port].skbq;

		skb = skb_peek(skbq);

		if (!skb)
			continue;

		len = skb->len;
		if (len <= s->p[s->port].avail) {
			s->p[s->port].avail -= len;
			s->num--;
			__skb_unlink(skb, skbq);
			goto out;
		}
		skb = NULL;
	}

	if (update-- && sched_update_avail(sge))
		goto again;

out:
	/* If there are more pending skbs, we use the hardware to schedule us
	 * again.
	 */
	if (s->num && !skb) {
		struct cmdQ *q = &sge->cmdQ[0];
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
	}
	pr_debug("sched_skb ret %p\n", skb);

	return skb;
}

/*
 * PIO to indicate that memory mapped Q contains valid descriptor(s).
 */
static inline void doorbell_pio(struct adapter *adapter, u32 val)
{
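	/* Ensure all descriptor updates are visible before ringing the
	 * doorbell, as HW may start fetching immediately. */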
	wmb();
	writel(val, adapter->regs + A_SG_DOORBELL);
}

/*
 * Frees all RX buffers on the freelist Q. The caller must make sure that
 * the SGE is turned off before calling this function.
 */
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct freelQ_ce *ce = &q->centries[cidx];

		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
				 dma_unmap_len(ce, dma_len),
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(ce->skb);
		ce->skb = NULL;
		if (++cidx == q->size)
			cidx = 0;
	}
}

/*
 * Free RX free list and response queue resources.
 */
static void free_rx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	if (sge->respQ.entries) {
		size = sizeof(struct respQ_e) * sge->respQ.size;
		pci_free_consistent(pdev, size, sge->respQ.entries,
				    sge->respQ.dma_addr);
	}

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		if (q->centries) {
			free_freelQ_buffers(pdev, q);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct freelQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}

/*
 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
 * response queue.
 */
static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		q->genbit = 1;
		q->size = p->freelQ_size[i];
		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
		size = sizeof(struct freelQ_e) * q->size;
		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;

		size = sizeof(struct freelQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * Calculate the buffer sizes for the two free lists.  FL0 accommodates
	 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
	 * including all the sk_buff overhead.
	 *
	 * Note: For T2 FL0 and FL1 are reversed.
	 */
	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
		sizeof(struct cpl_rx_data) +
		sge->freelQ[!sge->jumbo_fl].dma_offset;

	size = (16 * 1024) -
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;

	/*
	 * Setup which skb recycle Q should be used when recycling buffers from
	 * each free list.
	 */
	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;

	sge->respQ.genbit = 1;
	sge->respQ.size = SGE_RESPQ_E_N;
	sge->respQ.credits = 0;
	size = sizeof(struct respQ_e) * sge->respQ.size;
	sge->respQ.entries =
		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
	if (!sge->respQ.entries)
		goto err_no_mem;
	return 0;

err_no_mem:
	free_rx_resources(sge);
	return -ENOMEM;
}

/*
 * Reclaims n TX descriptors and frees the buffers associated with them.
 */
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
{
	struct cmdQ_ce *ce;
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int cidx = q->cidx;

	q->in_use -= n;
	ce = &q->centries[cidx];
	while (n--) {
		if (likely(dma_unmap_len(ce, dma_len))) {
			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
					 dma_unmap_len(ce, dma_len),
					 PCI_DMA_TODEVICE);
			if (q->sop)
				q->sop = 0;
		}
		if (ce->skb) {
			dev_kfree_skb_any(ce->skb);
			q->sop = 1;
		}
		ce++;
		if (++cidx == q->size) {
			cidx = 0;
			ce = q->centries;
		}
	}
	q->cidx = cidx;
}

/*
 * Free TX resources.
 *
 * Assumes that SGE is stopped and all interrupts are disabled.
 */
static void free_tx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (q->centries) {
			if (q->in_use)
				free_cmdQ_buffers(sge, q, q->in_use);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct cmdQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}

/*
 * Allocates basic TX resources, consisting of memory mapped command Qs.
 */
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		q->genbit = 1;
		q->sop = 1;
		q->size = p->cmdQ_size[i];
		q->in_use = 0;
		q->status = 0;
		q->processed = q->cleaned = 0;
		q->stop_thres = 0;
		spin_lock_init(&q->lock);
		size = sizeof(struct cmdQ_e) * q->size;
		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;

		size = sizeof(struct cmdQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
	 * only.  For queue 0 set the stop threshold so we can handle one more
	 * packet from each port, plus reserve an additional 24 entries for
	 * Ethernet packets only.  Queue 1 never suspends nor do we reserve
	 * space for Ethernet packets.
	 */
	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
		(MAX_SKB_FRAGS + 1);
	return 0;

err_no_mem:
	free_tx_resources(sge);
	return -ENOMEM;
}

static inline void setup_ring_params(struct adapter *adapter, u64 addr,
				     u32 size, int base_reg_lo,
				     int base_reg_hi, int size_reg)
{
	writel((u32)addr, adapter->regs + base_reg_lo);
	writel(addr >> 32, adapter->regs + base_reg_hi);
	writel(size, adapter->regs + size_reg);
}

/*
 * Enable/disable VLAN acceleration.
 */
void t1_set_vlan_accel(struct adapter *adapter, int on_off)
{
	struct sge *sge = adapter->sge;

	sge->sge_control &= ~F_VLAN_XTRACT;
	if (on_off)
		sge->sge_control |= F_VLAN_XTRACT;
	if (adapter->open_device_map) {
		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
		readl(adapter->regs + A_SG_CONTROL); /* flush */
	}
}

/*
 * Programs the various SGE registers. However, the engine is not yet enabled,
 * but sge->sge_control is setup and ready to go.
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
	struct adapter *ap = sge->adapter;

	writel(0, ap->regs + A_SG_CONTROL);
	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
	setup_ring_params(ap, sge->freelQ[0].dma_addr,
			  sge->freelQ[0].size, A_SG_FL0BASELWR,
			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
	setup_ring_params(ap, sge->freelQ[1].dma_addr,
			  sge->freelQ[1].size, A_SG_FL1BASELWR,
			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);

	/* The threshold comparison uses <. */
	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
		V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

	/* Initialize no-resource timer */
	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

	t1_sge_set_coalesce_params(sge, p);
}

/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
		sge->freelQ[sge->jumbo_fl].dma_offset -
		sizeof(struct cpl_rx_data);
}

/*
 * Frees all SGE related resources and the sge structure itself
 */
void t1_sge_destroy(struct sge *sge)
{
	int i;

	for_each_port(sge->adapter, i)
		free_percpu(sge->port_stats[i]);

	kfree(sge->tx_sched);
	free_tx_resources(sge);
	free_rx_resources(sge);
	kfree(sge);
}

/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done. This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
 * aligned.
 */
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	struct freelQ_ce *ce = &q->centries[q->pidx];
	struct freelQ_e *e = &q->entries[q->pidx];
	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

	while (q->credits < q->size) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
		if (!skb)
			break;

		skb_reserve(skb, q->dma_offset);
		mapping = pci_map_single(pdev, skb->data, dma_len,
					 PCI_DMA_FROMDEVICE);
		skb_reserve(skb, sge->rx_pkt_pad);

		ce->skb = skb;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, dma_len);
		e->addr_lo = (u32)mapping;
		e->addr_hi = (u64)mapping >> 32;
		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
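		/* Write gen2 last: a matching generation bit is what marks
		 * the descriptor valid to the SGE. */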
		wmb();
		e->gen2 = V_CMD_GEN2(q->genbit);

		e++;
		ce++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->genbit ^= 1;
			ce = q->centries;
			e = q->entries;
		}
		q->credits++;
	}
}

/*
 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
 */
static void freelQs_empty(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
	u32 irqholdoff_reg;

	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
		irq_reg |= F_FL_EXHAUSTED;
		irqholdoff_reg = sge->fixed_intrtimer;
	} else {
		/* Clear the F_FL_EXHAUSTED interrupts for now */
		irq_reg &= ~F_FL_EXHAUSTED;
		irqholdoff_reg = sge->intrtimer_nres;
	}
	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);

	/* We reenable the Qs to force a freelist GTS interrupt later */
	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
}

#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)

/*
 * Disable SGE Interrupts
 */
void t1_sge_intr_disable(struct sge *sge)
{
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
}

/*
 * Enable SGE interrupts.
 */
void t1_sge_intr_enable(struct sge *sge)
{
	u32 en = SGE_INT_ENABLE;
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
		en &= ~F_PACKET_TOO_BIG;
	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
}

/*
 * Clear SGE interrupts.
 */
void t1_sge_intr_clear(struct sge *sge)
{
	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
}

/*
 * SGE 'Error' interrupt handler
 */
int t1_sge_intr_error_handler(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);

	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
		cause &= ~F_PACKET_TOO_BIG;
	if (cause & F_RESPQ_EXHAUSTED)
		sge->stats.respQ_empty++;
	if (cause & F_RESPQ_OVERFLOW) {
		sge->stats.respQ_overflow++;
		pr_alert("%s: SGE response queue overflow\n",
			 adapter->name);
	}
	if (cause & F_FL_EXHAUSTED) {
		sge->stats.freelistQ_empty++;
		freelQs_empty(sge);
	}
	if (cause & F_PACKET_TOO_BIG) {
		sge->stats.pkt_too_big++;
		pr_alert("%s: SGE max packet size exceeded\n",
			 adapter->name);
	}
	if (cause & F_PACKET_MISMATCH) {
		sge->stats.pkt_mismatch++;
		pr_alert("%s: SGE packet mismatch\n", adapter->name);
	}
	if (cause & SGE_INT_FATAL)
		t1_fatal_err(adapter);

	writel(cause, adapter->regs + A_SG_INT_CAUSE);
	return 0;
}

const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
{
	return &sge->stats;
}

void t1_sge_get_port_stats(const struct sge *sge, int port,
			   struct sge_port_stats *ss)
{
	int cpu;

	memset(ss, 0, sizeof(*ss));
	for_each_possible_cpu(cpu) {
		struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);

		ss->rx_cso_good += st->rx_cso_good;
		ss->tx_cso += st->tx_cso;
		ss->tx_tso += st->tx_tso;
		ss->tx_need_hdrroom += st->tx_need_hdrroom;
		ss->vlan_xtract += st->vlan_xtract;
		ss->vlan_insert += st->vlan_insert;
	}
}

/**
 *	recycle_fl_buf - recycle a free list buffer
 *	@fl: the free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_fl_buf(struct freelQ *fl, int idx)
{
	struct freelQ_e *from = &fl->entries[idx];
	struct freelQ_e *to = &fl->entries[fl->pidx];

	fl->centries[fl->pidx] = fl->centries[idx];
	to->addr_lo = from->addr_lo;
	to->addr_hi = from->addr_hi;
	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
	wmb();
	to->gen2 = V_CMD_GEN2(fl->genbit);
	fl->credits++;

	if (++fl->pidx == fl->size) {
		fl->pidx = 0;
		fl->genbit ^= 1;
	}
}

static int copybreak __read_mostly = 256;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

/**
 *	get_packet - return the next ingress packet buffer
 *	@pdev: the PCI device that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the actual packet length, excluding any SGE padding
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is smaller than the copybreak module
 *	parameter we make a copy and recycle the original buffer, otherwise
 *	we use the original buffer itself.  If the free list is running low
 *	(fewer than two credits remain) the buffer is recycled and NULL is
 *	returned to drop the packet.
 */
static inline struct sk_buff *get_packet(struct pci_dev *pdev,
					 struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	const struct freelQ_ce *ce = &fl->centries[fl->cidx];

	if (len < copybreak) {
		skb = alloc_skb(len + 2, GFP_ATOMIC);
		if (!skb)
			goto use_orig_buf;

		skb_reserve(skb, 2);	/* align IP header */
		skb_put(skb, len);
		pci_dma_sync_single_for_cpu(pdev,
					    dma_unmap_addr(ce, dma_addr),
					    dma_unmap_len(ce, dma_len),
					    PCI_DMA_FROMDEVICE);
		skb_copy_from_linear_data(ce->skb, skb->data, len);
		pci_dma_sync_single_for_device(pdev,
					       dma_unmap_addr(ce, dma_addr),
					       dma_unmap_len(ce, dma_len),
					       PCI_DMA_FROMDEVICE);
		recycle_fl_buf(fl, fl->cidx);
		return skb;
	}

use_orig_buf:
	if (fl->credits < 2) {
		recycle_fl_buf(fl, fl->cidx);
		return NULL;
	}

	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	skb = ce->skb;
	prefetch(skb->data);

	skb_put(skb, len);
	return skb;
}

/**
 *	unexpected_offload - handle an unexpected offload packet
 *	@adapter: the adapter
 *	@fl: the free list that received the packet
 *
 *	Called when we receive an unexpected offload packet (e.g., the TOE
 *	function is disabled or the card is a NIC). Prints a message and
 *	recycles the buffer.
 */
static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
{
	struct freelQ_ce *ce = &fl->centries[fl->cidx];
	struct sk_buff *skb = ce->skb;

	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
				    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	pr_err("%s: unexpected offload packet, cmd %u\n",
	       adapter->name, *skb->data);
	recycle_fl_buf(fl, fl->cidx);
}

/*
 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
 * Note that the *_large_page_tx_descs stuff will be optimized out when
 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
 *
 * compute_large_page_tx_descs() computes how many additional descriptors
 * are required to break down the stack's request.
 */
static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
{
	unsigned int count = 0;

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
		unsigned int i, len = skb_headlen(skb);
		while (len > SGE_TX_DESC_MAX_PLEN) {
			count++;
			len -= SGE_TX_DESC_MAX_PLEN;
		}
		for (i = 0; nfrags--; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			len = frag->size;
			while (len > SGE_TX_DESC_MAX_PLEN) {
				count++;
				len -= SGE_TX_DESC_MAX_PLEN;
			}
		}
	}
	return count;
}

/*
 * Write a cmdQ entry.
 *
 * Since this function writes the 'flags' field, it must not be used to
 * write the first cmdQ entry.
 */
static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
				 unsigned int len, unsigned int gen,
				 unsigned int eop)
{
	BUG_ON(len > SGE_TX_DESC_MAX_PLEN);

	e->addr_lo = (u32)mapping;
	e->addr_hi = (u64)mapping >> 32;
	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
}

/*
 * See comment for previous function.
 *
 * write_large_page_tx_descs() writes additional SGE tx descriptors if
 * *desc_len exceeds HW's capability.
 */
static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
						     struct cmdQ_e **e,
						     struct cmdQ_ce **ce,
						     unsigned int *gen,
						     dma_addr_t *desc_mapping,
						     unsigned int *desc_len,
						     unsigned int nfrags,
						     struct cmdQ *q)
{
	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		struct cmdQ_e *e1 = *e;
		struct cmdQ_ce *ce1 = *ce;

		while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
			*desc_len -= SGE_TX_DESC_MAX_PLEN;
			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
				      *gen, nfrags == 0 && *desc_len == 0);
			ce1->skb = NULL;
			dma_unmap_len_set(ce1, dma_len, 0);
			*desc_mapping += SGE_TX_DESC_MAX_PLEN;
			if (*desc_len) {
				ce1++;
				e1++;
				if (++pidx == q->size) {
					pidx = 0;
					*gen ^= 1;
					ce1 = q->centries;
					e1 = q->entries;
				}
			}
		}
		*e = e1;
		*ce = ce1;
	}
	return pidx;
}

/*
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.
 */
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
				  unsigned int pidx, unsigned int gen,
				  struct cmdQ *q)
{
	dma_addr_t mapping, desc_mapping;
	struct cmdQ_e *e, *e1;
	struct cmdQ_ce *ce;
	unsigned int i, flags, first_desc_len, desc_len,
	    nfrags = skb_shinfo(skb)->nr_frags;

	e = e1 = &q->entries[pidx];
	ce = &q->centries[pidx];

	mapping = pci_map_single(adapter->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	desc_mapping = mapping;
	desc_len = skb_headlen(skb);

	flags = F_CMD_DATAVALID | F_CMD_SOP |
	    V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
	    V_CMD_GEN2(gen);
	first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
	    desc_len : SGE_TX_DESC_MAX_PLEN;
	e->addr_lo = (u32)desc_mapping;
	e->addr_hi = (u64)desc_mapping >> 32;
	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
	ce->skb = NULL;
	dma_unmap_len_set(ce, dma_len, 0);

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
	    desc_len > SGE_TX_DESC_MAX_PLEN) {
		desc_mapping += first_desc_len;
		desc_len -= first_desc_len;
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}
		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);

		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
	}

	ce->skb = NULL;
	dma_unmap_addr_set(ce, dma_addr, mapping);
	dma_unmap_len_set(ce, dma_len, skb_headlen(skb));

	for (i = 0; nfrags--; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}

		mapping = pci_map_page(adapter->pdev, frag->page,
				       frag->page_offset, frag->size,
				       PCI_DMA_TODEVICE);
		desc_mapping = mapping;
		desc_len = frag->size;

		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
		ce->skb = NULL;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, frag->size);
	}
	ce->skb = skb;
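	/* Write the SOP entry's flags last: setting F_CMD_DATAVALID here
	 * hands the whole descriptor chain over to the HW. */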
1296 wmb();
1297 e->flags = flags;
1298}
1299
1300/*
1301 * Clean up completed Tx buffers.
1302 */
1303static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
1304{
1305 unsigned int reclaim = q->processed - q->cleaned;
1306
1307 if (reclaim) {
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -08001308 pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
1309 q->processed, q->cleaned);
Scott Bardone559fb512005-06-23 01:40:19 -04001310 free_cmdQ_buffers(sge, q, reclaim);
1311 q->cleaned += reclaim;
1312 }
1313}
1314
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -08001315/*
1316 * Called from tasklet. Checks the scheduler for any
1317 * pending skbs that can be sent.
1318 */
1319static void restart_sched(unsigned long arg)
1320{
1321 struct sge *sge = (struct sge *) arg;
1322 struct adapter *adapter = sge->adapter;
1323 struct cmdQ *q = &sge->cmdQ[0];
1324 struct sk_buff *skb;
1325 unsigned int credits, queued_skb = 0;
1326
1327 spin_lock(&q->lock);
1328 reclaim_completed_tx(sge, q);
1329
1330 credits = q->size - q->in_use;
1331 pr_debug("restart_sched credits=%d\n", credits);
1332 while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
1333 unsigned int genbit, pidx, count;
1334 count = 1 + skb_shinfo(skb)->nr_frags;
Francois Romieu356bd142006-12-11 23:47:00 +01001335 count += compute_large_page_tx_descs(skb);
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -08001336 q->in_use += count;
1337 genbit = q->genbit;
1338 pidx = q->pidx;
1339 q->pidx += count;
1340 if (q->pidx >= q->size) {
1341 q->pidx -= q->size;
1342 q->genbit ^= 1;
1343 }
1344 write_tx_descs(adapter, skb, pidx, genbit, q);
1345 credits = q->size - q->in_use;
1346 queued_skb = 1;
1347 }
1348
1349 if (queued_skb) {
1350 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1351 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1352 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1353 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1354 }
1355 }
1356 spin_unlock(&q->lock);
1357}
Scott Bardone559fb512005-06-23 01:40:19 -04001358
Scott Bardone559fb512005-06-23 01:40:19 -04001359/**
1360 * sge_rx - process an ingress ethernet packet
1361 * @sge: the sge structure
1362 * @fl: the free list that contains the packet buffer
1363 * @len: the packet length
1364 *
1365 * Process an ingress ethernet pakcet and deliver it to the stack.
1366 */
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001367static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
Scott Bardone559fb512005-06-23 01:40:19 -04001368{
1369 struct sk_buff *skb;
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001370 const struct cpl_rx_pkt *p;
Scott Bardone559fb512005-06-23 01:40:19 -04001371 struct adapter *adapter = sge->adapter;
Stephen Hemminger56f643c2006-12-01 16:36:21 -08001372 struct sge_port_stats *st;
Michał Mirosław30f554f2011-04-18 13:31:20 +00001373 struct net_device *dev;
Scott Bardone559fb512005-06-23 01:40:19 -04001374
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001375 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
Stephen Hemminger56f643c2006-12-01 16:36:21 -08001376 if (unlikely(!skb)) {
1377 sge->stats.rx_drops++;
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001378 return;
Scott Bardone559fb512005-06-23 01:40:19 -04001379 }
1380
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001381 p = (const struct cpl_rx_pkt *) skb->data;
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -08001382 if (p->iff >= adapter->params.nports) {
1383 kfree_skb(skb);
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001384 return;
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -08001385 }
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001386 __skb_pull(skb, sizeof(*p));
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -08001387
Christoph Lameterca0c9582009-10-03 19:48:22 +09001388 st = this_cpu_ptr(sge->port_stats[p->iff]);
Michał Mirosław30f554f2011-04-18 13:31:20 +00001389 dev = adapter->port[p->iff].dev;
Stephen Hemminger56f643c2006-12-01 16:36:21 -08001390
Michał Mirosław30f554f2011-04-18 13:31:20 +00001391 skb->protocol = eth_type_trans(skb, dev);
1392 if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
Scott Bardone559fb512005-06-23 01:40:19 -04001393 skb->protocol == htons(ETH_P_IP) &&
1394 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
Stephen Hemminger56f643c2006-12-01 16:36:21 -08001395 ++st->rx_cso_good;
Scott Bardone559fb512005-06-23 01:40:19 -04001396 skb->ip_summed = CHECKSUM_UNNECESSARY;
1397 } else
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001398 skb_checksum_none_assert(skb);
Scott Bardone559fb512005-06-23 01:40:19 -04001399
1400 if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
Stephen Hemminger56f643c2006-12-01 16:36:21 -08001401 st->vlan_xtract++;
Francois Romieu4422b002008-07-11 00:29:19 +02001402 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
1403 ntohs(p->vlan));
1404 } else
Scott Bardone559fb512005-06-23 01:40:19 -04001405 netif_receive_skb(skb);
Scott Bardone559fb512005-06-23 01:40:19 -04001406}

/*
 * Returns true if a command queue has enough available descriptors that
 * we can resume Tx operation after temporarily disabling its packet queue.
 */
static inline int enough_free_Tx_descs(const struct cmdQ *q)
{
	unsigned int r = q->processed - q->cleaned;

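	/* r counts descriptors that completed but have not been reclaimed
	 * yet, so in_use - r is what is genuinely still in flight. Resume
	 * once less than half the queue is occupied.
	 */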
	return q->in_use - r < (q->size >> 1);
}

/*
 * Restarts the Tx path once sufficient space has become available in the
 * SGE command queues after the Tx packet schedulers have been suspended.
 */
static void restart_tx_queues(struct sge *sge)
{
	struct adapter *adap = sge->adapter;
	int i;

	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
		return;

	for_each_port(adap, i) {
		struct net_device *nd = adap->port[i].dev;

		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
		    netif_running(nd)) {
			sge->stats.cmdQ_restarted[2]++;
			netif_wake_queue(nd);
		}
	}
}

/*
 * update_tx_info is called from the interrupt handler/NAPI path to process
 * cmdQ0 credit returns and doorbell flags.
 */
static unsigned int update_tx_info(struct adapter *adapter,
				   unsigned int flags,
				   unsigned int pr0)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *cmdq = &sge->cmdQ[0];

	cmdq->processed += pr0;
	if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
		freelQs_empty(sge);
		flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
	}
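	/* A set F_CMDQ0_ENABLE bit here means the hardware reported cmdQ0
	 * as sleeping; if descriptors are still outstanding, ring the
	 * doorbell to restart it.
	 */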
	if (flags & F_CMDQ0_ENABLE) {
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);

		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
		if (sge->tx_sched)
			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);

		flags &= ~F_CMDQ0_ENABLE;
	}

	if (unlikely(sge->stopped_tx_queues != 0))
		restart_tx_queues(sge);

	return flags;
}

/*
 * Process SGE responses, up to the supplied budget. Returns the number of
 * responses processed.
 */
static int process_responses(struct adapter *adapter, int budget)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	int done = 0;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

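	/* An entry belongs to the driver while its generation bit matches
	 * q->genbit; the expected sense flips on every index wrap, so stale
	 * entries from the previous pass are never reprocessed.
	 */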
	while (done < budget && e->GenerationBit == q->genbit) {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		/* We batch updates to the TX side to avoid cacheline
		 * ping-pong of TX state information on MP where the sender
		 * might run on a different CPU than this function...
		 */
		if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
			cmdq_processed[0] = 0;
		}

		if (unlikely(cmdq_processed[1] > 16)) {
			sge->cmdQ[1].processed += cmdq_processed[1];
			cmdq_processed[1] = 0;
		}

		if (likely(e->DataValid)) {
			struct freelQ *fl = &sge->freelQ[e->FreelistQid];

			BUG_ON(!e->Sop || !e->Eop);
			if (unlikely(e->Offload))
				unexpected_offload(adapter, fl);
			else
				sge_rx(sge, fl, e->BufferLength);

			++done;

			/*
			 * Note: this depends on each packet consuming a
			 * single free-list buffer; cf. the BUG above.
			 */
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
			prefetch(fl->centries[fl->cidx].skb);

			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
		} else
			sge->stats.pure_rsps++;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

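		/* Return response-queue credits to the hardware in batches
		 * rather than with one register write per entry.
		 */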
		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
	}

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return done;
}

static inline int responses_pending(const struct adapter *adapter)
{
	const struct respQ *Q = &adapter->sge->respQ;
	const struct respQ_e *e = &Q->entries[Q->cidx];

	return e->GenerationBit == Q->genbit;
}

/*
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses. Such responses are too light-weight to
 * justify calling a softirq when using NAPI, so we handle them specially
 * in hard interrupt context. Returns 1 if it encounters a valid
 * data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adapter)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	prefetch(fl->centries[fl->cidx].skb);
	if (e->DataValid)
		return 1;

	do {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return e->GenerationBit == q->genbit;
}

/*
 * Handler for new data events when using NAPI. This does not need any locking
 * or protection from interrupts as data interrupts are off at this point and
 * other adapter interrupts do not interfere.
 */
int t1_poll(struct napi_struct *napi, int budget)
{
	struct adapter *adapter = container_of(napi, struct adapter, napi);
	int work_done = process_responses(adapter, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);
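		/* Report how far the response queue has been consumed;
		 * this write apparently also lets the SGE resume raising
		 * data interrupts.
		 */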
		writel(adapter->sge->respQ.cidx,
		       adapter->regs + A_SG_SLEEPING);
	}
	return work_done;
}

irqreturn_t t1_interrupt(int irq, void *data)
{
	struct adapter *adapter = data;
	struct sge *sge = adapter->sge;
	int handled;

	if (likely(responses_pending(adapter))) {
		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

		if (napi_schedule_prep(&adapter->napi)) {
			if (process_pure_responses(adapter))
				__napi_schedule(&adapter->napi);
			else {
				/* no data, no NAPI needed */
				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
				/* undo schedule_prep */
				napi_enable(&adapter->napi);
			}
		}
		return IRQ_HANDLED;
	}

	spin_lock(&adapter->async_lock);
	handled = t1_slow_intr_handler(adapter);
	spin_unlock(&adapter->async_lock);

	if (!handled)
		sge->stats.unhandled_irqs++;

	return IRQ_RETVAL(handled != 0);
}

/*
 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
 *
 * The code figures out how many entries the sk_buff will require in the
 * cmdQ and updates the cmdQ data structure with the state once the enqueue
 * has completed. Then, it doesn't access the global structure anymore, but
 * uses the corresponding fields on the stack. In conjunction with a spinlock
 * around that code, we can make the function reentrant without holding the
 * lock when we actually enqueue (which might be expensive, especially on
 * architectures with IO MMUs).
 *
 * This runs with softirqs disabled.
 */
static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		     unsigned int qid, struct net_device *dev)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;

	if (!spin_trylock(&q->lock))
		return NETDEV_TX_LOCKED;

	reclaim_completed_tx(sge, q);

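	/* Descriptor demand: one for the linear data plus one per page
	 * fragment, with extras added by compute_large_page_tx_descs()
	 * for any fragment larger than SGE_TX_DESC_MAX_PLEN.
	 */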
	pidx = q->pidx;
	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;
	count += compute_large_page_tx_descs(skb);

	/* Ethernet packet */
	if (unlikely(credits < count)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[2]++;
			pr_err("%s: Tx ring full while queue awake!\n",
			       adapter->name);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

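	/* Stop the queue proactively if this packet drops the free
	 * descriptors below the reserve threshold; it is woken again from
	 * restart_tx_queues() once enough credits have been reclaimed.
	 */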
	if (unlikely(credits - count < q->stop_thres)) {
		netif_stop_queue(dev);
		set_bit(dev->if_port, &sge->stopped_tx_queues);
		sge->stats.cmdQ_full[2]++;
	}

	/* T204 cmdQ0 skbs that are destined for a certain port have to go
	 * through the scheduler.
	 */
	if (sge->tx_sched && !qid && skb->dev) {
use_sched:
		use_sched_skb = 1;
		/* Note that the scheduler might return a different skb than
		 * the one passed in.
		 */
		skb = sched_skb(sge, skb, credits);
		if (!skb) {
			spin_unlock(&q->lock);
			return NETDEV_TX_OK;
		}
		pidx = q->pidx;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
	}

	q->in_use += count;
	genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);

	write_tx_descs(adapter, skb, pidx, genbit, q);

	/*
	 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
	 * the doorbell if the Q is asleep. There is a natural race, where
	 * the hardware is going to sleep just after we checked, however,
	 * then the interrupt handler will detect the outstanding TX packet
	 * and ring the doorbell for us.
	 */
	if (qid)
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
	else {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}

	if (use_sched_skb) {
		if (spin_trylock(&q->lock)) {
			credits = q->size - q->in_use;
			skb = NULL;
			goto use_sched;
		}
	}
	return NETDEV_TX_OK;
}

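/* Packs the Ethernet encapsulation type into the top two bits and the MSS
 * into the low 14 bits of the eth_type_mss field of the LSO header.
 */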
#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))

/*
 * eth_hdr_len - return the length of an Ethernet header
 * @data: pointer to the start of the Ethernet header
 *
 * Returns the length of an Ethernet header, including optional VLAN tag.
 */
static inline int eth_hdr_len(const void *data)
{
	const struct ethhdr *e = data;

	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
}

/*
 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
 */
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct sge *sge = adapter->sge;
	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
	struct cpl_tx_pkt *cpl;
	struct sk_buff *orig_skb = skb;
	int ret;

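	/* ETH_P_CPL5 frames are assumed to already carry a CPL header,
	 * so they skip header construction entirely.
	 */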
	if (skb->protocol == htons(ETH_P_CPL5))
		goto send;

	/*
	 * We are using a non-standard hard_header_len.
	 * Allocate more header room in the rare cases it is not big enough.
	 */
	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
		++st->tx_need_hdrroom;
		dev_kfree_skb_any(orig_skb);
		if (!skb)
			return NETDEV_TX_OK;
	}

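	/* For GSO packets, prepend a CPL_TX_PKT_LSO header so the hardware
	 * performs the segmentation; the IP and TCP header sizes are given
	 * in 32-bit words, as in the headers themselves.
	 */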
	if (skb_shinfo(skb)->gso_size) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr;

		++st->tx_tso;

		eth_type = skb_network_offset(skb) == ETH_HLEN ?
			CPL_ETH_II : CPL_ETH_II_VLAN;

		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
		hdr->opcode = CPL_TX_PKT_LSO;
		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
							  skb_shinfo(skb)->gso_size));
		hdr->len = htonl(skb->len - sizeof(*hdr));
		cpl = (struct cpl_tx_pkt *)hdr;
	} else {
		/*
		 * Packets shorter than ETH_HLEN can break the MAC, drop them
		 * early. Also, we may get oversized packets because some
		 * parts of the kernel don't handle our unusual hard_header_len
		 * right, drop those too.
		 */
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			pr_debug("%s: packet size %d hdr %d mtu %d\n", dev->name,
				 skb->len, eth_hdr_len(skb->data), dev->mtu);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
			if (unlikely(skb_checksum_help(skb))) {
				pr_debug("%s: unable to do udp checksum\n", dev->name);
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}

		/* We assume this catches the gratuitous ARP and can reuse
		 * it later to flush out stuck ESPI packets...
		 */
		if (unlikely(!adapter->sge->espibug_skb[dev->if_port])) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb[dev->if_port] = skb;
				/* We want to re-use this skb later. We
				 * simply bump the reference count and it
				 * will not be freed...
				 */
				skb = skb_get(skb);
			}
		}

		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;	/* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
	}
	cpl->iff = dev->if_port;

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	if (vlan_tx_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(vlan_tx_tag_get(skb));
		st->vlan_insert++;
	} else
#endif
		cpl->vlan_valid = 0;

send:
	ret = t1_sge_tx(skb, adapter, 0, dev);

	/* If transmit busy, and we reallocated skb's due to headroom limit,
	 * then silently discard to avoid leak.
	 */
	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}

/*
 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
 */
static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) {	/* flush pending credits */
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}

/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}

/*
 * Allocates both RX and TX resources and configures the SGE. However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists calculate the payload
	 * capacity of the large buffers. Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}

/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;

	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		kfree_skb(sge->espibug_skb[i]);
}

/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround.
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (!netif_running(adapter->port[i].dev) ||
			    netif_queue_stopped(adapter->port[i].dev) ||
			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
				continue;

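			/* Rewrite the stashed skb's MAC bytes with the
			 * Chelsio address the first time through, using
			 * cb[0] as an "already patched" marker.
			 */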
			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr,
						ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr,
						ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of
			 * the skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

static void espibug_workaround(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb[0];
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr,
						ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr,
						ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge * __devinit t1_sge_create(struct adapter *adapter,
				     struct sge_params *p)
{
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	int i;

	if (!sge)
		return NULL;

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
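	/* T1B parts deliver packets unpadded and keep jumbo buffers on
	 * free list 1; later parts pad ingress packets by two bytes
	 * (likely so the IP header lands word-aligned) and use free
	 * list 0 for jumbo buffers.
	 */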
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	for_each_port(adapter, i) {
		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
		if (!sge->port_stats[i])
			goto nomem_port;
	}

	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;

	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);

		if (adapter->params.nports > 1) {
			tx_sched_init(sge);
			sge->espibug_timer.function = espibug_workaround_t204;
		} else
			sge->espibug_timer.function = espibug_workaround;
		sge->espibug_timer.data = (unsigned long)sge->adapter;

		sge->espibug_timeout = 1;
		/* for T204, every 10ms */
		if (adapter->params.nports > 1)
			sge->espibug_timeout = HZ/100;
	}

	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	if (sge->tx_sched) {
		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
			p->rx_coalesce_usecs = 15;
		else
			p->rx_coalesce_usecs = 50;
	} else
		p->rx_coalesce_usecs = 50;

	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;

	return sge;
nomem_port:
	while (i >= 0) {
		free_percpu(sge->port_stats[i]);
		--i;
	}
	kfree(sge);
	return NULL;
}