/*****************************************************************************
 *                                                                           *
 * File: sge.c                                                               *
 * $Revision: 1.26 $                                                         *
 * $Date: 2005/06/21 18:29:48 $                                              *
 * Description:                                                              *
 *  DMA engine.                                                              *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                               *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, see <http://www.gnu.org/licenses/>.           *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                    *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "cpl5_cmd.h"
#include "sge.h"
#include "regs.h"
#include "espi.h"

/* This belongs in if_ether.h */
#define ETH_P_CPL5 0xf

#define SGE_CMDQ_N              2
#define SGE_FREELQ_N            2
#define SGE_CMDQ0_E_N           1024
#define SGE_CMDQ1_E_N           128
#define SGE_FREEL_SIZE          4096
#define SGE_JUMBO_FREEL_SIZE    512
#define SGE_FREEL_REFILL_THRESH 16
#define SGE_RESPQ_E_N           1024
#define SGE_INTRTIMER_NRES      1000
#define SGE_RX_SM_BUF_SIZE      1536
#define SGE_TX_DESC_MAX_PLEN    16384

#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)

/*
 * Period of the TX buffer reclaim timer.  This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

#define M_CMD_LEN       0x7fffffff
#define V_CMD_LEN(v)    (v)
#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)   ((v) << 31)
#define V_CMD_GEN2(v)   (v)
#define F_CMD_DATAVALID (1 << 1)
#define F_CMD_SOP       (1 << 2)
#define V_CMD_EOP(v)    ((v) << 3)

/*
 * Command queue, receive buffer list, and response queue descriptors.
 */
#if defined(__BIG_ENDIAN_BITFIELD)
struct cmdQ_e {
        u32 addr_lo;
        u32 len_gen;
        u32 flags;
        u32 addr_hi;
};

struct freelQ_e {
        u32 addr_lo;
        u32 len_gen;
        u32 gen2;
        u32 addr_hi;
};

struct respQ_e {
        u32 Qsleeping           : 4;
        u32 Cmdq1CreditReturn   : 5;
        u32 Cmdq1DmaComplete    : 5;
        u32 Cmdq0CreditReturn   : 5;
        u32 Cmdq0DmaComplete    : 5;
        u32 FreelistQid         : 2;
        u32 CreditValid         : 1;
        u32 DataValid           : 1;
        u32 Offload             : 1;
        u32 Eop                 : 1;
        u32 Sop                 : 1;
        u32 GenerationBit       : 1;
        u32 BufferLength;
};
#elif defined(__LITTLE_ENDIAN_BITFIELD)
struct cmdQ_e {
        u32 len_gen;
        u32 addr_lo;
        u32 addr_hi;
        u32 flags;
};

struct freelQ_e {
        u32 len_gen;
        u32 addr_lo;
        u32 addr_hi;
        u32 gen2;
};

struct respQ_e {
        u32 BufferLength;
        u32 GenerationBit       : 1;
        u32 Sop                 : 1;
        u32 Eop                 : 1;
        u32 Offload             : 1;
        u32 DataValid           : 1;
        u32 CreditValid         : 1;
        u32 FreelistQid         : 2;
        u32 Cmdq0DmaComplete    : 5;
        u32 Cmdq0CreditReturn   : 5;
        u32 Cmdq1DmaComplete    : 5;
        u32 Cmdq1CreditReturn   : 5;
        u32 Qsleeping           : 4;
};
#endif
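
/*
 * A note on the GenerationBit/gen fields above: descriptor ownership is
 * encoded in a generation bit rather than in head/tail registers.  The
 * sketch below (kept out of the build, following the #if 0 convention used
 * elsewhere in this file) illustrates the idea; entry_is_valid() is a
 * hypothetical helper for illustration, not part of the driver.
 */
#if 0
static int entry_is_valid(const struct respQ_e *e, u8 queue_genbit)
{
        /*
         * An entry is valid iff its generation bit matches the queue's
         * current generation.  The queue's bit flips each time the ring
         * wraps (see refill_free_list() and recycle_fl_buf() below), so
         * leftover entries from the previous pass mismatch automatically.
         */
        return e->GenerationBit == queue_genbit;
}
#endif /* 0 */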

/*
 * SW Context Command and Freelist Queue Descriptors
 */
struct cmdQ_ce {
        struct sk_buff *skb;
        DEFINE_DMA_UNMAP_ADDR(dma_addr);
        DEFINE_DMA_UNMAP_LEN(dma_len);
};

struct freelQ_ce {
        struct sk_buff *skb;
        DEFINE_DMA_UNMAP_ADDR(dma_addr);
        DEFINE_DMA_UNMAP_LEN(dma_len);
};

/*
 * SW command, freelist and response rings
 */
struct cmdQ {
        unsigned long   status;         /* HW DMA fetch status */
        unsigned int    in_use;         /* # of in-use command descriptors */
        unsigned int    size;           /* # of descriptors */
        unsigned int    processed;      /* total # of descs HW has processed */
        unsigned int    cleaned;        /* total # of descs SW has reclaimed */
        unsigned int    stop_thres;     /* SW TX queue suspend threshold */
        u16             pidx;           /* producer index (SW) */
        u16             cidx;           /* consumer index (HW) */
        u8              genbit;         /* current generation (=valid) bit */
        u8              sop;            /* is next entry start of packet? */
        struct cmdQ_e  *entries;        /* HW command descriptor Q */
        struct cmdQ_ce *centries;       /* SW command context descriptor Q */
        dma_addr_t      dma_addr;       /* DMA addr HW command descriptor Q */
        spinlock_t      lock;           /* Lock to protect cmdQ enqueuing */
};

struct freelQ {
        unsigned int    credits;        /* # of available RX buffers */
        unsigned int    size;           /* free list capacity */
        u16             pidx;           /* producer index (SW) */
        u16             cidx;           /* consumer index (HW) */
        u16             rx_buffer_size; /* Buffer size on this free list */
        u16             dma_offset;     /* DMA offset to align IP headers */
        u16             recycleq_idx;   /* skb recycle q to use */
        u8              genbit;         /* current generation (=valid) bit */
        struct freelQ_e *entries;       /* HW freelist descriptor Q */
        struct freelQ_ce *centries;     /* SW freelist context descriptor Q */
        dma_addr_t      dma_addr;       /* DMA addr HW freelist descriptor Q */
};

struct respQ {
        unsigned int    credits;        /* credits to be returned to SGE */
        unsigned int    size;           /* # of response Q descriptors */
        u16             cidx;           /* consumer index (SW) */
        u8              genbit;         /* current generation(=valid) bit */
        struct respQ_e *entries;        /* HW response descriptor Q */
        dma_addr_t      dma_addr;       /* DMA addr HW response descriptor Q */
};

/* Bit flags for cmdQ.status */
enum {
        CMDQ_STAT_RUNNING = 1,          /* fetch engine is running */
        CMDQ_STAT_LAST_PKT_DB = 2       /* last packet rung the doorbell */
};

/* T204 TX SW scheduler */

/* Per T204 TX port */
struct sched_port {
        unsigned int    avail;          /* available bits - quota */
        unsigned int    drain_bits_per_1024ns; /* drain rate */
        unsigned int    speed;          /* drain rate, mbps */
        unsigned int    mtu;            /* mtu size */
        struct sk_buff_head skbq;       /* pending skbs */
};

/* Per T204 device */
struct sched {
        ktime_t         last_updated;   /* last time quotas were computed */
        unsigned int    max_avail;      /* max bits to be sent to any port */
        unsigned int    port;           /* port index (round robin ports) */
        unsigned int    num;            /* num skbs in per port queues */
        struct sched_port p[MAX_NPORTS];
        struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
};

static void restart_sched(unsigned long);

/*
 * Main SGE data structure
 *
 * Interrupts are handled by a single CPU and it is likely that on an MP
 * system the application is migrated to another CPU.  In that scenario, we
 * try to separate the RX (in irq context) and TX state in order to decrease
 * memory contention.
 */
struct sge {
        struct adapter *adapter;        /* adapter backpointer */
        struct net_device *netdev;      /* netdevice backpointer */
        struct freelQ   freelQ[SGE_FREELQ_N]; /* buffer free lists */
        struct respQ    respQ;          /* response Q */
        unsigned long   stopped_tx_queues; /* bitmap of suspended Tx queues */
        unsigned int    rx_pkt_pad;     /* RX padding for L2 packets */
        unsigned int    jumbo_fl;       /* jumbo freelist Q index */
        unsigned int    intrtimer_nres; /* no-resource interrupt timer */
        unsigned int    fixed_intrtimer;/* non-adaptive interrupt timer */
        struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
        struct timer_list espibug_timer;
        unsigned long   espibug_timeout;
        struct sk_buff  *espibug_skb[MAX_NPORTS];
        u32             sge_control;    /* shadow value of sge control reg */
        struct sge_intr_counts stats;
        struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
        struct sched    *tx_sched;
        struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};

static const u8 ch_mac_addr[ETH_ALEN] = {
        0x0, 0x7, 0x43, 0x0, 0x0, 0x0
};

/*
 * stop tasklet and free all pending skbs
 */
static void tx_sched_stop(struct sge *sge)
{
        struct sched *s = sge->tx_sched;
        int i;

        tasklet_kill(&s->sched_tsk);

        for (i = 0; i < MAX_NPORTS; i++)
                __skb_queue_purge(&s->p[i].skbq);
}

/*
 * t1_sched_update_parms() is called when the MTU or link speed changes.  It
 * re-computes scheduler parameters to cope with the change.
 */
unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
                                   unsigned int mtu, unsigned int speed)
{
        struct sched *s = sge->tx_sched;
        struct sched_port *p = &s->p[port];
        unsigned int max_avail_segs;

        pr_debug("%s mtu=%d speed=%d\n", __func__, mtu, speed);
        if (speed)
                p->speed = speed;
        if (mtu)
                p->mtu = mtu;

        if (speed || mtu) {
                unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);

                do_div(drain, (p->mtu + 50) * 1000);
                p->drain_bits_per_1024ns = (unsigned int) drain;

                if (p->speed < 1000)
                        p->drain_bits_per_1024ns =
                                90 * p->drain_bits_per_1024ns / 100;
        }

        if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
                p->drain_bits_per_1024ns -= 16;
                s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
                max_avail_segs = max(1U, 4096 / (p->mtu - 40));
        } else {
                s->max_avail = 16384;
                max_avail_segs = max(1U, 9000 / (p->mtu - 40));
        }

        pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
                 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
                 p->speed, s->max_avail, max_avail_segs,
                 p->drain_bits_per_1024ns);

        return max_avail_segs * (p->mtu - 40);
}
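
/*
 * Worked example for the drain-rate computation above (illustrative
 * numbers): with mtu = 1500 and speed = 1000 Mbps,
 *
 *      drain = 1024 * 1000 * (1500 - 40) / ((1500 + 50) * 1000) = 964
 *
 * bits per 1024 ns, i.e. roughly line rate derated by the ratio of TCP/IP
 * payload (40 bytes of headers excluded) to wire size (~50 bytes of
 * per-packet overhead added).  On a non-CHT204 board the function then
 * returns max(1, 9000 / 1460) * 1460 = 8760 as the per-port quota.
 */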
336
#if 0

/*
 * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
 * data that can be pushed per port.
 */
void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
{
        struct sched *s = sge->tx_sched;
        unsigned int i;

        s->max_avail = val;
        for (i = 0; i < MAX_NPORTS; i++)
                t1_sched_update_parms(sge, i, 0, 0);
}

/*
 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
 * is draining.
 */
void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
                                    unsigned int val)
{
        struct sched *s = sge->tx_sched;
        struct sched_port *p = &s->p[port];

        p->drain_bits_per_1024ns = val * 1024 / 1000;
        t1_sched_update_parms(sge, port, 0, 0);
}

#endif  /* 0 */

/*
 * tx_sched_init() allocates resources and does basic initialization.
 */
static int tx_sched_init(struct sge *sge)
{
        struct sched *s;
        int i;

        s = kzalloc(sizeof(struct sched), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        pr_debug("tx_sched_init\n");
        tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
        sge->tx_sched = s;

        for (i = 0; i < MAX_NPORTS; i++) {
                skb_queue_head_init(&s->p[i].skbq);
                t1_sched_update_parms(sge, i, 1500, 1000);
        }

        return 0;
}

/*
 * sched_update_avail() computes the delta since the last time it was called
 * and updates the per port quota (number of bits that can be sent to any
 * port).
 */
static inline int sched_update_avail(struct sge *sge)
{
        struct sched *s = sge->tx_sched;
        ktime_t now = ktime_get();
        unsigned int i;
        long long delta_time_ns;

        delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));

        pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
        if (delta_time_ns < 15000)
                return 0;

        for (i = 0; i < MAX_NPORTS; i++) {
                struct sched_port *p = &s->p[i];
                unsigned int delta_avail;

                delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
                p->avail = min(p->avail + delta_avail, s->max_avail);
        }

        s->last_updated = now;

        return 1;
}
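
/*
 * On the '>> 13' above: drain_bits_per_1024ns * delta_time_ns yields bits
 * scaled up by 1024, and >> 13 divides by 8192 = 1024 * 8, i.e. it undoes
 * the scaling and converts bits to bytes, matching the byte-denominated
 * skb->len comparison in sched_skb().  E.g. with drain_bits_per_1024ns =
 * 964 (1 Gbps, MTU 1500) and a 15 us delta:
 *
 *      (964 * 15000) >> 13 = 1765 bytes added to the port quota.
 */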
422
/*
 * sched_skb() is called from two different places.  In the tx path, any
 * packet generating load on an output port will call sched_skb()
 * (skb != NULL).  In addition, sched_skb() is called from the irq/soft irq
 * context (skb == NULL).
 * The scheduler only returns a skb (which will then be sent) if the
 * length of the skb is <= the current quota of the output port.
 */
static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
                                 unsigned int credits)
{
        struct sched *s = sge->tx_sched;
        struct sk_buff_head *skbq;
        unsigned int i, len, update = 1;

        pr_debug("sched_skb %p\n", skb);
        if (!skb) {
                if (!s->num)
                        return NULL;
        } else {
                skbq = &s->p[skb->dev->if_port].skbq;
                __skb_queue_tail(skbq, skb);
                s->num++;
                skb = NULL;
        }

        if (credits < MAX_SKB_FRAGS + 1)
                goto out;

again:
        for (i = 0; i < MAX_NPORTS; i++) {
                s->port = (s->port + 1) & (MAX_NPORTS - 1);
                skbq = &s->p[s->port].skbq;

                skb = skb_peek(skbq);

                if (!skb)
                        continue;

                len = skb->len;
                if (len <= s->p[s->port].avail) {
                        s->p[s->port].avail -= len;
                        s->num--;
                        __skb_unlink(skb, skbq);
                        goto out;
                }
                skb = NULL;
        }

        if (update-- && sched_update_avail(sge))
                goto again;

out:
        /*
         * If there are more pending skbs, we use the hardware to schedule us
         * again.
         */
        if (s->num && !skb) {
                struct cmdQ *q = &sge->cmdQ[0];

                clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
                if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
                        set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
                        writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
                }
        }
        pr_debug("sched_skb ret %p\n", skb);

        return skb;
}
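
/*
 * Illustrative call patterns for sched_skb() (a sketch; the TX-path caller
 * is assumed to be t1_sge_tx(), which lives later in the file and is not
 * shown in this excerpt):
 *
 *      TX path: skb = sched_skb(sge, skb, credits);  enqueue, maybe dequeue
 *      tasklet: skb = sched_skb(sge, NULL, credits); drain pending skbs
 *
 * When skbs remain queued but none fits its port's quota, the doorbell
 * write above makes the hardware interrupt us again, which in turn reruns
 * the scheduler via restart_sched().
 */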
491
/*
 * PIO to indicate that memory mapped Q contains valid descriptor(s).
 */
static inline void doorbell_pio(struct adapter *adapter, u32 val)
{
        wmb();
        writel(val, adapter->regs + A_SG_DOORBELL);
}

/*
 * Frees all RX buffers on the freelist Q.  The caller must make sure that
 * the SGE is turned off before calling this function.
 */
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
{
        unsigned int cidx = q->cidx;

        while (q->credits--) {
                struct freelQ_ce *ce = &q->centries[cidx];

                pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
                                 dma_unmap_len(ce, dma_len),
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb(ce->skb);
                ce->skb = NULL;
                if (++cidx == q->size)
                        cidx = 0;
        }
}

/*
 * Free RX free list and response queue resources.
 */
static void free_rx_resources(struct sge *sge)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        if (sge->respQ.entries) {
                size = sizeof(struct respQ_e) * sge->respQ.size;
                pci_free_consistent(pdev, size, sge->respQ.entries,
                                    sge->respQ.dma_addr);
        }

        for (i = 0; i < SGE_FREELQ_N; i++) {
                struct freelQ *q = &sge->freelQ[i];

                if (q->centries) {
                        free_freelQ_buffers(pdev, q);
                        kfree(q->centries);
                }
                if (q->entries) {
                        size = sizeof(struct freelQ_e) * q->size;
                        pci_free_consistent(pdev, size, q->entries,
                                            q->dma_addr);
                }
        }
}

/*
 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
 * response queue.
 */
static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_FREELQ_N; i++) {
                struct freelQ *q = &sge->freelQ[i];

                q->genbit = 1;
                q->size = p->freelQ_size[i];
                q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
                size = sizeof(struct freelQ_e) * q->size;
                q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
                if (!q->entries)
                        goto err_no_mem;

                size = sizeof(struct freelQ_ce) * q->size;
                q->centries = kzalloc(size, GFP_KERNEL);
                if (!q->centries)
                        goto err_no_mem;
        }

        /*
         * Calculate the buffer sizes for the two free lists.  FL0 accommodates
         * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
         * including all the sk_buff overhead.
         *
         * Note: For T2 FL0 and FL1 are reversed.
         */
        sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
                sizeof(struct cpl_rx_data) +
                sge->freelQ[!sge->jumbo_fl].dma_offset;

        size = (16 * 1024) -
               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;

        /*
         * Setup which skb recycle Q should be used when recycling buffers from
         * each free list.
         */
        sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
        sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;

        sge->respQ.genbit = 1;
        sge->respQ.size = SGE_RESPQ_E_N;
        sge->respQ.credits = 0;
        size = sizeof(struct respQ_e) * sge->respQ.size;
        sge->respQ.entries =
                pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
        if (!sge->respQ.entries)
                goto err_no_mem;
        return 0;

err_no_mem:
        free_rx_resources(sge);
        return -ENOMEM;
}

/*
 * Reclaims n TX descriptors and frees the buffers associated with them.
 */
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
{
        struct cmdQ_ce *ce;
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int cidx = q->cidx;

        q->in_use -= n;
        ce = &q->centries[cidx];
        while (n--) {
                if (likely(dma_unmap_len(ce, dma_len))) {
                        pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
                                         dma_unmap_len(ce, dma_len),
                                         PCI_DMA_TODEVICE);
                        if (q->sop)
                                q->sop = 0;
                }
                if (ce->skb) {
                        dev_kfree_skb_any(ce->skb);
                        q->sop = 1;
                }
                ce++;
                if (++cidx == q->size) {
                        cidx = 0;
                        ce = q->centries;
                }
        }
        q->cidx = cidx;
}

/*
 * Free TX resources.
 *
 * Assumes that SGE is stopped and all interrupts are disabled.
 */
static void free_tx_resources(struct sge *sge)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_CMDQ_N; i++) {
                struct cmdQ *q = &sge->cmdQ[i];

                if (q->centries) {
                        if (q->in_use)
                                free_cmdQ_buffers(sge, q, q->in_use);
                        kfree(q->centries);
                }
                if (q->entries) {
                        size = sizeof(struct cmdQ_e) * q->size;
                        pci_free_consistent(pdev, size, q->entries,
                                            q->dma_addr);
                }
        }
}

/*
 * Allocates basic TX resources, consisting of memory mapped command Qs.
 */
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_CMDQ_N; i++) {
                struct cmdQ *q = &sge->cmdQ[i];

                q->genbit = 1;
                q->sop = 1;
                q->size = p->cmdQ_size[i];
                q->in_use = 0;
                q->status = 0;
                q->processed = q->cleaned = 0;
                q->stop_thres = 0;
                spin_lock_init(&q->lock);
                size = sizeof(struct cmdQ_e) * q->size;
                q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
                if (!q->entries)
                        goto err_no_mem;

                size = sizeof(struct cmdQ_ce) * q->size;
                q->centries = kzalloc(size, GFP_KERNEL);
                if (!q->centries)
                        goto err_no_mem;
        }

        /*
         * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
         * only.  For queue 0 set the stop threshold so we can handle one more
         * packet from each port, plus reserve an additional 24 entries for
         * Ethernet packets only.  Queue 1 never suspends nor do we reserve
         * space for Ethernet packets.
         */
        sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
                (MAX_SKB_FRAGS + 1);
        return 0;

err_no_mem:
        free_tx_resources(sge);
        return -ENOMEM;
}

static inline void setup_ring_params(struct adapter *adapter, u64 addr,
                                     u32 size, int base_reg_lo,
                                     int base_reg_hi, int size_reg)
{
        writel((u32)addr, adapter->regs + base_reg_lo);
        writel(addr >> 32, adapter->regs + base_reg_hi);
        writel(size, adapter->regs + size_reg);
}

/*
 * Enable/disable VLAN acceleration.
 */
void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
{
        struct sge *sge = adapter->sge;

        if (features & NETIF_F_HW_VLAN_CTAG_RX)
                sge->sge_control |= F_VLAN_XTRACT;
        else
                sge->sge_control &= ~F_VLAN_XTRACT;
        if (adapter->open_device_map) {
                writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
                readl(adapter->regs + A_SG_CONTROL); /* flush */
        }
}

/*
 * Programs the various SGE registers.  The engine is not yet enabled, but
 * sge->sge_control is set up and ready to go.
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
        struct adapter *ap = sge->adapter;

        writel(0, ap->regs + A_SG_CONTROL);
        setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
                          A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
        setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
                          A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
        setup_ring_params(ap, sge->freelQ[0].dma_addr,
                          sge->freelQ[0].size, A_SG_FL0BASELWR,
                          A_SG_FL0BASEUPR, A_SG_FL0SIZE);
        setup_ring_params(ap, sge->freelQ[1].dma_addr,
                          sge->freelQ[1].size, A_SG_FL1BASELWR,
                          A_SG_FL1BASEUPR, A_SG_FL1SIZE);

        /* The threshold comparison uses <. */
        writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

        setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
                          A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
        writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

        sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
                F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
                V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
                V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
        sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

        /* Initialize no-resource timer */
        sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

        t1_sge_set_coalesce_params(sge, p);
}

/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
        return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
                sge->freelQ[sge->jumbo_fl].dma_offset -
                sizeof(struct cpl_rx_data);
}

/*
 * Frees all SGE related resources and the sge structure itself
 */
void t1_sge_destroy(struct sge *sge)
{
        int i;

        for_each_port(sge->adapter, i)
                free_percpu(sge->port_stats[i]);

        kfree(sge->tx_sched);
        free_tx_resources(sge);
        free_rx_resources(sge);
        kfree(sge);
}

/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done.  This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
 * aligned.
 */
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        struct freelQ_ce *ce = &q->centries[q->pidx];
        struct freelQ_e *e = &q->entries[q->pidx];
        unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

        while (q->credits < q->size) {
                struct sk_buff *skb;
                dma_addr_t mapping;

                skb = dev_alloc_skb(q->rx_buffer_size);
                if (!skb)
                        break;

                skb_reserve(skb, q->dma_offset);
                mapping = pci_map_single(pdev, skb->data, dma_len,
                                         PCI_DMA_FROMDEVICE);
                skb_reserve(skb, sge->rx_pkt_pad);

                ce->skb = skb;
                dma_unmap_addr_set(ce, dma_addr, mapping);
                dma_unmap_len_set(ce, dma_len, dma_len);
                e->addr_lo = (u32)mapping;
                e->addr_hi = (u64)mapping >> 32;
                e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
                wmb();
                e->gen2 = V_CMD_GEN2(q->genbit);

                e++;
                ce++;
                if (++q->pidx == q->size) {
                        q->pidx = 0;
                        q->genbit ^= 1;
                        ce = q->centries;
                        e = q->entries;
                }
                q->credits++;
        }
}
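
/*
 * Note the write ordering in refill_free_list() above: the address and
 * len_gen fields are written first, then wmb(), and only then gen2, the
 * field that makes the entry valid from the SGE's point of view.  The
 * hardware can therefore never observe a descriptor whose generation bit
 * is current but whose address is stale.  recycle_fl_buf() below follows
 * the same protocol.
 */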

/*
 * Calls refill_free_list for both free lists.  If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
 */
static void freelQs_empty(struct sge *sge)
{
        struct adapter *adapter = sge->adapter;
        u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
        u32 irqholdoff_reg;

        refill_free_list(sge, &sge->freelQ[0]);
        refill_free_list(sge, &sge->freelQ[1]);

        if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
            sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
                irq_reg |= F_FL_EXHAUSTED;
                irqholdoff_reg = sge->fixed_intrtimer;
        } else {
                /* Clear the F_FL_EXHAUSTED interrupts for now */
                irq_reg &= ~F_FL_EXHAUSTED;
                irqholdoff_reg = sge->intrtimer_nres;
        }
        writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
        writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);

        /* We reenable the Qs to force a freelist GTS interrupt later */
        doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
}

#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
                        F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)

/*
 * Disable SGE Interrupts
 */
void t1_sge_intr_disable(struct sge *sge)
{
        u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

        writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
        writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
}

/*
 * Enable SGE interrupts.
 */
void t1_sge_intr_enable(struct sge *sge)
{
        u32 en = SGE_INT_ENABLE;
        u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

        if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
                en &= ~F_PACKET_TOO_BIG;
        writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
        writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
}

/*
 * Clear SGE interrupts.
 */
void t1_sge_intr_clear(struct sge *sge)
{
        writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
        writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
}

/*
 * SGE 'Error' interrupt handler
 */
int t1_sge_intr_error_handler(struct sge *sge)
{
        struct adapter *adapter = sge->adapter;
        u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);

        if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
                cause &= ~F_PACKET_TOO_BIG;
        if (cause & F_RESPQ_EXHAUSTED)
                sge->stats.respQ_empty++;
        if (cause & F_RESPQ_OVERFLOW) {
                sge->stats.respQ_overflow++;
                pr_alert("%s: SGE response queue overflow\n",
                         adapter->name);
        }
        if (cause & F_FL_EXHAUSTED) {
                sge->stats.freelistQ_empty++;
                freelQs_empty(sge);
        }
        if (cause & F_PACKET_TOO_BIG) {
                sge->stats.pkt_too_big++;
                pr_alert("%s: SGE max packet size exceeded\n",
                         adapter->name);
        }
        if (cause & F_PACKET_MISMATCH) {
                sge->stats.pkt_mismatch++;
                pr_alert("%s: SGE packet mismatch\n", adapter->name);
        }
        if (cause & SGE_INT_FATAL)
                t1_fatal_err(adapter);

        writel(cause, adapter->regs + A_SG_INT_CAUSE);
        return 0;
}

const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
{
        return &sge->stats;
}

void t1_sge_get_port_stats(const struct sge *sge, int port,
                           struct sge_port_stats *ss)
{
        int cpu;

        memset(ss, 0, sizeof(*ss));
        for_each_possible_cpu(cpu) {
                struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);

                ss->rx_cso_good += st->rx_cso_good;
                ss->tx_cso += st->tx_cso;
                ss->tx_tso += st->tx_tso;
                ss->tx_need_hdrroom += st->tx_need_hdrroom;
                ss->vlan_xtract += st->vlan_xtract;
                ss->vlan_insert += st->vlan_insert;
        }
}

/**
 *	recycle_fl_buf - recycle a free list buffer
 *	@fl: the free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_fl_buf(struct freelQ *fl, int idx)
{
        struct freelQ_e *from = &fl->entries[idx];
        struct freelQ_e *to = &fl->entries[fl->pidx];

        fl->centries[fl->pidx] = fl->centries[idx];
        to->addr_lo = from->addr_lo;
        to->addr_hi = from->addr_hi;
        to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
        wmb();
        to->gen2 = V_CMD_GEN2(fl->genbit);
        fl->credits++;

        if (++fl->pidx == fl->size) {
                fl->pidx = 0;
                fl->genbit ^= 1;
        }
}

static int copybreak __read_mostly = 256;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

/**
 *	get_packet - return the next ingress packet buffer
 *	@adapter: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the actual packet length, excluding any SGE padding
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static inline struct sk_buff *get_packet(struct adapter *adapter,
                                         struct freelQ *fl, unsigned int len)
{
        const struct freelQ_ce *ce = &fl->centries[fl->cidx];
        struct pci_dev *pdev = adapter->pdev;
        struct sk_buff *skb;

        if (len < copybreak) {
                skb = napi_alloc_skb(&adapter->napi, len);
                if (!skb)
                        goto use_orig_buf;

                skb_put(skb, len);
                pci_dma_sync_single_for_cpu(pdev,
                                            dma_unmap_addr(ce, dma_addr),
                                            dma_unmap_len(ce, dma_len),
                                            PCI_DMA_FROMDEVICE);
                skb_copy_from_linear_data(ce->skb, skb->data, len);
                pci_dma_sync_single_for_device(pdev,
                                               dma_unmap_addr(ce, dma_addr),
                                               dma_unmap_len(ce, dma_len),
                                               PCI_DMA_FROMDEVICE);
                recycle_fl_buf(fl, fl->cidx);
                return skb;
        }

use_orig_buf:
        if (fl->credits < 2) {
                recycle_fl_buf(fl, fl->cidx);
                return NULL;
        }

        pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
                         dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
        skb = ce->skb;
        prefetch(skb->data);

        skb_put(skb, len);
        return skb;
}

/**
 *	unexpected_offload - handle an unexpected offload packet
 *	@adapter: the adapter
 *	@fl: the free list that received the packet
 *
 *	Called when we receive an unexpected offload packet (e.g., the TOE
 *	function is disabled or the card is a NIC).  Prints a message and
 *	recycles the buffer.
 */
static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
{
        struct freelQ_ce *ce = &fl->centries[fl->cidx];
        struct sk_buff *skb = ce->skb;

        pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
                                    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
        pr_err("%s: unexpected offload packet, cmd %u\n",
               adapter->name, *skb->data);
        recycle_fl_buf(fl, fl->cidx);
}

/*
 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
 * SGE_TX_DESC_MAX_PLEN (16KB).  If the PAGE_SIZE is larger than 16KB, the
 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
 * Note that the *_large_page_tx_descs stuff will be optimized out when
 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
 *
 * compute_large_page_tx_descs() computes how many additional descriptors are
 * required to break down the stack's request.
 */
static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
{
        unsigned int count = 0;

        if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
                unsigned int nfrags = skb_shinfo(skb)->nr_frags;
                unsigned int i, len = skb_headlen(skb);

                while (len > SGE_TX_DESC_MAX_PLEN) {
                        count++;
                        len -= SGE_TX_DESC_MAX_PLEN;
                }
                for (i = 0; nfrags--; i++) {
                        const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = skb_frag_size(frag);
                        while (len > SGE_TX_DESC_MAX_PLEN) {
                                count++;
                                len -= SGE_TX_DESC_MAX_PLEN;
                        }
                }
        }
        return count;
}
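
/*
 * Worked example for compute_large_page_tx_descs(), relevant only when
 * PAGE_SIZE > 16KB (e.g. a 64KB-page architecture): a 40960-byte linear
 * segment needs two extra descriptors,
 *
 *      40960 -> 24576 (count = 1) -> 8192 (count = 2, loop exits)
 *
 * leaving 8192 bytes for the descriptor the caller writes anyway.
 */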

/*
 * Write a cmdQ entry.
 *
 * Since this function writes the 'flags' field, it must not be used to
 * write the first cmdQ entry.
 */
static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
                                 unsigned int len, unsigned int gen,
                                 unsigned int eop)
{
        BUG_ON(len > SGE_TX_DESC_MAX_PLEN);

        e->addr_lo = (u32)mapping;
        e->addr_hi = (u64)mapping >> 32;
        e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
        e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
}

/*
 * See comment for previous function.
 *
 * write_large_page_tx_descs() writes additional SGE tx descriptors if
 * *desc_len exceeds HW's capability.
 */
static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
                                                     struct cmdQ_e **e,
                                                     struct cmdQ_ce **ce,
                                                     unsigned int *gen,
                                                     dma_addr_t *desc_mapping,
                                                     unsigned int *desc_len,
                                                     unsigned int nfrags,
                                                     struct cmdQ *q)
{
        if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
                struct cmdQ_e *e1 = *e;
                struct cmdQ_ce *ce1 = *ce;

                while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
                        *desc_len -= SGE_TX_DESC_MAX_PLEN;
                        write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
                                      *gen, nfrags == 0 && *desc_len == 0);
                        ce1->skb = NULL;
                        dma_unmap_len_set(ce1, dma_len, 0);
                        *desc_mapping += SGE_TX_DESC_MAX_PLEN;
                        if (*desc_len) {
                                ce1++;
                                e1++;
                                if (++pidx == q->size) {
                                        pidx = 0;
                                        *gen ^= 1;
                                        ce1 = q->centries;
                                        e1 = q->entries;
                                }
                        }
                }
                *e = e1;
                *ce = ce1;
        }
        return pidx;
}

/*
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.
 */
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
                                  unsigned int pidx, unsigned int gen,
                                  struct cmdQ *q)
{
        dma_addr_t mapping, desc_mapping;
        struct cmdQ_e *e, *e1;
        struct cmdQ_ce *ce;
        unsigned int i, flags, first_desc_len, desc_len,
                     nfrags = skb_shinfo(skb)->nr_frags;

        e = e1 = &q->entries[pidx];
        ce = &q->centries[pidx];

        mapping = pci_map_single(adapter->pdev, skb->data,
                                 skb_headlen(skb), PCI_DMA_TODEVICE);

        desc_mapping = mapping;
        desc_len = skb_headlen(skb);

        flags = F_CMD_DATAVALID | F_CMD_SOP |
                V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
                V_CMD_GEN2(gen);
        first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
                desc_len : SGE_TX_DESC_MAX_PLEN;
        e->addr_lo = (u32)desc_mapping;
        e->addr_hi = (u64)desc_mapping >> 32;
        e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
        ce->skb = NULL;
        dma_unmap_len_set(ce, dma_len, 0);

        if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
            desc_len > SGE_TX_DESC_MAX_PLEN) {
                desc_mapping += first_desc_len;
                desc_len -= first_desc_len;
                e1++;
                ce++;
                if (++pidx == q->size) {
                        pidx = 0;
                        gen ^= 1;
                        e1 = q->entries;
                        ce = q->centries;
                }
                pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
                                                 &desc_mapping, &desc_len,
                                                 nfrags, q);

                if (likely(desc_len))
                        write_tx_desc(e1, desc_mapping, desc_len, gen,
                                      nfrags == 0);
        }

        ce->skb = NULL;
        dma_unmap_addr_set(ce, dma_addr, mapping);
        dma_unmap_len_set(ce, dma_len, skb_headlen(skb));

        for (i = 0; nfrags--; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                e1++;
                ce++;
                if (++pidx == q->size) {
                        pidx = 0;
                        gen ^= 1;
                        e1 = q->entries;
                        ce = q->centries;
                }

                mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                desc_mapping = mapping;
                desc_len = skb_frag_size(frag);

                pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
                                                 &desc_mapping, &desc_len,
                                                 nfrags, q);
                if (likely(desc_len))
                        write_tx_desc(e1, desc_mapping, desc_len, gen,
                                      nfrags == 0);
                ce->skb = NULL;
                dma_unmap_addr_set(ce, dma_addr, mapping);
                dma_unmap_len_set(ce, dma_len, skb_frag_size(frag));
        }
        ce->skb = skb;
        wmb();
        e->flags = flags;
}

/*
 * Clean up completed Tx buffers.
 */
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
{
        unsigned int reclaim = q->processed - q->cleaned;

        if (reclaim) {
                pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
                         q->processed, q->cleaned);
                free_cmdQ_buffers(sge, q, reclaim);
                q->cleaned += reclaim;
        }
}

/*
 * Called from tasklet.  Checks the scheduler for any
 * pending skbs that can be sent.
 */
static void restart_sched(unsigned long arg)
{
        struct sge *sge = (struct sge *) arg;
        struct adapter *adapter = sge->adapter;
        struct cmdQ *q = &sge->cmdQ[0];
        struct sk_buff *skb;
        unsigned int credits, queued_skb = 0;

        spin_lock(&q->lock);
        reclaim_completed_tx(sge, q);

        credits = q->size - q->in_use;
        pr_debug("restart_sched credits=%d\n", credits);
        while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
                unsigned int genbit, pidx, count;

                count = 1 + skb_shinfo(skb)->nr_frags;
                count += compute_large_page_tx_descs(skb);
                q->in_use += count;
                genbit = q->genbit;
                pidx = q->pidx;
                q->pidx += count;
                if (q->pidx >= q->size) {
                        q->pidx -= q->size;
                        q->genbit ^= 1;
                }
                write_tx_descs(adapter, skb, pidx, genbit, q);
                credits = q->size - q->in_use;
                queued_skb = 1;
        }

        if (queued_skb) {
                clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
                if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
                        set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
                        writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
                }
        }
        spin_unlock(&q->lock);
}

Scott Bardone559fb512005-06-23 01:40:19 -04001345/**
1346 * sge_rx - process an ingress ethernet packet
1347 * @sge: the sge structure
1348 * @fl: the free list that contains the packet buffer
1349 * @len: the packet length
1350 *
1351 * Process an ingress ethernet pakcet and deliver it to the stack.
1352 */
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001353static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
Scott Bardone559fb512005-06-23 01:40:19 -04001354{
1355 struct sk_buff *skb;
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001356 const struct cpl_rx_pkt *p;
Scott Bardone559fb512005-06-23 01:40:19 -04001357 struct adapter *adapter = sge->adapter;
Stephen Hemminger56f643c2006-12-01 16:36:21 -08001358 struct sge_port_stats *st;
Michał Mirosław30f554f2011-04-18 13:31:20 +00001359 struct net_device *dev;
Scott Bardone559fb512005-06-23 01:40:19 -04001360
Alexander Duycke0e31212014-12-09 19:41:03 -08001361 skb = get_packet(adapter, fl, len - sge->rx_pkt_pad);
Stephen Hemminger56f643c2006-12-01 16:36:21 -08001362 if (unlikely(!skb)) {
1363 sge->stats.rx_drops++;
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001364 return;
Scott Bardone559fb512005-06-23 01:40:19 -04001365 }
1366
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001367 p = (const struct cpl_rx_pkt *) skb->data;
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -08001368 if (p->iff >= adapter->params.nports) {
1369 kfree_skb(skb);
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001370 return;
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -08001371 }
Stephen Hemminger24a427c2007-01-08 11:26:12 -08001372 __skb_pull(skb, sizeof(*p));
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -08001373
Christoph Lameterca0c9582009-10-03 19:48:22 +09001374 st = this_cpu_ptr(sge->port_stats[p->iff]);
Michał Mirosław30f554f2011-04-18 13:31:20 +00001375 dev = adapter->port[p->iff].dev;
Stephen Hemminger56f643c2006-12-01 16:36:21 -08001376
Michał Mirosław30f554f2011-04-18 13:31:20 +00001377 skb->protocol = eth_type_trans(skb, dev);
	if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
		++st->rx_cso_good;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb_checksum_none_assert(skb);

	if (p->vlan_valid) {
		st->vlan_xtract++;
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
	}
	netif_receive_skb(skb);
}

/*
 * Returns true if a command queue has enough available descriptors that
 * we can resume Tx operation after temporarily disabling its packet queue.
 */
static inline int enough_free_Tx_descs(const struct cmdQ *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}
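
/*
 * Worked example (illustrative numbers only): with q->size = 1024,
 * q->in_use = 700 and r = 200 descriptors completed but not yet
 * reclaimed, the effective occupancy is 700 - 200 = 500, which is
 * below half the ring (512), so Tx may be resumed.
 */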

/*
 * Called when sufficient space has become available in the SGE command queues
 * after the Tx packet schedulers have been suspended to restart the Tx path.
 */
static void restart_tx_queues(struct sge *sge)
{
	struct adapter *adap = sge->adapter;
	int i;

	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
		return;

	for_each_port(adap, i) {
		struct net_device *nd = adap->port[i].dev;

		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
		    netif_running(nd)) {
			sge->stats.cmdQ_restarted[2]++;
			netif_wake_queue(nd);
		}
	}
}
1426
1427/*
Francois Romieu356bd142006-12-11 23:47:00 +01001428 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
Scott Bardone559fb512005-06-23 01:40:19 -04001429 * information.
1430 */
Francois Romieu356bd142006-12-11 23:47:00 +01001431static unsigned int update_tx_info(struct adapter *adapter,
1432 unsigned int flags,
Scott Bardone559fb512005-06-23 01:40:19 -04001433 unsigned int pr0)
1434{
1435 struct sge *sge = adapter->sge;
1436 struct cmdQ *cmdq = &sge->cmdQ[0];
1437
1438 cmdq->processed += pr0;
Stephen Hemmingerf1d3d382006-12-01 16:36:16 -08001439 if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
1440 freelQs_empty(sge);
1441 flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
1442 }
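	/* cmdQ0 reported that it went to sleep: clear its running state
	 * and, if work is still outstanding, ring the doorbell to
	 * restart it. */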
	if (flags & F_CMDQ0_ENABLE) {
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);

		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
		if (sge->tx_sched)
			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);

		flags &= ~F_CMDQ0_ENABLE;
	}

	if (unlikely(sge->stopped_tx_queues != 0))
		restart_tx_queues(sge);

	return flags;
}

/*
 * Process SGE responses, up to the supplied budget. Returns the number of
 * responses processed. A negative budget is effectively unlimited.
 */
static int process_responses(struct adapter *adapter, int budget)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	int done = 0;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	while (done < budget && e->GenerationBit == q->genbit) {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		/* We batch updates to the TX side to avoid cacheline
		 * ping-pong of TX state information on MP where the sender
		 * might run on a different CPU than this function...
		 */
		if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
			cmdq_processed[0] = 0;
		}

		if (unlikely(cmdq_processed[1] > 16)) {
			sge->cmdQ[1].processed += cmdq_processed[1];
			cmdq_processed[1] = 0;
		}

		if (likely(e->DataValid)) {
			struct freelQ *fl = &sge->freelQ[e->FreelistQid];

			BUG_ON(!e->Sop || !e->Eop);
			if (unlikely(e->Offload))
				unexpected_offload(adapter, fl);
			else
				sge_rx(sge, fl, e->BufferLength);

			++done;

			/*
			 * Note: this depends on each packet consuming a
			 * single free-list buffer; cf. the BUG above.
			 */
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
			prefetch(fl->centries[fl->cidx].skb);

			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
		} else
			sge->stats.pure_rsps++;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);
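
		/* Return accumulated response-queue credits to the
		 * hardware in batches to limit MMIO writes. */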
		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
	}

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return done;
}

static inline int responses_pending(const struct adapter *adapter)
{
	const struct respQ *Q = &adapter->sge->respQ;
	const struct respQ_e *e = &Q->entries[Q->cidx];

	return e->GenerationBit == Q->genbit;
}

/*
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses. Such responses are too light-weight to
 * justify calling a softirq when using NAPI, so we handle them specially
 * in hard interrupt context. The caller must ensure that at least one
 * response is pending. Returns 1 as soon as it encounters a valid
 * data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adapter)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	prefetch(fl->centries[fl->cidx].skb);
	if (e->DataValid)
		return 1;

	do {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return e->GenerationBit == q->genbit;
}

/*
 * Handler for new data events when using NAPI. This does not need any locking
 * or protection from interrupts as data interrupts are off at this point and
 * other adapter interrupts do not interfere.
 */
int t1_poll(struct napi_struct *napi, int budget)
{
	struct adapter *adapter = container_of(napi, struct adapter, napi);
	int work_done = process_responses(adapter, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);
		writel(adapter->sge->respQ.cidx,
		       adapter->regs + A_SG_SLEEPING);
	}
	return work_done;
}

irqreturn_t t1_interrupt(int irq, void *data)
{
	struct adapter *adapter = data;
	struct sge *sge = adapter->sge;
	int handled;

	if (likely(responses_pending(adapter))) {
		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

		if (napi_schedule_prep(&adapter->napi)) {
			if (process_pure_responses(adapter))
				__napi_schedule(&adapter->napi);
			else {
				/* no data, no NAPI needed */
				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
				/* undo schedule_prep */
				napi_enable(&adapter->napi);
			}
		}
		return IRQ_HANDLED;
	}
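
	/* No SGE data responses pending: fall back to the slow-path
	 * handler for the remaining interrupt causes. */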
	spin_lock(&adapter->async_lock);
	handled = t1_slow_intr_handler(adapter);
	spin_unlock(&adapter->async_lock);

	if (!handled)
		sge->stats.unhandled_irqs++;

	return IRQ_RETVAL(handled != 0);
}

/*
 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
 *
 * The code figures out how many entries the sk_buff will require in the
 * cmdQ and updates the cmdQ data structure with the state once the enqueue
 * has completed. Then, it doesn't access the global structure anymore, but
 * uses the corresponding fields on the stack. In conjunction with a spinlock
 * around that code, we can make the function reentrant without holding the
 * lock when we actually enqueue (which might be expensive, especially on
 * architectures with IO MMUs).
 *
 * This runs with softirqs disabled.
 */
static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		     unsigned int qid, struct net_device *dev)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;

	if (!spin_trylock(&q->lock))
		return NETDEV_TX_LOCKED;

	reclaim_completed_tx(sge, q);

	pidx = q->pidx;
	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;
	count += compute_large_page_tx_descs(skb);

	/* Ethernet packet */
	if (unlikely(credits < count)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[2]++;
			pr_err("%s: Tx ring full while queue awake!\n",
			       adapter->name);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}
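
	/* There is still room for this packet, but we are about to dip
	 * below the stop threshold: stop the queue now so in-flight
	 * descriptors can drain before the ring fills completely. */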
	if (unlikely(credits - count < q->stop_thres)) {
		netif_stop_queue(dev);
		set_bit(dev->if_port, &sge->stopped_tx_queues);
		sge->stats.cmdQ_full[2]++;
	}

	/* T204 cmdQ0 skbs that are destined for a certain port have to go
	 * through the scheduler.
	 */
	if (sge->tx_sched && !qid && skb->dev) {
use_sched:
		use_sched_skb = 1;
		/* Note that the scheduler might return a different skb than
		 * the one passed in.
		 */
		skb = sched_skb(sge, skb, credits);
		if (!skb) {
			spin_unlock(&q->lock);
			return NETDEV_TX_OK;
		}
		pidx = q->pidx;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
	}

	q->in_use += count;
	genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);

	write_tx_descs(adapter, skb, pidx, genbit, q);

	/*
	 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
	 * the doorbell if the Q is asleep. There is a natural race, where
	 * the hardware is going to sleep just after we checked, however,
	 * then the interrupt handler will detect the outstanding TX packet
	 * and ring the doorbell for us.
	 */
	if (qid)
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
	else {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}

	if (use_sched_skb) {
		if (spin_trylock(&q->lock)) {
			credits = q->size - q->in_use;
			skb = NULL;
			goto use_sched;
		}
	}
	return NETDEV_TX_OK;
}

#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
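
/*
 * Illustrative example: for a plain Ethernet II frame with a 1460-byte
 * MSS, MK_ETH_TYPE_MSS(CPL_ETH_II, 1460) keeps the MSS in the low 14
 * bits and places the frame-type code in the top two bits of the
 * 16-bit field.
 */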

/*
 * eth_hdr_len - return the length of an Ethernet header
 * @data: pointer to the start of the Ethernet header
 *
 * Returns the length of an Ethernet header, including an optional VLAN tag.
 */
static inline int eth_hdr_len(const void *data)
{
	const struct ethhdr *e = data;

	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
}

/*
 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
 */
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct sge *sge = adapter->sge;
	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
	struct cpl_tx_pkt *cpl;
	struct sk_buff *orig_skb = skb;
	int ret;

	if (skb->protocol == htons(ETH_P_CPL5))
		goto send;

	/*
	 * We are using a non-standard hard_header_len.
	 * Allocate more header room in the rare cases it is not big enough.
	 */
	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
		++st->tx_need_hdrroom;
		dev_kfree_skb_any(orig_skb);
		if (!skb)
			return NETDEV_TX_OK;
	}

	if (skb_shinfo(skb)->gso_size) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr;

		++st->tx_tso;

		eth_type = skb_network_offset(skb) == ETH_HLEN ?
			CPL_ETH_II : CPL_ETH_II_VLAN;
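
		/* Prepend the LSO CPL header; the hardware segments the
		 * payload using the MSS and the IP/TCP header lengths
		 * supplied here. */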
		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
		hdr->opcode = CPL_TX_PKT_LSO;
		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
							  skb_shinfo(skb)->gso_size));
		hdr->len = htonl(skb->len - sizeof(*hdr));
		cpl = (struct cpl_tx_pkt *)hdr;
	} else {
		/*
		 * Packets shorter than ETH_HLEN can break the MAC, drop them
		 * early. Also, we may get oversized packets because some
		 * parts of the kernel don't handle our unusual hard_header_len
		 * right, drop those too.
		 */
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			netdev_dbg(dev, "packet size %d hdr %d mtu %d\n",
				   skb->len, eth_hdr_len(skb->data), dev->mtu);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
			if (unlikely(skb_checksum_help(skb))) {
				netdev_dbg(dev, "unable to do udp checksum\n");
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}

		/* We rely on catching a gratuitous ARP here, and we'll use
		 * it to flush out stuck ESPI packets.
		 */
		if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb[dev->if_port] = skb;
				/* We want to re-use this skb later. We
				 * simply bump the reference count and it
				 * will not be freed...
				 */
				skb = skb_get(skb);
			}
		}

		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;	/* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
	}
	cpl->iff = dev->if_port;

	if (skb_vlan_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(skb_vlan_tag_get(skb));
		st->vlan_insert++;
	} else
		cpl->vlan_valid = 0;

send:
	ret = t1_sge_tx(skb, adapter, 0, dev);

	/* If transmit busy, and we reallocated skb's due to headroom limit,
	 * then silently discard to avoid leak.
	 */
	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}

/*
 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
 */
static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) {	/* flush pending credits */
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}

/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}
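
/*
 * Illustrative example: with a 200 MHz core clock core_ticks_per_usec()
 * is 200, so rx_coalesce_usecs = 50 programs an interrupt holdoff of
 * 50 * 200 = 10000 core ticks into A_SG_INTRTIMER.
 */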

/*
 * Allocates both RX and TX resources and configures the SGE. However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists calculate the payload
	 * capacity of the large buffers. Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}

/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;

	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		kfree_skb(sge->espibug_skb[i]);
}

/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround.
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (!netif_running(adapter->port[i].dev) ||
			    netif_queue_stopped(adapter->port[i].dev) ||
			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
				continue;

			if (!skb->cb[0]) {
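				/* First use of this stored skb: overwrite
				 * its destination MAC and tail with the
				 * adapter's own address, then flag it via
				 * cb[0] so it is rewritten only once. */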
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr,
						ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr,
						ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of
			 * the skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

static void espibug_workaround(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb[0];
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr,
						ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr,
						ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p)
{
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	int i;

	if (!sge)
		return NULL;

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	for_each_port(adapter, i) {
		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
		if (!sge->port_stats[i])
			goto nomem_port;
	}

	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;

	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);

		if (adapter->params.nports > 1) {
			tx_sched_init(sge);
			sge->espibug_timer.function = espibug_workaround_t204;
		} else
			sge->espibug_timer.function = espibug_workaround;
		sge->espibug_timer.data = (unsigned long)sge->adapter;

		sge->espibug_timeout = 1;
		/* for T204, every 10ms */
		if (adapter->params.nports > 1)
			sge->espibug_timeout = HZ/100;
	}

	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	if (sge->tx_sched) {
		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
			p->rx_coalesce_usecs = 15;
		else
			p->rx_coalesce_usecs = 50;
	} else
		p->rx_coalesce_usecs = 50;

	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;

	return sge;

nomem_port:
	while (i >= 0) {
		free_percpu(sge->port_stats[i]);
		--i;
	}
	kfree(sge);
	return NULL;
}