/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

#define SGE_RX_COPY_THRES  256
#define SGE_RX_PULL_LEN    128

/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE  2048
#define FL0_PG_ORDER 0
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)

#define SGE_RX_DROP_THRES 16

/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	__be64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	u8 eop;       /* set if last descriptor for packet */
	u8 addr_idx;  /* buffer index of first SGL entry in descriptor */
	u8 fragidx;   /* first page fragment associated with descriptor */
	s8 sflit;     /* start flit of first SGL entry in descriptor */
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct fl_pg_chunk pg_chunk;
	};
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
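
/*
 * Worked example of the mapping above (illustration only, not used by the
 * code): the SGE_NUM_GENBITS == 2 table implies WR_FLITS == 15, so a WR of
 * 16 flits needs 1 + (16 - 2) / (15 - 1) = 2 descriptors, and the largest
 * mapped value, 57 flits, needs the 4-descriptor maximum noted above.
 */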

static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 * refill_rspq - replenish an SGE response queue
 * @adapter: the adapter
 * @q: the response queue to replenish
 * @credits: how many new responses to make available
 *
 * Replenishes a response queue by making the supplied number of responses
 * available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	rmb();
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 * Returns true if the platform needs sk_buff unmapping.  The compiler
 * optimizes away unnecessary code if this returns true.
 */
static inline int need_skb_unmap(void)
{
	/*
	 * This structure is used to tell if the platform needs buffer
	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
	 */
	struct dummy {
		DECLARE_PCI_UNMAP_ADDR(addr);
	};

	return sizeof(struct dummy) != 0;
}

/**
 * unmap_skb - unmap a packet main body and its page fragments
 * @skb: the packet
 * @q: the Tx queue containing Tx descriptors for the packet
 * @cidx: index of Tx descriptor
 * @pdev: the PCI device
 *
 * Unmap the main body of an sk_buff and its page fragments, if any.
 * Because of the fairly complicated structure of our SGLs and the desire
 * to conserve space for metadata, the information necessary to unmap an
 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 * descriptors (the physical addresses of the various data buffers), and
 * the SW descriptor state (assorted indices).  The send functions
 * initialize the indices for the first packet descriptor so we can unmap
 * the buffers held in the first Tx descriptor here, and we have enough
 * information at this point to set the state for the next Tx descriptor.
 *
 * Note that it is possible to clean up the first descriptor of a packet
 * before the send routines have written the next descriptors, but this
 * race does not cause any problem.  We just end up writing the unmapping
 * info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct tx_sw_desc *d = &q->sdesc[cidx];
	int nfrags, frag_idx, curflit, j = d->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
	frag_idx = d->fragidx;

	if (frag_idx == 0 && skb_headlen(skb)) {
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		j = 1;
	}

	curflit = d->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_shinfo(skb)->frags[frag_idx].size,
			       PCI_DMA_TODEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {   /* SGL continues into next Tx descriptor */
		d = cidx + 1 == q->size ? q->sdesc : d + 1;
		d->fragidx = frag_idx;
		d->addr_idx = j;
		d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
	}
}

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adapter: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->eop)
				kfree_skb(d->skb);
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 * reclaim_completed_tx - reclaims completed Tx descriptors
 * @adapter: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  Called with the Tx
 * queue's lock held.
 */
static inline void reclaim_completed_tx(struct adapter *adapter,
					struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
}

/**
 * should_restart_tx - are there enough resources to restart a Tx queue?
 * @q: the Tx queue
 *
 * Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @pdev: the PCI device associated with the adapter
 * @rxq: the SGE free list to clean up
 *
 * Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 * this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);
		if (q->use_pages) {
			if (d->pg_chunk.page)
				put_page(d->pg_chunk.page);
			d->pg_chunk.page = NULL;
		} else {
			kfree_skb(d->skb);
			d->skb = NULL;
		}
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->pg_chunk.page) {
		__free_pages(q->pg_chunk.page, q->order);
		q->pg_chunk.page = NULL;
	}
}

/**
 * add_one_rx_buf - add a packet buffer to a free-buffer list
 * @va: buffer start VA
 * @len: the buffer length
 * @d: the HW Rx descriptor to write
 * @sd: the SW Rx descriptor to write
 * @gen: the generation bit value
 * @pdev: the PCI device associated with the adapter
 *
 * Add a buffer of the given length to the supplied HW and SW Rx
 * descriptors.
 */
static inline int add_one_rx_buf(void *va, unsigned int len,
				 struct rx_desc *d, struct rx_sw_desc *sd,
				 unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
		return -ENOMEM;

	pci_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
			  unsigned int order)
{
	if (!q->pg_chunk.page) {
		q->pg_chunk.page = alloc_pages(gfp, order);
		if (unlikely(!q->pg_chunk.page))
			return -ENOMEM;
		q->pg_chunk.va = page_address(q->pg_chunk.page);
		q->pg_chunk.offset = 0;
	}
	sd->pg_chunk = q->pg_chunk;

	q->pg_chunk.offset += q->buf_size;
	if (q->pg_chunk.offset == (PAGE_SIZE << order))
		q->pg_chunk.page = NULL;
	else {
		q->pg_chunk.va += q->buf_size;
		get_page(q->pg_chunk.page);
	}
	return 0;
}

/**
 * refill_fl - refill an SGE free-buffer list
 * @adapter: the adapter
 * @q: the free-list to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for allocating new buffers
 *
 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must assure that
 * @n does not exceed the queue's capacity.
 */
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	void *buf_start;
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	unsigned int count = 0;

	while (n--) {
		int err;

		if (q->use_pages) {
			if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
nomem:				q->alloc_failed++;
				break;
			}
			buf_start = sd->pg_chunk.va;
		} else {
			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);

			if (!skb)
				goto nomem;

			sd->skb = skb;
			buf_start = skb->data;
		}

		err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
				     adap->pdev);
		if (unlikely(err)) {
			if (!q->use_pages) {
				kfree_skb(sd->skb);
				sd->skb = NULL;
			}
			break;
		}

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		q->credits++;
		count++;
	}
	wmb();
	if (likely(count))
		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));

	return count;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(16U, fl->size - fl->credits),
		  GFP_ATOMIC | __GFP_COMP);
}

/**
 * recycle_rx_buf - recycle a receive buffer
 * @adapter: the adapter
 * @q: the SGE free list
 * @idx: index of buffer to recycle
 *
 * Recycles the specified buffer on the given free list by adding it at
 * the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
	q->credits++;

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}

/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @pdev: the PCI device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the physical address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t * phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size && metadata) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
		*(void **)metadata = s;
	}
	memset(p, 0, len);
	return p;
}

/**
 * t3_reset_qset - reset a sge qset
 * @q: the queue set
 *
 * Reset the qset structure.
 * the NAPI structure is preserved in the event of
 * the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
{
	if (q->adap &&
	    !(q->adap->flags & NAPI_INIT)) {
		memset(q, 0, sizeof(*q));
		return;
	}

	q->adap = NULL;
	memset(&q->rspq, 0, sizeof(q->rspq));
	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
	q->txq_stopped = 0;
	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
	kfree(q->lro_frag_tbl);
	q->lro_nfrags = q->lro_frag_len = 0;
}


/**
 * free_qset - free the resources of an SGE queue set
 * @adapter: the adapter owning the queue set
 * @q: the queue set
 *
 * Release the HW and SW resources associated with an SGE queue set, such
 * as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 * queue set must be quiesced prior to calling this.
 */
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock_irq(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock_irq(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock_irq(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock_irq(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	t3_reset_qset(q);
}

/**
 * init_qset_cntxt - initialize an SGE queue set context info
 * @qs: the queue set
 * @id: the queue set id
 *
 * Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
	return (3 * n) / 2 + (n & 1);
}
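
/*
 * Illustration only: make_sgl() below packs two address/length pairs into
 * each 3-flit sg_ent, so for example sgl_len(3) = (3 * 3) / 2 + 1 = 5 flits
 * and sgl_len(4) = 6 flits.
 */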

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Calculates the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}

/**
 * get_packet - return the next ingress packet buffer from a free list
 * @adap: the adapter that received the packet
 * @fl: the SGE free list holding the packet
 * @len: the packet length including any SGE padding
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list and complete setup of the
 * sk_buff.  If the packet is small we make a copy and recycle the
 * original buffer, otherwise we use the original buffer itself.  If a
 * positive drop threshold is supplied packets are dropped and their
 * buffers recycled if (a) the number of remaining buffers is under the
 * threshold and the packet is too big to copy, or (b) the packet should
 * be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->skb->data);
	fl->credits--;

	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
					    pci_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
			memcpy(skb->data, sd->skb->data, len);
			pci_dma_sync_single_for_device(adap->pdev,
					    pci_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;
recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	if (unlikely(fl->credits < drop_thres))
		goto recycle;

use_orig_buf:
	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
}

/**
 * get_packet_pg - return the next ingress packet buffer from a free list
 * @adap: the adapter that received the packet
 * @fl: the SGE free list holding the packet
 * @len: the packet length including any SGE padding
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list populated with page chunks.
 * If the packet is small we make a copy and recycle the original buffer,
 * otherwise we attach the original buffer as a page fragment to a fresh
 * sk_buff.  If a positive drop threshold is supplied packets are dropped
 * and their buffers recycled if (a) the number of remaining buffers is
 * under the threshold and the packet is too big to copy, or (b) there's
 * no system memory.
 *
 * Note: this function is similar to @get_packet but deals with Rx buffers
 * that are page chunks rather than sk_buffs.
 */
static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
				     struct sge_rspq *q, unsigned int len,
				     unsigned int drop_thres)
{
	struct sk_buff *newskb, *skb;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	newskb = skb = q->pg_skb;

	if (!skb && (len <= SGE_RX_COPY_THRES)) {
		newskb = alloc_skb(len, GFP_ATOMIC);
		if (likely(newskb != NULL)) {
			__skb_put(newskb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
					    pci_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
			memcpy(newskb->data, sd->pg_chunk.va, len);
			pci_dma_sync_single_for_device(adap->pdev,
					    pci_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			return NULL;
recycle:
		fl->credits--;
		recycle_rx_buf(adap, fl, fl->cidx);
		q->rx_recycle_buf++;
		return newskb;
	}

	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
		goto recycle;

	if (!skb)
		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
	if (unlikely(!newskb)) {
		if (!drop_thres)
			return NULL;
		goto recycle;
	}

	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	if (!skb) {
		__skb_put(newskb, SGE_RX_PULL_LEN);
		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
				   len - SGE_RX_PULL_LEN);
		newskb->len = len;
		newskb->data_len = len - SGE_RX_PULL_LEN;
	} else {
		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
				   sd->pg_chunk.page,
				   sd->pg_chunk.offset, len);
		newskb->len += len;
		newskb->data_len += len;
	}
	newskb->truesize += newskb->data_len;

	fl->credits--;
	/*
	 * We do not refill FLs here, we let the caller do it to overlap a
	 * prefetch.
	 */
	return newskb;
}

/**
 * get_imm_packet - return the next ingress packet buffer from a response
 * @resp: the response descriptor containing the packet data
 *
 * Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 * calc_tx_descs - calculate the number of Tx descriptors for a packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given Ethernet
 * packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}
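
/*
 * Example (illustration only): a TSO packet with a mapped linear area and
 * 3 page fragments needs sgl_len(3 + 1) + 2 + 1 = 9 flits, which
 * flit_desc_map above maps to a single Tx descriptor; anything small enough
 * to be sent as immediate data always takes exactly one descriptor.
 */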

/**
 * make_sgl - populate a scatter/gather list for a packet
 * @skb: the packet
 * @sgp: the SGL to populate
 * @start: start address of skb main body data to include in the SGL
 * @len: length of skb main body data to include in the SGL
 * @pdev: the PCI device
 *
 * Generates a scatter/gather list for the buffers that make up a packet
 * and returns the SGL size in 8-byte words.  The caller must size the SGL
 * appropriately.
 */
static inline unsigned int make_sgl(const struct sk_buff *skb,
				    struct sg_ent *sgp, unsigned char *start,
				    unsigned int len, struct pci_dev *pdev)
{
	dma_addr_t mapping;
	unsigned int i, j = 0, nfrags;

	if (len) {
		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[0] = cpu_to_be64(mapping);
		j = 1;
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = pci_map_page(pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		sgp->len[j] = cpu_to_be32(frag->size);
		sgp->addr[j] = cpu_to_be64(mapping);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}

/**
 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 *
 * Ring the doorbell if a Tx queue is asleep.  There is a natural race,
 * where the HW is going to sleep just after we checked, however,
 * then the interrupt handler will detect the outstanding TX packet
 * and ring the doorbell for us.
 *
 * When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	wmb();			/* write descriptors before telling HW */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}

static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}

/**
 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
 * @ndesc: number of Tx descriptors spanned by the SGL
 * @skb: the packet corresponding to the WR
 * @d: first Tx descriptor to be written
 * @pidx: index of above descriptors
 * @q: the SGE Tx queue
 * @sgl: the SGL
 * @flits: number of flits to the start of the SGL in the first descriptor
 * @sgl_flits: the SGL size in flits
 * @gen: the Tx descriptor generation
 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 * Write a work request header and an associated SGL.  If the SGL is
 * small enough to fit into one Tx descriptor it has already been written
 * and we just need to write the WR header.  Otherwise we distribute the
 * SGL across the number of descriptors it spans.
 */
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
			     struct tx_desc *d, unsigned int pidx,
			     const struct sge_txq *q,
			     const struct sg_ent *sgl,
			     unsigned int flits, unsigned int sgl_flits,
			     unsigned int gen, __be32 wr_hi,
			     __be32 wr_lo)
{
	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
	struct tx_sw_desc *sd = &q->sdesc[pidx];

	sd->skb = skb;
	if (need_skb_unmap()) {
		sd->fragidx = 0;
		sd->addr_idx = 0;
		sd->sflit = flits;
	}

	if (likely(ndesc == 1)) {
		sd->eop = 1;
		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;
		wmb();
		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
				   V_WR_GEN(gen)) | wr_lo;
		wr_gen2(d, gen);
	} else {
		unsigned int ogen = gen;
		const u64 *fp = (const u64 *)sgl;
		struct work_request_hdr *wp = wrp;

		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;

		while (sgl_flits) {
			unsigned int avail = WR_FLITS - flits;

			if (avail > sgl_flits)
				avail = sgl_flits;
			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
			sgl_flits -= avail;
			ndesc--;
			if (!sgl_flits)
				break;

			fp += avail;
			d++;
			sd->eop = 0;
			sd++;
			if (++pidx == q->size) {
				pidx = 0;
				gen ^= 1;
				d = q->desc;
				sd = q->sdesc;
			}

			sd->skb = skb;
			wrp = (struct work_request_hdr *)d;
			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
					   V_WR_SGLSFLT(1)) | wr_hi;
			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
							sgl_flits + 1)) |
					   V_WR_GEN(gen)) | wr_lo;
			wr_gen2(d, gen);
			flits = 1;
		}
		sd->eop = 1;
		wrp->wr_hi |= htonl(F_WR_EOP);
		wmb();
		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
		wr_gen2((struct tx_desc *)wp, ogen);
		WARN_ON(ndesc != 0);
	}
}

/**
 * write_tx_pkt_wr - write a TX_PKT work request
 * @adap: the adapter
 * @skb: the packet to send
 * @pi: the egress interface
 * @pidx: index of the first Tx descriptor to write
 * @gen: the generation value to use
 * @q: the Tx queue
 * @ndesc: number of descriptors the packet will occupy
 * @compl: the value of the COMPL bit to use
 *
 * Generate a TX_PKT work request to send the supplied packet.
 */
static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
			    const struct port_info *pi,
			    unsigned int pidx, unsigned int gen,
			    struct sge_txq *q, unsigned int ndesc,
			    unsigned int compl)
{
	unsigned int flits, sgl_flits, cntrl, tso_info;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];
	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;

	cpl->len = htonl(skb->len | 0x80000000);
	cntrl = V_TXPKT_INTF(pi->port_id);

	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));

	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
	if (tso_info) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;

		d->flit[2] = 0;
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
		hdr->cntrl = htonl(cntrl);
		eth_type = skb_network_offset(skb) == ETH_HLEN ?
		    CPL_ETH_II : CPL_ETH_II_VLAN;
		tso_info |= V_LSO_ETH_TYPE(eth_type) |
		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
		hdr->lso_info = htonl(tso_info);
		flits = 3;
	} else {
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
		cpl->cntrl = htonl(cntrl);

		if (skb->len <= WR_LEN - sizeof(*cpl)) {
			q->sdesc[pidx].skb = NULL;
			if (!skb->data_len)
				skb_copy_from_linear_data(skb, &d->flit[2],
							  skb->len);
			else
				skb_copy_bits(skb, 0, &d->flit[2], skb->len);

			flits = (skb->len + 7) / 8 + 2;
			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
					      | F_WR_SOP | F_WR_EOP | compl);
			wmb();
			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
					      V_WR_TID(q->token));
			wr_gen2(d, gen);
			kfree_skb(skb);
			return;
		}

		flits = 2;
	}

	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
			 htonl(V_WR_TID(q->token)));
}

static inline void t3_stop_tx_queue(struct netdev_queue *txq,
				    struct sge_qset *qs, struct sge_txq *q)
{
	netif_tx_stop_queue(txq);
	set_bit(TXQ_ETH, &qs->txq_stopped);
	q->stops++;
}

/**
 * eth_xmit - add a packet to the Ethernet Tx queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
 */
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int qidx;
	unsigned int ndesc, pidx, credits, gen, compl;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct netdev_queue *txq;
	struct sge_qset *qs;
	struct sge_txq *q;

	/*
	 * The chip min packet length is 9 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	qidx = skb_get_queue_mapping(skb);
	qs = &pi->qs[qidx];
	q = &qs->txq[TXQ_ETH];
	txq = netdev_get_tx_queue(dev, qidx);

	spin_lock(&q->lock);
	reclaim_completed_tx(adap, q);

	credits = q->size - q->in_use;
	ndesc = calc_tx_descs(skb);

	if (unlikely(credits < ndesc)) {
		t3_stop_tx_queue(txq, qs, q);
		dev_err(&adap->pdev->dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, q->cntxt_id & 7);
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	q->in_use += ndesc;
	if (unlikely(credits - ndesc < q->stop_thres)) {
		t3_stop_tx_queue(txq, qs, q);

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
			q->restarts++;
			netif_tx_wake_queue(txq);
		}
	}

	gen = q->gen;
	q->unacked += ndesc;
	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
	q->unacked &= 7;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}

	/* update port statistics */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
	if (skb_shinfo(skb)->gso_size)
		qs->port_stats[SGE_PSTAT_TSO]++;
	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		qs->port_stats[SGE_PSTAT_VLANINS]++;

	dev->trans_start = jiffies;
	spin_unlock(&q->lock);

	/*
	 * We do not use Tx completion interrupts to free DMAd Tx packets.
	 * This is good for performance but means that we rely on new Tx
	 * packets arriving to run the destructors of completed packets,
	 * which open up space in their sockets' send queues.  Sometimes
	 * we do not get such new packets causing Tx to stall.  A single
	 * UDP transmitter is a good example of this situation.  We have
	 * a clean up timer that periodically reclaims completed packets
	 * but it doesn't run often enough (nor do we want it to) to prevent
	 * lengthy stalls.  A solution to this problem is to run the
	 * destructor early, after the packet is queued but before it's DMAd.
	 * A cons is that we lie to socket memory accounting, but the amount
	 * of extra memory is reasonable (limited by the number of Tx
	 * descriptors), the packets do actually get freed quickly by new
	 * packets almost always, and for protocols like TCP that wait for
	 * acks to really free up the data the extra memory is even less.
	 * On the positive side we run the destructors on the sending CPU
	 * rather than on a potentially different completing CPU, usually a
	 * good thing.  We also run them without holding our Tx queue lock,
	 * unlike what reclaim_completed_tx() would otherwise do.
	 *
	 * Run the destructor before telling the DMA engine about the packet
	 * to make sure it doesn't complete and get freed prematurely.
	 */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
	check_ring_tx_db(adap, q);
	return NETDEV_TX_OK;
}

/**
 * write_imm - write a packet into a Tx descriptor as immediate data
 * @d: the Tx descriptor to write
 * @skb: the packet
 * @len: the length of packet data to write as immediate data
 * @gen: the generation bit value to write
 *
 * Writes a packet as immediate data into a Tx descriptor.  The packet
 * contains a work request at its beginning.  We must write the packet
 * carefully so the SGE doesn't read it accidentally before it's written
 * in its entirety.
 */
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
			     unsigned int len, unsigned int gen)
{
	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
	struct work_request_hdr *to = (struct work_request_hdr *)d;

	if (likely(!skb->data_len))
		memcpy(&to[1], &from[1], len - sizeof(*from));
	else
		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));

	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
					V_WR_BCNTLFLT(len & 7));
	wmb();
	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
					V_WR_LEN((len + 7) / 8));
	wr_gen2(d, gen);
	kfree_skb(skb);
}

/**
 * check_desc_avail - check descriptor availability on a send queue
 * @adap: the adapter
 * @q: the send queue
 * @skb: the packet needing the descriptors
 * @ndesc: the number of Tx descriptors needed
 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 * Checks if the requested number of Tx descriptors is available on an
 * SGE send queue.  If the queue is already suspended or not enough
 * descriptors are available the packet is queued for later transmission.
 * Must be called with the Tx queue locked.
 *
 * Returns 0 if enough descriptors are available, 1 if there aren't
 * enough descriptors and the packet has been queued, and 2 if the caller
 * needs to retry because there weren't enough descriptors at the
 * beginning of the call but some freed up in the mean time.
 */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
				   struct sk_buff *skb, unsigned int ndesc,
				   unsigned int qid)
{
	if (unlikely(!skb_queue_empty(&q->sendq))) {
	      addq_exit:__skb_queue_tail(&q->sendq, skb);
		return 1;
	}
	if (unlikely(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		set_bit(qid, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			return 2;

		q->stops++;
		goto addq_exit;
	}
	return 0;
}

/**
 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 * @q: the SGE control Tx queue
 *
 * This is a variant of reclaim_completed_tx() that is used for Tx queues
 * that send only immediate data (presently just the control queues) and
 * thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	q->in_use -= reclaim;
	q->cleaned += reclaim;
}

static inline int immediate(const struct sk_buff *skb)
{
	return skb->len <= WR_LEN;
}

/**
 * ctrl_xmit - send a packet through an SGE control Tx queue
 * @adap: the adapter
 * @q: the control queue
 * @skb: the packet
 *
 * Send a packet through an SGE control Tx queue.  Packets sent through
 * a control queue must fit entirely as immediate data in a single Tx
 * descriptor and have no page fragments.
 */
static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;

	if (unlikely(!immediate(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
	wrp->wr_lo = htonl(V_WR_TID(q->token));

	spin_lock(&q->lock);
      again:reclaim_completed_tx_imm(q);

	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
	if (unlikely(ret)) {
		if (ret == 1) {
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

	q->in_use++;
	if (++q->pidx >= q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	return NET_XMIT_SUCCESS;
}

/**
 * restart_ctrlq - restart a suspended control queue
 * @qs: the queue set containing the control queue
 *
 * Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_CTRL];

	spin_lock(&q->lock);
      again:reclaim_completed_tx_imm(q);

	while (q->in_use < q->size &&
	       (skb = __skb_dequeue(&q->sendq)) != NULL) {

		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

		if (++q->pidx >= q->size) {
			q->pidx = 0;
			q->gen ^= 1;
		}
		q->in_use++;
	}

	if (!skb_queue_empty(&q->sendq)) {
		set_bit(TXQ_CTRL, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
			goto again;
		q->stops++;
	}

	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(qs->adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/*
 * Send a management message through control queue 0
 */
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;
	local_bh_disable();
	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
	local_bh_enable();

	return ret;
}

/**
 * deferred_unmap_destructor - unmap a packet when it is freed
 * @skb: the packet
 *
 * This is the packet destructor used for Tx packets that need to remain
 * mapped until they are freed rather than until their Tx descriptors are
 * freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	int i;
	const dma_addr_t *p;
	const struct skb_shared_info *si;
	const struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	p = dui->addr;

	if (skb->tail - skb->transport_header)
		pci_unmap_single(dui->pdev, *p++,
				 skb->tail - skb->transport_header,
				 PCI_DMA_TODEVICE);

	si = skb_shinfo(skb);
	for (i = 0; i < si->nr_frags; i++)
		pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
			       PCI_DMA_TODEVICE);
}

static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
				     const struct sg_ent *sgl, int sgl_flits)
{
	dma_addr_t *p;
	struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	dui->pdev = pdev;
	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
		*p++ = be64_to_cpu(sgl->addr[0]);
		*p++ = be64_to_cpu(sgl->addr[1]);
	}
	if (sgl_flits)
		*p = be64_to_cpu(sgl->addr[0]);
}

1489/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05001490 * write_ofld_wr - write an offload work request
1491 * @adap: the adapter
1492 * @skb: the packet to send
1493 * @q: the Tx queue
1494 * @pidx: index of the first Tx descriptor to write
1495 * @gen: the generation value to use
1496 * @ndesc: number of descriptors the packet will occupy
1497 *
1498 * Write an offload work request to send the supplied packet. The packet
1499 * data already carry the work request with most fields populated.
1500 */
1501static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1502 struct sge_txq *q, unsigned int pidx,
1503 unsigned int gen, unsigned int ndesc)
1504{
1505 unsigned int sgl_flits, flits;
1506 struct work_request_hdr *from;
1507 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1508 struct tx_desc *d = &q->desc[pidx];
1509
1510 if (immediate(skb)) {
1511 q->sdesc[pidx].skb = NULL;
1512 write_imm(d, skb, skb->len, gen);
1513 return;
1514 }
1515
1516 /* Only TX_DATA builds SGLs */
1517
1518 from = (struct work_request_hdr *)skb->data;
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001519 memcpy(&d->flit[1], &from[1],
1520 skb_transport_offset(skb) - sizeof(*from));
Divy Le Ray4d22de32007-01-18 22:04:14 -05001521
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001522 flits = skb_transport_offset(skb) / 8;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001523 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001524 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001525 skb->tail - skb->transport_header,
Divy Le Ray4d22de32007-01-18 22:04:14 -05001526 adap->pdev);
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001527 if (need_skb_unmap()) {
1528 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1529 skb->destructor = deferred_unmap_destructor;
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001530 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001531
1532 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1533 gen, from->wr_hi, from->wr_lo);
1534}
1535
1536/**
1537 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1538 * @skb: the packet
1539 *
1540 * Returns the number of Tx descriptors needed for the given offload
1541 * packet. These packets are already fully constructed.
1542 */
1543static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1544{
Divy Le Ray27186dc2007-08-21 20:49:15 -07001545 unsigned int flits, cnt;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001546
Divy Le Ray27186dc2007-08-21 20:49:15 -07001547 if (skb->len <= WR_LEN)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001548 return 1; /* packet fits as immediate data */
1549
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001550 flits = skb_transport_offset(skb) / 8; /* headers */
Divy Le Ray27186dc2007-08-21 20:49:15 -07001551 cnt = skb_shinfo(skb)->nr_frags;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001552 if (skb->tail != skb->transport_header)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001553 cnt++;
1554 return flits_to_desc(flits + sgl_len(cnt));
1555}
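/*
 * Worked example (illustrative only): an offload packet too big for
 * immediate data, with 40 bytes of WR + protocol headers before the
 * transport header (5 flits) and two page fragments plus payload in the
 * linear area, needs an SGL covering cnt = 3 buffers; the total of
 * 5 + sgl_len(3) flits is then rounded up to whole descriptors by
 * flits_to_desc().
 */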
1556
1557/**
1558 * ofld_xmit - send a packet through an offload queue
1559 * @adap: the adapter
1560 * @q: the Tx offload queue
1561 * @skb: the packet
1562 *
1563 * Send an offload packet through an SGE offload queue.
1564 */
1565static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1566 struct sk_buff *skb)
1567{
1568 int ret;
1569 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1570
1571 spin_lock(&q->lock);
1572 again:reclaim_completed_tx(adap, q);
1573
1574 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1575 if (unlikely(ret)) {
1576 if (ret == 1) {
1577 skb->priority = ndesc; /* save for restart */
1578 spin_unlock(&q->lock);
1579 return NET_XMIT_CN;
1580 }
1581 goto again;
1582 }
1583
1584 gen = q->gen;
1585 q->in_use += ndesc;
1586 pidx = q->pidx;
1587 q->pidx += ndesc;
1588 if (q->pidx >= q->size) {
1589 q->pidx -= q->size;
1590 q->gen ^= 1;
1591 }
1592 spin_unlock(&q->lock);
1593
1594 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1595 check_ring_tx_db(adap, q);
1596 return NET_XMIT_SUCCESS;
1597}
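/*
 * Note: when descriptors run out the packet is held on q->sendq with its
 * descriptor count stashed in skb->priority (the "save for restart"
 * assignment above), so restart_offloadq() below can resume transmission
 * without recomputing calc_tx_descs_ofld().
 */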
1598
1599/**
1600 * restart_offloadq - restart a suspended offload queue
1601 * @qs: the queue set containing the offload queue
1602 *
1603 * Resumes transmission on a suspended Tx offload queue.
1604 */
1605static void restart_offloadq(unsigned long data)
1606{
1607 struct sk_buff *skb;
1608 struct sge_qset *qs = (struct sge_qset *)data;
1609 struct sge_txq *q = &qs->txq[TXQ_OFLD];
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001610 const struct port_info *pi = netdev_priv(qs->netdev);
1611 struct adapter *adap = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001612
1613 spin_lock(&q->lock);
1614 again:reclaim_completed_tx(adap, q);
1615
1616 while ((skb = skb_peek(&q->sendq)) != NULL) {
1617 unsigned int gen, pidx;
1618 unsigned int ndesc = skb->priority;
1619
1620 if (unlikely(q->size - q->in_use < ndesc)) {
1621 set_bit(TXQ_OFLD, &qs->txq_stopped);
1622 smp_mb__after_clear_bit();
1623
1624 if (should_restart_tx(q) &&
1625 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1626 goto again;
1627 q->stops++;
1628 break;
1629 }
1630
1631 gen = q->gen;
1632 q->in_use += ndesc;
1633 pidx = q->pidx;
1634 q->pidx += ndesc;
1635 if (q->pidx >= q->size) {
1636 q->pidx -= q->size;
1637 q->gen ^= 1;
1638 }
1639 __skb_unlink(skb, &q->sendq);
1640 spin_unlock(&q->lock);
1641
1642 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1643 spin_lock(&q->lock);
1644 }
1645 spin_unlock(&q->lock);
1646
1647#if USE_GTS
1648 set_bit(TXQ_RUNNING, &q->flags);
1649 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1650#endif
Divy Le Rayafefce62007-11-16 11:22:21 -08001651 wmb();
Divy Le Ray4d22de32007-01-18 22:04:14 -05001652 t3_write_reg(adap, A_SG_KDOORBELL,
1653 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1654}
1655
1656/**
1657 * queue_set - return the queue set a packet should use
1658 * @skb: the packet
1659 *
1660 * Maps a packet to the SGE queue set it should use. The desired queue
1661 * set is carried in bits 1-3 in the packet's priority.
1662 */
1663static inline int queue_set(const struct sk_buff *skb)
1664{
1665 return skb->priority >> 1;
1666}
1667
1668/**
1669 * is_ctrl_pkt - return whether an offload packet is a control packet
1670 * @skb: the packet
1671 *
1672 * Determines whether an offload packet should use an OFLD or a CTRL
1673 * Tx queue. This is indicated by bit 0 in the packet's priority.
1674 */
1675static inline int is_ctrl_pkt(const struct sk_buff *skb)
1676{
1677 return skb->priority & 1;
1678}
1679
1680/**
1681 * t3_offload_tx - send an offload packet
1682 * @tdev: the offload device to send to
1683 * @skb: the packet
1684 *
1685 * Sends an offload packet. We use the packet priority to select the
1686 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1687 * should be sent as regular or control, bits 1-3 select the queue set.
1688 */
1689int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1690{
1691 struct adapter *adap = tdev2adap(tdev);
1692 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1693
1694 if (unlikely(is_ctrl_pkt(skb)))
1695 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1696
1697 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1698}
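/*
 * Illustrative sketch (not part of the driver): an offload module would
 * encode its queue choice in skb->priority before handing the packet
 * over, e.g. to use queue set 2 and the regular offload (non-control)
 * queue:
 *
 *	skb->priority = (2 << 1) | 0;
 *	t3_offload_tx(tdev, skb);
 *
 * Setting bit 0 instead would steer the packet to the control queue of
 * the same queue set.
 */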
1699
1700/**
1701 * offload_enqueue - add an offload packet to an SGE offload receive queue
1702 * @q: the SGE response queue
1703 * @skb: the packet
1704 *
1705 * Add a new offload packet to an SGE response queue's offload packet
1706 * queue. If the packet is the first on the queue it schedules the RX
1707 * softirq to process the queue.
1708 */
1709static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1710{
David S. Miller147e70e2008-09-22 01:29:52 -07001711 int was_empty = skb_queue_empty(&q->rx_queue);
1712
1713 __skb_queue_tail(&q->rx_queue, skb);
1714
1715 if (was_empty) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001716 struct sge_qset *qs = rspq_to_qset(q);
1717
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001718 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001719 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001720}
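/*
 * Note: NAPI is scheduled only on the empty -> non-empty transition;
 * if packets are already queued, ofld_poll() has been scheduled (or is
 * running) and will pick up the new packet on its next pass.
 */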
1721
1722/**
1723 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1724 * @tdev: the offload device that will be receiving the packets
1725 * @q: the SGE response queue that assembled the bundle
1726 * @skbs: the partial bundle
1727 * @n: the number of packets in the bundle
1728 *
1729 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1730 */
1731static inline void deliver_partial_bundle(struct t3cdev *tdev,
1732 struct sge_rspq *q,
1733 struct sk_buff *skbs[], int n)
1734{
1735 if (n) {
1736 q->offload_bundles++;
1737 tdev->recv(tdev, skbs, n);
1738 }
1739}
1740
1741/**
1742 * ofld_poll - NAPI handler for offload packets in interrupt mode
1743 * @dev: the network device doing the polling
1744 * @budget: polling budget
1745 *
1746 * The NAPI handler for offload packets when a response queue is serviced
1747 * by the hard interrupt handler, i.e., when it's operating in non-polling
1748 * mode. Creates small packet batches and sends them through the offload
1749 * receive handler. Batches need to be of modest size as we do prefetches
1750 * on the packets in each.
1751 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001752static int ofld_poll(struct napi_struct *napi, int budget)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001753{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001754 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001755 struct sge_rspq *q = &qs->rspq;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001756 struct adapter *adapter = qs->adap;
1757 int work_done = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001758
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001759 while (work_done < budget) {
David S. Miller147e70e2008-09-22 01:29:52 -07001760 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1761 struct sk_buff_head queue;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001762 int ngathered;
1763
1764 spin_lock_irq(&q->lock);
David S. Miller147e70e2008-09-22 01:29:52 -07001765 __skb_queue_head_init(&queue);
1766 skb_queue_splice_init(&q->rx_queue, &queue);
1767 if (skb_queue_empty(&queue)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001768 napi_complete(napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001769 spin_unlock_irq(&q->lock);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001770 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001771 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001772 spin_unlock_irq(&q->lock);
1773
David S. Miller147e70e2008-09-22 01:29:52 -07001774 ngathered = 0;
1775 skb_queue_walk_safe(&queue, skb, tmp) {
1776 if (work_done >= budget)
1777 break;
1778 work_done++;
1779
1780 __skb_unlink(skb, &queue);
1781 prefetch(skb->data);
1782 skbs[ngathered] = skb;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001783 if (++ngathered == RX_BUNDLE_SIZE) {
1784 q->offload_bundles++;
1785 adapter->tdev.recv(&adapter->tdev, skbs,
1786 ngathered);
1787 ngathered = 0;
1788 }
1789 }
David S. Miller147e70e2008-09-22 01:29:52 -07001790 if (!skb_queue_empty(&queue)) {
1791 /* splice remaining packets back onto Rx queue */
Divy Le Ray4d22de32007-01-18 22:04:14 -05001792 spin_lock_irq(&q->lock);
David S. Miller147e70e2008-09-22 01:29:52 -07001793 skb_queue_splice(&queue, &q->rx_queue);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001794 spin_unlock_irq(&q->lock);
1795 }
1796 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1797 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001798
1799 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001800}
1801
1802/**
1803 * rx_offload - process a received offload packet
1804 * @tdev: the offload device receiving the packet
1805 * @rq: the response queue that received the packet
1806 * @skb: the packet
1807 * @rx_gather: a gather list of packets if we are building a bundle
1808 * @gather_idx: index of the next available slot in the bundle
1809 *
1810 * Process an ingress offload packet and add it to the offload ingress
1811 * queue. Returns the index of the next available slot in the bundle.
1812 */
1813static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1814 struct sk_buff *skb, struct sk_buff *rx_gather[],
1815 unsigned int gather_idx)
1816{
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001817 skb_reset_mac_header(skb);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001818 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001819 skb_reset_transport_header(skb);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001820
1821 if (rq->polling) {
1822 rx_gather[gather_idx++] = skb;
1823 if (gather_idx == RX_BUNDLE_SIZE) {
1824 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1825 gather_idx = 0;
1826 rq->offload_bundles++;
1827 }
1828 } else
1829 offload_enqueue(rq, skb);
1830
1831 return gather_idx;
1832}
1833
1834/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05001835 * restart_tx - check whether to restart suspended Tx queues
1836 * @qs: the queue set to resume
1837 *
1838 * Restarts suspended Tx queues of an SGE queue set if they have enough
1839 * free resources to resume operation.
1840 */
1841static void restart_tx(struct sge_qset *qs)
1842{
1843 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1844 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1845 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1846 qs->txq[TXQ_ETH].restarts++;
1847 if (netif_running(qs->netdev))
Divy Le Ray82ad3322008-12-16 01:09:39 -08001848 netif_tx_wake_queue(qs->tx_q);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001849 }
1850
1851 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1852 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1853 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1854 qs->txq[TXQ_OFLD].restarts++;
1855 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1856 }
1857 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1858 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1859 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1860 qs->txq[TXQ_CTRL].restarts++;
1861 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1862 }
1863}
1864
1865/**
1866 * rx_eth - process an ingress ethernet packet
1867 * @adap: the adapter
1868 * @rq: the response queue that received the packet
1869 * @skb: the packet
1870 * @pad: amount of padding at the start of the buffer
1871 *
1872 * Process an ingress ethernet packet and deliver it to the stack.
1873 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1874 * if it was immediate data in a response.
1875 */
1876static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
Divy Le Rayb47385b2008-05-21 18:56:26 -07001877 struct sk_buff *skb, int pad, int lro)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001878{
1879 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
Divy Le Rayb47385b2008-05-21 18:56:26 -07001880 struct sge_qset *qs = rspq_to_qset(rq);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001881 struct port_info *pi;
1882
Divy Le Ray4d22de32007-01-18 22:04:14 -05001883 skb_pull(skb, sizeof(*p) + pad);
Arnaldo Carvalho de Melo4c13eb62007-04-25 17:40:23 -07001884 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001885 pi = netdev_priv(skb->dev);
Al Viro05e5c112007-12-22 18:56:23 +00001886 if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) &&
Divy Le Ray4d22de32007-01-18 22:04:14 -05001887 !p->fragment) {
1888 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1889 skb->ip_summed = CHECKSUM_UNNECESSARY;
1890 } else
1891 skb->ip_summed = CHECKSUM_NONE;
1892
1893 if (unlikely(p->vlan_valid)) {
1894 struct vlan_group *grp = pi->vlan_grp;
1895
Divy Le Rayb47385b2008-05-21 18:56:26 -07001896 qs->port_stats[SGE_PSTAT_VLANEX]++;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001897 if (likely(grp))
Divy Le Rayb47385b2008-05-21 18:56:26 -07001898 if (lro)
1899 lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb,
1900 grp,
1901 ntohs(p->vlan),
1902 p);
1903 else
1904 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1905 rq->polling);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001906 else
1907 dev_kfree_skb_any(skb);
Divy Le Rayb47385b2008-05-21 18:56:26 -07001908 } else if (rq->polling) {
1909 if (lro)
1910 lro_receive_skb(&qs->lro_mgr, skb, p);
1911 else
1912 netif_receive_skb(skb);
1913 } else
Divy Le Ray4d22de32007-01-18 22:04:14 -05001914 netif_rx(skb);
1915}
1916
Divy Le Rayb47385b2008-05-21 18:56:26 -07001917static inline int is_eth_tcp(u32 rss)
1918{
1919 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
1920}
1921
1922/**
1923 * lro_frame_ok - check if an ingress packet is eligible for LRO
1924 * @p: the CPL header of the packet
1925 *
1926 * Returns true if a received packet is eligible for LRO.
1927 * The following conditions must be true:
1928 * - packet is TCP/IP Ethernet II (checked elsewhere)
1929 * - not an IP fragment
1930 * - no IP options
1931 * - TCP/IP checksums are correct
1932 * - the packet is for this host
1933 */
1934static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
1935{
1936 const struct ethhdr *eh = (struct ethhdr *)(p + 1);
1937 const struct iphdr *ih = (struct iphdr *)(eh + 1);
1938
1939 return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
1940 eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
1941}
1942
Divy Le Rayb47385b2008-05-21 18:56:26 -07001943static int t3_get_lro_header(void **eh, void **iph, void **tcph,
1944 u64 *hdr_flags, void *priv)
1945{
1946 const struct cpl_rx_pkt *cpl = priv;
1947
1948 if (!lro_frame_ok(cpl))
1949 return -1;
1950
1951 *eh = (struct ethhdr *)(cpl + 1);
1952 *iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
1953 *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
1954
Divy Le Rayb47385b2008-05-21 18:56:26 -07001955 *hdr_flags = LRO_IPV4 | LRO_TCP;
1956 return 0;
1957}
1958
1959static int t3_get_skb_header(struct sk_buff *skb,
1960 void **iph, void **tcph, u64 *hdr_flags,
1961 void *priv)
1962{
1963 void *eh;
1964
1965 return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
1966}
1967
1968static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
1969 void **iph, void **tcph, u64 *hdr_flags,
1970 void *priv)
1971{
1972 return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
1973}
1974
1975/**
1976 * lro_add_page - add a page chunk to an LRO session
1977 * @adap: the adapter
1978 * @qs: the associated queue set
1979 * @fl: the free list containing the page chunk to add
1980 * @len: packet length
1981 * @complete: Indicates the last fragment of a frame
1982 *
1983 * Add a received packet contained in a page chunk to an existing LRO
1984 * session.
1985 */
1986static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
1987 struct sge_fl *fl, int len, int complete)
1988{
1989 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1990 struct cpl_rx_pkt *cpl;
1991 struct skb_frag_struct *rx_frag = qs->lro_frag_tbl;
1992 int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len;
1993 int offset = 0;
1994
1995 if (!nr_frags) {
1996 offset = 2 + sizeof(struct cpl_rx_pkt);
1997 qs->lro_va = cpl = sd->pg_chunk.va + 2;
1998 }
1999
2000 fl->credits--;
2001
2002 len -= offset;
2003 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
2004 fl->buf_size, PCI_DMA_FROMDEVICE);
2005
2006 rx_frag += nr_frags;
2007 rx_frag->page = sd->pg_chunk.page;
2008 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2009 rx_frag->size = len;
2010 frag_len += len;
2011 qs->lro_nfrags++;
2012 qs->lro_frag_len = frag_len;
2013
2014 if (!complete)
2015 return;
2016
2017 qs->lro_nfrags = qs->lro_frag_len = 0;
2018 cpl = qs->lro_va;
2019
2020 if (unlikely(cpl->vlan_valid)) {
2021 struct net_device *dev = qs->netdev;
2022 struct port_info *pi = netdev_priv(dev);
2023 struct vlan_group *grp = pi->vlan_grp;
2024
2025 if (likely(grp != NULL)) {
2026 lro_vlan_hwaccel_receive_frags(&qs->lro_mgr,
2027 qs->lro_frag_tbl,
2028 frag_len, frag_len,
2029 grp, ntohs(cpl->vlan),
2030 cpl, 0);
2031 return;
2032 }
2033 }
2034 lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
2035 frag_len, frag_len, cpl, 0);
2036}
2037
2038/**
2039 * init_lro_mgr - initialize an LRO manager object
2040 * @lro_mgr: the LRO manager object
2041 */
2042static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
2043{
2044 lro_mgr->dev = qs->netdev;
2045 lro_mgr->features = LRO_F_NAPI;
2046 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
2047 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2048 lro_mgr->max_desc = T3_MAX_LRO_SES;
2049 lro_mgr->lro_arr = qs->lro_desc;
2050 lro_mgr->get_frag_header = t3_get_frag_header;
2051 lro_mgr->get_skb_header = t3_get_skb_header;
2052 lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
2053 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2054 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2055}
2056
Divy Le Ray4d22de32007-01-18 22:04:14 -05002057/**
2058 * handle_rsp_cntrl_info - handles control information in a response
2059 * @qs: the queue set corresponding to the response
2060 * @flags: the response control flags
Divy Le Ray4d22de32007-01-18 22:04:14 -05002061 *
2062 * Handles the control information of an SGE response, such as GTS
2063 * indications and completion credits for the queue set's Tx queues.
Divy Le Ray6195c712007-01-30 19:43:56 -08002064 * HW coalesces credits, we don't do any extra SW coalescing.
Divy Le Ray4d22de32007-01-18 22:04:14 -05002065 */
Divy Le Ray6195c712007-01-30 19:43:56 -08002066static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002067{
2068 unsigned int credits;
2069
2070#if USE_GTS
2071 if (flags & F_RSPD_TXQ0_GTS)
2072 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2073#endif
2074
Divy Le Ray4d22de32007-01-18 22:04:14 -05002075 credits = G_RSPD_TXQ0_CR(flags);
2076 if (credits)
2077 qs->txq[TXQ_ETH].processed += credits;
2078
Divy Le Ray6195c712007-01-30 19:43:56 -08002079 credits = G_RSPD_TXQ2_CR(flags);
2080 if (credits)
2081 qs->txq[TXQ_CTRL].processed += credits;
2082
Divy Le Ray4d22de32007-01-18 22:04:14 -05002083# if USE_GTS
2084 if (flags & F_RSPD_TXQ1_GTS)
2085 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2086# endif
Divy Le Ray6195c712007-01-30 19:43:56 -08002087 credits = G_RSPD_TXQ1_CR(flags);
2088 if (credits)
2089 qs->txq[TXQ_OFLD].processed += credits;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002090}
2091
2092/**
2093 * check_ring_db - check if we need to ring any doorbells
2094 * @adapter: the adapter
2095 * @qs: the queue set whose Tx queues are to be examined
2096 * @sleeping: indicates which Tx queue sent GTS
2097 *
2098 * Checks if some of a queue set's Tx queues need to ring their doorbells
2099 * to resume transmission after idling while they still have unprocessed
2100 * descriptors.
2101 */
2102static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2103 unsigned int sleeping)
2104{
2105 if (sleeping & F_RSPD_TXQ0_GTS) {
2106 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2107
2108 if (txq->cleaned + txq->in_use != txq->processed &&
2109 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2110 set_bit(TXQ_RUNNING, &txq->flags);
2111 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2112 V_EGRCNTX(txq->cntxt_id));
2113 }
2114 }
2115
2116 if (sleeping & F_RSPD_TXQ1_GTS) {
2117 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2118
2119 if (txq->cleaned + txq->in_use != txq->processed &&
2120 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2121 set_bit(TXQ_RUNNING, &txq->flags);
2122 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2123 V_EGRCNTX(txq->cntxt_id));
2124 }
2125 }
2126}
2127
2128/**
2129 * is_new_response - check if a response is newly written
2130 * @r: the response descriptor
2131 * @q: the response queue
2132 *
2133 * Returns true if a response descriptor contains an as yet unprocessed
2134 * response.
2135 */
2136static inline int is_new_response(const struct rsp_desc *r,
2137 const struct sge_rspq *q)
2138{
2139 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2140}
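/*
 * Note on the generation bit: the driver does not track a hardware
 * producer index for the response ring.  Instead q->gen is toggled each
 * time q->cidx wraps (see process_responses() below), so a descriptor
 * whose F_RSPD_GEN2 bit matches q->gen must have been written by the
 * hardware since the last wrap and is therefore new.
 */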
2141
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002142static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2143{
2144 q->pg_skb = NULL;
2145 q->rx_recycle_buf = 0;
2146}
2147
Divy Le Ray4d22de32007-01-18 22:04:14 -05002148#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2149#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2150 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2151 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2152 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2153
2154/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2155#define NOMEM_INTR_DELAY 2500
2156
2157/**
2158 * process_responses - process responses from an SGE response queue
2159 * @adap: the adapter
2160 * @qs: the queue set to which the response queue belongs
2161 * @budget: how many responses can be processed in this round
2162 *
2163 * Process responses from an SGE response queue up to the supplied budget.
2164 * Responses include received packets as well as credits and other events
2165 * for the queues that belong to the response queue's queue set.
2166 * A negative budget is effectively unlimited.
2167 *
2168 * Additionally choose the interrupt holdoff time for the next interrupt
2169 * on this queue. If the system is under memory shortage use a fairly
2170 * long delay to help recovery.
2171 */
2172static int process_responses(struct adapter *adap, struct sge_qset *qs,
2173 int budget)
2174{
2175 struct sge_rspq *q = &qs->rspq;
2176 struct rsp_desc *r = &q->desc[q->cidx];
2177 int budget_left = budget;
Divy Le Ray6195c712007-01-30 19:43:56 -08002178 unsigned int sleeping = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002179 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2180 int ngathered = 0;
2181
2182 q->next_holdoff = q->holdoff_tmr;
2183
2184 while (likely(budget_left && is_new_response(r, q))) {
Divy Le Rayb47385b2008-05-21 18:56:26 -07002185 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002186 struct sk_buff *skb = NULL;
2187 u32 len, flags = ntohl(r->flags);
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002188 __be32 rss_hi = *(const __be32 *)r,
2189 rss_lo = r->rss_hdr.rss_hash_val;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002190
2191 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2192
2193 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2194 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2195 if (!skb)
2196 goto no_mem;
2197
2198 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2199 skb->data[0] = CPL_ASYNC_NOTIF;
2200 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2201 q->async_notif++;
2202 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2203 skb = get_imm_packet(r);
2204 if (unlikely(!skb)) {
Divy Le Raycf992af2007-05-30 21:10:47 -07002205no_mem:
Divy Le Ray4d22de32007-01-18 22:04:14 -05002206 q->next_holdoff = NOMEM_INTR_DELAY;
2207 q->nomem++;
2208 /* consume one credit since we tried */
2209 budget_left--;
2210 break;
2211 }
2212 q->imm_data++;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002213 ethpad = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002214 } else if ((len = ntohl(r->len_cq)) != 0) {
Divy Le Raycf992af2007-05-30 21:10:47 -07002215 struct sge_fl *fl;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002216
Divy Le Rayb47385b2008-05-21 18:56:26 -07002217 if (eth)
2218 lro = qs->lro_enabled && is_eth_tcp(rss_hi);
2219
Divy Le Raycf992af2007-05-30 21:10:47 -07002220 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2221 if (fl->use_pages) {
2222 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002223
Divy Le Raycf992af2007-05-30 21:10:47 -07002224 prefetch(addr);
2225#if L1_CACHE_BYTES < 128
2226 prefetch(addr + L1_CACHE_BYTES);
2227#endif
Divy Le Raye0994eb2007-02-24 16:44:17 -08002228 __refill_fl(adap, fl);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002229 if (lro > 0) {
2230 lro_add_page(adap, qs, fl,
2231 G_RSPD_LEN(len),
2232 flags & F_RSPD_EOP);
2233 goto next_fl;
2234 }
Divy Le Raye0994eb2007-02-24 16:44:17 -08002235
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002236 skb = get_packet_pg(adap, fl, q,
2237 G_RSPD_LEN(len),
2238 eth ?
2239 SGE_RX_DROP_THRES : 0);
2240 q->pg_skb = skb;
Divy Le Raycf992af2007-05-30 21:10:47 -07002241 } else
Divy Le Raye0994eb2007-02-24 16:44:17 -08002242 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2243 eth ? SGE_RX_DROP_THRES : 0);
Divy Le Raycf992af2007-05-30 21:10:47 -07002244 if (unlikely(!skb)) {
2245 if (!eth)
2246 goto no_mem;
2247 q->rx_drops++;
2248 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2249 __skb_pull(skb, 2);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002250next_fl:
Divy Le Ray4d22de32007-01-18 22:04:14 -05002251 if (++fl->cidx == fl->size)
2252 fl->cidx = 0;
2253 } else
2254 q->pure_rsps++;
2255
2256 if (flags & RSPD_CTRL_MASK) {
2257 sleeping |= flags & RSPD_GTS_MASK;
Divy Le Ray6195c712007-01-30 19:43:56 -08002258 handle_rsp_cntrl_info(qs, flags);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002259 }
2260
2261 r++;
2262 if (unlikely(++q->cidx == q->size)) {
2263 q->cidx = 0;
2264 q->gen ^= 1;
2265 r = q->desc;
2266 }
2267 prefetch(r);
2268
2269 if (++q->credits >= (q->size / 4)) {
2270 refill_rspq(adap, q, q->credits);
2271 q->credits = 0;
2272 }
2273
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002274 packet_complete = flags &
2275 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2276 F_RSPD_ASYNC_NOTIF);
2277
2278 if (skb != NULL && packet_complete) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05002279 if (eth)
Divy Le Rayb47385b2008-05-21 18:56:26 -07002280 rx_eth(adap, q, skb, ethpad, lro);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002281 else {
Divy Le Rayafefce62007-11-16 11:22:21 -08002282 q->offload_pkts++;
Divy Le Raycf992af2007-05-30 21:10:47 -07002283 /* Preserve the RSS info in csum & priority */
2284 skb->csum = rss_hi;
2285 skb->priority = rss_lo;
2286 ngathered = rx_offload(&adap->tdev, q, skb,
2287 offload_skbs,
Divy Le Raye0994eb2007-02-24 16:44:17 -08002288 ngathered);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002289 }
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002290
2291 if (flags & F_RSPD_EOP)
Divy Le Rayb47385b2008-05-21 18:56:26 -07002292 clear_rspq_bufstate(q);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002293 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002294 --budget_left;
2295 }
2296
Divy Le Ray4d22de32007-01-18 22:04:14 -05002297 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002298 lro_flush_all(&qs->lro_mgr);
2299 qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
2300 qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
2301 qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
2302
Divy Le Ray4d22de32007-01-18 22:04:14 -05002303 if (sleeping)
2304 check_ring_db(adap, qs, sleeping);
2305
2306 smp_mb(); /* commit Tx queue .processed updates */
2307 if (unlikely(qs->txq_stopped != 0))
2308 restart_tx(qs);
2309
2310 budget -= budget_left;
2311 return budget;
2312}
2313
2314static inline int is_pure_response(const struct rsp_desc *r)
2315{
Roland Dreierc5419e62008-11-28 21:55:42 -08002316 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002317
2318 return (n | r->len_cq) == 0;
2319}
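/*
 * A pure response therefore carries no immediate data, no async
 * notification and no free-list buffer (len_cq == 0); it only conveys
 * credits and GTS indications, which is why it can be handled directly
 * in hard interrupt context by process_pure_responses() below.
 */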
2320
2321/**
2322 * napi_rx_handler - the NAPI handler for Rx processing
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002323 * @napi: the napi instance
Divy Le Ray4d22de32007-01-18 22:04:14 -05002324 * @budget: how many packets we can process in this round
2325 *
2326 * Handler for new data events when using NAPI.
2327 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002328static int napi_rx_handler(struct napi_struct *napi, int budget)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002329{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002330 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2331 struct adapter *adap = qs->adap;
2332 int work_done = process_responses(adap, qs, budget);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002333
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002334 if (likely(work_done < budget)) {
2335 napi_complete(napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002336
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002337 /*
2338 * Because we don't atomically flush the following
2339 * write it is possible that in very rare cases it can
2340 * reach the device in a way that races with a new
2341 * response being written plus an error interrupt
2342 * causing the NAPI interrupt handler below to return
2343 * unhandled status to the OS. To protect against
2344 * this would require flushing the write and doing
2345 * both the write and the flush with interrupts off.
2346 * Way too expensive and unjustifiable given the
2347 * rarity of the race.
2348 *
2349 * The race cannot happen at all with MSI-X.
2350 */
2351 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2352 V_NEWTIMER(qs->rspq.next_holdoff) |
2353 V_NEWINDEX(qs->rspq.cidx));
2354 }
2355 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002356}
2357
2358/*
2359 * Returns true if the device is already scheduled for polling.
2360 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002361static inline int napi_is_scheduled(struct napi_struct *napi)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002362{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002363 return test_bit(NAPI_STATE_SCHED, &napi->state);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002364}
2365
2366/**
2367 * process_pure_responses - process pure responses from a response queue
2368 * @adap: the adapter
2369 * @qs: the queue set owning the response queue
2370 * @r: the first pure response to process
2371 *
2372 * A simpler version of process_responses() that handles only pure (i.e.,
2373 * non data-carrying) responses. Such responses are too lightweight to
2374 * justify calling a softirq under NAPI, so we handle them specially in
2375 * the interrupt handler. The function is called with a pointer to a
2376 * response, which the caller must ensure is a valid pure response.
2377 *
2378 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2379 */
2380static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2381 struct rsp_desc *r)
2382{
2383 struct sge_rspq *q = &qs->rspq;
Divy Le Ray6195c712007-01-30 19:43:56 -08002384 unsigned int sleeping = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002385
2386 do {
2387 u32 flags = ntohl(r->flags);
2388
2389 r++;
2390 if (unlikely(++q->cidx == q->size)) {
2391 q->cidx = 0;
2392 q->gen ^= 1;
2393 r = q->desc;
2394 }
2395 prefetch(r);
2396
2397 if (flags & RSPD_CTRL_MASK) {
2398 sleeping |= flags & RSPD_GTS_MASK;
Divy Le Ray6195c712007-01-30 19:43:56 -08002399 handle_rsp_cntrl_info(qs, flags);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002400 }
2401
2402 q->pure_rsps++;
2403 if (++q->credits >= (q->size / 4)) {
2404 refill_rspq(adap, q, q->credits);
2405 q->credits = 0;
2406 }
2407 } while (is_new_response(r, q) && is_pure_response(r));
2408
Divy Le Ray4d22de32007-01-18 22:04:14 -05002409 if (sleeping)
2410 check_ring_db(adap, qs, sleeping);
2411
2412 smp_mb(); /* commit Tx queue .processed updates */
2413 if (unlikely(qs->txq_stopped != 0))
2414 restart_tx(qs);
2415
2416 return is_new_response(r, q);
2417}
2418
2419/**
2420 * handle_responses - decide what to do with new responses in NAPI mode
2421 * @adap: the adapter
2422 * @q: the response queue
2423 *
2424 * This is used by the NAPI interrupt handlers to decide what to do with
2425 * new SGE responses. If there are no new responses it returns -1. If
2426 * there are new responses and they are pure (i.e., non-data carrying)
2427 * it handles them straight in hard interrupt context as they are very
2428 * cheap and don't deliver any packets. Finally, if there are any data
2429 * signaling responses it schedules the NAPI handler. Returns 1 if it
2430 * schedules NAPI, 0 if all new responses were pure.
2431 *
2432 * The caller must ascertain NAPI is not already running.
2433 */
2434static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2435{
2436 struct sge_qset *qs = rspq_to_qset(q);
2437 struct rsp_desc *r = &q->desc[q->cidx];
2438
2439 if (!is_new_response(r, q))
2440 return -1;
2441 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2442 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2443 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2444 return 0;
2445 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002446 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002447 return 1;
2448}
2449
2450/*
2451 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2452 * (i.e., response queue serviced in hard interrupt).
2453 */
2454irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2455{
2456 struct sge_qset *qs = cookie;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002457 struct adapter *adap = qs->adap;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002458 struct sge_rspq *q = &qs->rspq;
2459
2460 spin_lock(&q->lock);
2461 if (process_responses(adap, qs, -1) == 0)
2462 q->unhandled_irqs++;
2463 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2464 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2465 spin_unlock(&q->lock);
2466 return IRQ_HANDLED;
2467}
2468
2469/*
2470 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2471 * (i.e., response queue serviced by NAPI polling).
2472 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -07002473static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002474{
2475 struct sge_qset *qs = cookie;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002476 struct sge_rspq *q = &qs->rspq;
2477
2478 spin_lock(&q->lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002479
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002480 if (handle_responses(qs->adap, q) < 0)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002481 q->unhandled_irqs++;
2482 spin_unlock(&q->lock);
2483 return IRQ_HANDLED;
2484}
2485
2486/*
2487 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2488 * SGE response queues as well as error and other async events as they all use
2489 * the same MSI vector. We use one SGE response queue per port in this mode
2490 * and protect all response queues with queue 0's lock.
2491 */
2492static irqreturn_t t3_intr_msi(int irq, void *cookie)
2493{
2494 int new_packets = 0;
2495 struct adapter *adap = cookie;
2496 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2497
2498 spin_lock(&q->lock);
2499
2500 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2501 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2502 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2503 new_packets = 1;
2504 }
2505
2506 if (adap->params.nports == 2 &&
2507 process_responses(adap, &adap->sge.qs[1], -1)) {
2508 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2509
2510 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2511 V_NEWTIMER(q1->next_holdoff) |
2512 V_NEWINDEX(q1->cidx));
2513 new_packets = 1;
2514 }
2515
2516 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2517 q->unhandled_irqs++;
2518
2519 spin_unlock(&q->lock);
2520 return IRQ_HANDLED;
2521}
2522
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002523static int rspq_check_napi(struct sge_qset *qs)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002524{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002525 struct sge_rspq *q = &qs->rspq;
2526
2527 if (!napi_is_scheduled(&qs->napi) &&
2528 is_new_response(&q->desc[q->cidx], q)) {
2529 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002530 return 1;
2531 }
2532 return 0;
2533}
2534
2535/*
2536 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2537 * by NAPI polling). Handles data events from SGE response queues as well as
2538 * error and other async events as they all use the same MSI vector. We use
2539 * one SGE response queue per port in this mode and protect all response
2540 * queues with queue 0's lock.
2541 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -07002542static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002543{
2544 int new_packets;
2545 struct adapter *adap = cookie;
2546 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2547
2548 spin_lock(&q->lock);
2549
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002550 new_packets = rspq_check_napi(&adap->sge.qs[0]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002551 if (adap->params.nports == 2)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002552 new_packets += rspq_check_napi(&adap->sge.qs[1]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002553 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2554 q->unhandled_irqs++;
2555
2556 spin_unlock(&q->lock);
2557 return IRQ_HANDLED;
2558}
2559
2560/*
2561 * A helper function that processes responses and issues GTS.
2562 */
2563static inline int process_responses_gts(struct adapter *adap,
2564 struct sge_rspq *rq)
2565{
2566 int work;
2567
2568 work = process_responses(adap, rspq_to_qset(rq), -1);
2569 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2570 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2571 return work;
2572}
2573
2574/*
2575 * The legacy INTx interrupt handler. This needs to handle data events from
2576 * SGE response queues as well as error and other async events as they all use
2577 * the same interrupt pin. We use one SGE response queue per port in this mode
2578 * and protect all response queues with queue 0's lock.
2579 */
2580static irqreturn_t t3_intr(int irq, void *cookie)
2581{
2582 int work_done, w0, w1;
2583 struct adapter *adap = cookie;
2584 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2585 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2586
2587 spin_lock(&q0->lock);
2588
2589 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2590 w1 = adap->params.nports == 2 &&
2591 is_new_response(&q1->desc[q1->cidx], q1);
2592
2593 if (likely(w0 | w1)) {
2594 t3_write_reg(adap, A_PL_CLI, 0);
2595 t3_read_reg(adap, A_PL_CLI); /* flush */
2596
2597 if (likely(w0))
2598 process_responses_gts(adap, q0);
2599
2600 if (w1)
2601 process_responses_gts(adap, q1);
2602
2603 work_done = w0 | w1;
2604 } else
2605 work_done = t3_slow_intr_handler(adap);
2606
2607 spin_unlock(&q0->lock);
2608 return IRQ_RETVAL(work_done != 0);
2609}
2610
2611/*
2612 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2613 * Handles data events from SGE response queues as well as error and other
2614 * async events as they all use the same interrupt pin. We use one SGE
2615 * response queue per port in this mode and protect all response queues with
2616 * queue 0's lock.
2617 */
2618static irqreturn_t t3b_intr(int irq, void *cookie)
2619{
2620 u32 map;
2621 struct adapter *adap = cookie;
2622 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2623
2624 t3_write_reg(adap, A_PL_CLI, 0);
2625 map = t3_read_reg(adap, A_SG_DATA_INTR);
2626
2627 if (unlikely(!map)) /* shared interrupt, most likely */
2628 return IRQ_NONE;
2629
2630 spin_lock(&q0->lock);
2631
2632 if (unlikely(map & F_ERRINTR))
2633 t3_slow_intr_handler(adap);
2634
2635 if (likely(map & 1))
2636 process_responses_gts(adap, q0);
2637
2638 if (map & 2)
2639 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2640
2641 spin_unlock(&q0->lock);
2642 return IRQ_HANDLED;
2643}
2644
2645/*
2646 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2647 * Handles data events from SGE response queues as well as error and other
2648 * async events as they all use the same interrupt pin. We use one SGE
2649 * response queue per port in this mode and protect all response queues with
2650 * queue 0's lock.
2651 */
2652static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2653{
2654 u32 map;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002655 struct adapter *adap = cookie;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002656 struct sge_qset *qs0 = &adap->sge.qs[0];
2657 struct sge_rspq *q0 = &qs0->rspq;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002658
2659 t3_write_reg(adap, A_PL_CLI, 0);
2660 map = t3_read_reg(adap, A_SG_DATA_INTR);
2661
2662 if (unlikely(!map)) /* shared interrupt, most likely */
2663 return IRQ_NONE;
2664
2665 spin_lock(&q0->lock);
2666
2667 if (unlikely(map & F_ERRINTR))
2668 t3_slow_intr_handler(adap);
2669
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002670 if (likely(map & 1))
2671 napi_schedule(&qs0->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002672
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002673 if (map & 2)
2674 napi_schedule(&adap->sge.qs[1].napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002675
2676 spin_unlock(&q0->lock);
2677 return IRQ_HANDLED;
2678}
2679
2680/**
2681 * t3_intr_handler - select the top-level interrupt handler
2682 * @adap: the adapter
2683 * @polling: whether using NAPI to service response queues
2684 *
2685 * Selects the top-level interrupt handler based on the type of interrupts
2686 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2687 * response queues.
2688 */
Jeff Garzik7c239972007-10-19 03:12:20 -04002689irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002690{
2691 if (adap->flags & USING_MSIX)
2692 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2693 if (adap->flags & USING_MSI)
2694 return polling ? t3_intr_msi_napi : t3_intr_msi;
2695 if (adap->params.rev > 0)
2696 return polling ? t3b_intr_napi : t3b_intr;
2697 return t3_intr;
2698}
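/*
 * Illustrative sketch (not part of this file): the main driver is
 * expected to register the handler selected above, for example
 *
 *	err = request_irq(adap->pdev->irq,
 *			  t3_intr_handler(adap, rspq_polling),
 *			  irq_flags, adap->name, adap);
 *
 * where rspq_polling, irq_flags and the cookie are whatever the caller
 * (e.g. cxgb3_main.c) actually uses; they are shown here only as
 * assumptions.
 */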
2699
Divy Le Rayb8819552007-12-17 18:47:31 -08002700#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2701 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2702 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2703 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2704 F_HIRCQPARITYERROR)
2705#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2706#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2707 F_RSPQDISABLED)
2708
Divy Le Ray4d22de32007-01-18 22:04:14 -05002709/**
2710 * t3_sge_err_intr_handler - SGE async event interrupt handler
2711 * @adapter: the adapter
2712 *
2713 * Interrupt handler for SGE asynchronous (non-data) events.
2714 */
2715void t3_sge_err_intr_handler(struct adapter *adapter)
2716{
2717 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2718
Divy Le Rayb8819552007-12-17 18:47:31 -08002719 if (status & SGE_PARERR)
2720 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2721 status & SGE_PARERR);
2722 if (status & SGE_FRAMINGERR)
2723 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2724 status & SGE_FRAMINGERR);
2725
Divy Le Ray4d22de32007-01-18 22:04:14 -05002726 if (status & F_RSPQCREDITOVERFOW)
2727 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2728
2729 if (status & F_RSPQDISABLED) {
2730 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2731
2732 CH_ALERT(adapter,
2733 "packet delivered to disabled response queue "
2734 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2735 }
2736
Divy Le Ray6e3f03b2007-08-21 20:49:10 -07002737 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2738 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
2739 status & F_HIPIODRBDROPERR ? "high" : "lo");
2740
Divy Le Ray4d22de32007-01-18 22:04:14 -05002741 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
Divy Le Rayb8819552007-12-17 18:47:31 -08002742 if (status & SGE_FATALERR)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002743 t3_fatal_err(adapter);
2744}
2745
2746/**
2747 * sge_timer_cb - perform periodic maintenance of an SGE qset
2748 * @data: the SGE queue set to maintain
2749 *
2750 * Runs periodically from a timer to perform maintenance of an SGE queue
2751 * set. It performs two tasks:
2752 *
2753 * a) Cleans up any completed Tx descriptors that may still be pending.
2754 * Normal descriptor cleanup happens when new packets are added to a Tx
2755 * queue so this timer is relatively infrequent and does any cleanup only
2756 * if the Tx queue has not seen any new packets in a while. We make a
2757 * best effort attempt to reclaim descriptors, in that we don't wait
2758 * around if we cannot get a queue's lock (which most likely is because
2759 * someone else is queueing new packets and so will also handle the clean
2760 * up). Since control queues use immediate data exclusively we don't
2761 * bother cleaning them up here.
2762 *
2763 * b) Replenishes Rx queues that have run out due to memory shortage.
2764 * Normally new Rx buffers are added when existing ones are consumed but
2765 * when out of memory a queue can become empty. We try to add only a few
2766 * buffers here, the queue will be replenished fully as these new buffers
2767 * are used up if memory shortage has subsided.
2768 */
2769static void sge_timer_cb(unsigned long data)
2770{
2771 spinlock_t *lock;
2772 struct sge_qset *qs = (struct sge_qset *)data;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002773 struct adapter *adap = qs->adap;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002774
2775 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2776 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2777 spin_unlock(&qs->txq[TXQ_ETH].lock);
2778 }
2779 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2780 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2781 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2782 }
2783 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002784 &adap->sge.qs[0].rspq.lock;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002785 if (spin_trylock_irq(lock)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002786 if (!napi_is_scheduled(&qs->napi)) {
Divy Le Raybae73f42007-02-24 16:44:12 -08002787 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2788
Divy Le Ray4d22de32007-01-18 22:04:14 -05002789 if (qs->fl[0].credits < qs->fl[0].size)
2790 __refill_fl(adap, &qs->fl[0]);
2791 if (qs->fl[1].credits < qs->fl[1].size)
2792 __refill_fl(adap, &qs->fl[1]);
Divy Le Raybae73f42007-02-24 16:44:12 -08002793
2794 if (status & (1 << qs->rspq.cntxt_id)) {
2795 qs->rspq.starved++;
2796 if (qs->rspq.credits) {
2797 refill_rspq(adap, &qs->rspq, 1);
2798 qs->rspq.credits--;
2799 qs->rspq.restarted++;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002800 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
Divy Le Raybae73f42007-02-24 16:44:12 -08002801 1 << qs->rspq.cntxt_id);
2802 }
2803 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002804 }
2805 spin_unlock_irq(lock);
2806 }
2807 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2808}
2809
2810/**
2811 * t3_update_qset_coalesce - update coalescing settings for a queue set
2812 * @qs: the SGE queue set
2813 * @p: new queue set parameters
2814 *
2815 * Update the coalescing settings for an SGE queue set. Nothing is done
2816 * if the queue set is not initialized yet.
2817 */
2818void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2819{
Divy Le Ray4d22de32007-01-18 22:04:14 -05002820 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2821 qs->rspq.polling = p->polling;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002822 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002823}
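/*
 * Worked example (illustrative): the holdoff timer is programmed in
 * units of 0.1 us (see NOMEM_INTR_DELAY above), hence the multiply by
 * 10: a requested coalesce_usecs of 5 becomes holdoff_tmr = 50, and a
 * request of 0 is clamped to the minimum value of 1.
 */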
2824
2825/**
2826 * t3_sge_alloc_qset - initialize an SGE queue set
2827 * @adapter: the adapter
2828 * @id: the queue set id
2829 * @nports: how many Ethernet ports will be using this queue set
2830 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2831 * @p: configuration parameters for this queue set
2832 * @ntxq: number of Tx queues for the queue set
2833 * @netdev: net device associated with this queue set
Divy Le Ray82ad3322008-12-16 01:09:39 -08002834 * @netdevq: net device TX queue associated with this queue set
Divy Le Ray4d22de32007-01-18 22:04:14 -05002835 *
2836 * Allocate resources and initialize an SGE queue set. A queue set
2837 * comprises a response queue, two Rx free-buffer queues, and up to 3
2838 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2839 * queue, offload queue, and control queue.
2840 */
2841int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2842 int irq_vec_idx, const struct qset_params *p,
Divy Le Ray82ad3322008-12-16 01:09:39 -08002843 int ntxq, struct net_device *dev,
2844 struct netdev_queue *netdevq)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002845{
Divy Le Rayb1fb1f22008-05-21 18:56:16 -07002846 int i, avail, ret = -ENOMEM;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002847 struct sge_qset *q = &adapter->sge.qs[id];
Divy Le Rayb47385b2008-05-21 18:56:26 -07002848 struct net_lro_mgr *lro_mgr = &q->lro_mgr;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002849
2850 init_qset_cntxt(q, id);
Divy Le Ray20d3fc12008-10-08 17:36:03 -07002851 setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002852
2853 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2854 sizeof(struct rx_desc),
2855 sizeof(struct rx_sw_desc),
2856 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2857 if (!q->fl[0].desc)
2858 goto err;
2859
2860 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2861 sizeof(struct rx_desc),
2862 sizeof(struct rx_sw_desc),
2863 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2864 if (!q->fl[1].desc)
2865 goto err;
2866
2867 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2868 sizeof(struct rsp_desc), 0,
2869 &q->rspq.phys_addr, NULL);
2870 if (!q->rspq.desc)
2871 goto err;
2872
2873 for (i = 0; i < ntxq; ++i) {
2874 /*
2875 * The control queue always uses immediate data so does not
2876 * need to keep track of any sk_buffs.
2877 */
2878 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2879
2880 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2881 sizeof(struct tx_desc), sz,
2882 &q->txq[i].phys_addr,
2883 &q->txq[i].sdesc);
2884 if (!q->txq[i].desc)
2885 goto err;
2886
2887 q->txq[i].gen = 1;
2888 q->txq[i].size = p->txq_size[i];
2889 spin_lock_init(&q->txq[i].lock);
2890 skb_queue_head_init(&q->txq[i].sendq);
2891 }
2892
2893 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2894 (unsigned long)q);
2895 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2896 (unsigned long)q);
2897
2898 q->fl[0].gen = q->fl[1].gen = 1;
2899 q->fl[0].size = p->fl_size;
2900 q->fl[1].size = p->jumbo_size;
2901
2902 q->rspq.gen = 1;
2903 q->rspq.size = p->rspq_size;
2904 spin_lock_init(&q->rspq.lock);
David S. Miller147e70e2008-09-22 01:29:52 -07002905 skb_queue_head_init(&q->rspq.rx_queue);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002906
2907 q->txq[TXQ_ETH].stop_thres = nports *
2908 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2909
Divy Le Raycf992af2007-05-30 21:10:47 -07002910#if FL0_PG_CHUNK_SIZE > 0
2911 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002912#else
Divy Le Raycf992af2007-05-30 21:10:47 -07002913 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
Divy Le Raye0994eb2007-02-24 16:44:17 -08002914#endif
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002915#if FL1_PG_CHUNK_SIZE > 0
2916 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
2917#else
Divy Le Raycf992af2007-05-30 21:10:47 -07002918 q->fl[1].buf_size = is_offload(adapter) ?
2919 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2920 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002921#endif
2922
2923 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
2924 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
2925 q->fl[0].order = FL0_PG_ORDER;
2926 q->fl[1].order = FL1_PG_ORDER;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002927
Divy Le Rayb47385b2008-05-21 18:56:26 -07002928 q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
2929 sizeof(struct skb_frag_struct),
2930 GFP_KERNEL);
2931 q->lro_nfrags = q->lro_frag_len = 0;
	spin_lock_irq(&adapter->sge.reg_lock);

	/* FL threshold comparison uses < */
	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
				   q->rspq.phys_addr, q->rspq.size,
				   q->fl[0].buf_size, 1, 0);
	if (ret)
		goto err_unlock;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
					  q->fl[i].phys_addr, q->fl[i].size,
					  q->fl[i].buf_size, p->cong_thres, 1,
					  0);
		if (ret)
			goto err_unlock;
	}

	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
				 1, 0);
	if (ret)
		goto err_unlock;

	if (ntxq > 1) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
					 USE_GTS, SGE_CNTXT_OFLD, id,
					 q->txq[TXQ_OFLD].phys_addr,
					 q->txq[TXQ_OFLD].size, 0, 1, 0);
		if (ret)
			goto err_unlock;
	}

	if (ntxq > 2) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
					 SGE_CNTXT_CTRL, id,
					 q->txq[TXQ_CTRL].phys_addr,
					 q->txq[TXQ_CTRL].size,
					 q->txq[TXQ_CTRL].token, 1, 0);
		if (ret)
			goto err_unlock;
	}

	spin_unlock_irq(&adapter->sge.reg_lock);

	q->adap = adapter;
	q->netdev = dev;
	q->tx_q = netdevq;
	t3_update_qset_coalesce(q, p);

	init_lro_mgr(q, lro_mgr);

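	/*
	 * Give the free lists their initial buffers.  FL0 must receive at
	 * least some credits or the queue set cannot take in packets at all,
	 * so a completely failed fill is fatal; a partial fill only warns.
	 */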
	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
			  GFP_KERNEL | __GFP_COMP);
	if (!avail) {
		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
		goto err;
	}
	if (avail < q->fl[0].size)
		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
			avail);

	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
			  GFP_KERNEL | __GFP_COMP);
	if (avail < q->fl[1].size)
		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
			avail);
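	/*
	 * Hand all but one response queue credit back to the hardware and
	 * program the initial interrupt holdoff timer via the GTS register.
	 */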
	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);

	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
		     V_NEWTIMER(q->rspq.holdoff_tmr));

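	/* Start the periodic Tx buffer reclaim timer for this queue set. */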
	mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
	return 0;

err_unlock:
	spin_unlock_irq(&adapter->sge.reg_lock);
err:
	t3_free_qset(adapter, q);
	return ret;
}

/**
 * t3_stop_sge_timers - stop SGE timer callbacks
 * @adap: the adapter
 *
 * Stops each SGE queue set's timer callback
 */
void t3_stop_sge_timers(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i) {
		struct sge_qset *q = &adap->sge.qs[i];

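		/* Only stop timers that were actually set up. */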
		if (q->tx_reclaim_timer.function)
			del_timer_sync(&q->tx_reclaim_timer);
	}
}

/**
 * t3_free_sge_resources - free SGE resources
 * @adap: the adapter
 *
 * Frees resources used by the SGE queue sets.
 */
void t3_free_sge_resources(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i)
		t3_free_qset(adap, &adap->sge.qs[i]);
}

/**
 * t3_sge_start - enable SGE
 * @adap: the adapter
 *
 * Enables the SGE for DMAs. This is the last step in starting packet
 * transfers.
 */
void t3_sge_start(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
}

/**
 * t3_sge_stop - disable SGE operation
 * @adap: the adapter
 *
 * Disables the DMA engine. This can be called in emergencies (e.g.,
 * from error interrupts) or from normal process context. In the latter
 * case it also disables any pending queue restart tasklets. Note that
 * if it is called in interrupt context it cannot disable the restart
 * tasklets as it cannot wait; however, the tasklets will have no effect
 * since the doorbells are disabled and the driver will call this again
 * later from process context, at which time the tasklets will be stopped
 * if they are still running.
 */
void t3_sge_stop(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
	if (!in_interrupt()) {
		int i;

		for (i = 0; i < SGE_QSETS; ++i) {
			struct sge_qset *qs = &adap->sge.qs[i];

			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
		}
	}
}

/**
 * t3_sge_init - initialize SGE
 * @adap: the adapter
 * @p: the SGE parameters
 *
 * Performs SGE initialization needed every time after a chip reset.
 * We do not initialize any of the queue sets here; instead, the driver
 * top-level must request those individually. We also do not enable DMA
 * here; that should be done after the queues have been set up.
 */
void t3_sge_init(struct adapter *adap, struct sge_params *p)
{
	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);

	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	    F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
		     adap->params.rev < T3_REV_C ? 1000 : 500);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}

/**
 * t3_sge_prep - one-time SGE initialization
 * @adap: the associated adapter
 * @p: SGE parameters
 *
 * Performs one-time initialization of SGE SW state. Includes determining
 * defaults for the assorted SGE parameters, which admins can change until
 * they are used to initialize the SGE.
 */
void t3_sge_prep(struct adapter *adap, struct sge_params *p)
{
	int i;

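	/*
	 * Largest receive payload that still fits a 16KB buffer once the
	 * CPL header and skb_shared_info overhead are accounted for.
	 */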
	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = p->qset + i;

		q->polling = adap->params.rev > 0;
		q->coalesce_usecs = 5;
		q->rspq_size = 1024;
		q->fl_size = 1024;
		q->jumbo_size = 512;
		q->txq_size[TXQ_ETH] = 1024;
		q->txq_size[TXQ_OFLD] = 1024;
		q->txq_size[TXQ_CTRL] = 256;
		q->cong_thres = 0;
	}

	spin_lock_init(&adap->sge.reg_lock);
}

/**
 * t3_get_desc - dump an SGE descriptor for debugging purposes
 * @qs: the queue set
 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
 * @idx: the descriptor index in the queue
 * @data: where to dump the descriptor contents
 *
 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
 * size of the descriptor.
 */
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
		unsigned char *data)
{
	if (qnum >= 6)
		return -EINVAL;

	if (qnum < 3) {
		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
			return -EINVAL;
		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
		return sizeof(struct tx_desc);
	}

	if (qnum == 3) {
		if (!qs->rspq.desc || idx >= qs->rspq.size)
			return -EINVAL;
		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
		return sizeof(struct rsp_desc);
	}

	qnum -= 4;
	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
		return -EINVAL;
	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
	return sizeof(struct rx_desc);
}