Divy Le Ray4d22de32007-01-18 22:04:14 -05001/*
Divy Le Ray1d68e932007-01-30 19:44:35 -08002 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
Divy Le Ray4d22de32007-01-18 22:04:14 -05003 *
Divy Le Ray1d68e932007-01-30 19:44:35 -08004 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
Divy Le Ray4d22de32007-01-18 22:04:14 -05009 *
Divy Le Ray1d68e932007-01-30 19:44:35 -080010 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
Divy Le Ray4d22de32007-01-18 22:04:14 -050031 */
Divy Le Ray4d22de32007-01-18 22:04:14 -050032#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/dma-mapping.h>
39#include "common.h"
40#include "regs.h"
41#include "sge_defs.h"
42#include "t3_cpl.h"
43#include "firmware_exports.h"
44
45#define USE_GTS 0
46
47#define SGE_RX_SM_BUF_SIZE 1536
Divy Le Raye0994eb2007-02-24 16:44:17 -080048
Divy Le Ray4d22de32007-01-18 22:04:14 -050049#define SGE_RX_COPY_THRES 256
Divy Le Raycf992af2007-05-30 21:10:47 -070050#define SGE_RX_PULL_LEN 128
Divy Le Ray4d22de32007-01-18 22:04:14 -050051
Divy Le Raye0994eb2007-02-24 16:44:17 -080052/*
Divy Le Raycf992af2007-05-30 21:10:47 -070053 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
54 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
55 * directly.
Divy Le Raye0994eb2007-02-24 16:44:17 -080056 */
Divy Le Raycf992af2007-05-30 21:10:47 -070057#define FL0_PG_CHUNK_SIZE 2048
58
Divy Le Raye0994eb2007-02-24 16:44:17 -080059#define SGE_RX_DROP_THRES 16
Divy Le Ray4d22de32007-01-18 22:04:14 -050060
61/*
62 * Period of the Tx buffer reclaim timer. This timer does not need to run
63 * frequently as Tx buffers are usually reclaimed by new Tx packets.
64 */
65#define TX_RECLAIM_PERIOD (HZ / 4)
66
67/* WR size in bytes */
68#define WR_LEN (WR_FLITS * 8)
69
70/*
71 * Types of Tx queues in each queue set. Order here matters, do not change.
72 */
73enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
74
75/* Values for sge_txq.flags */
76enum {
77 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
78 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
79};
80
81struct tx_desc {
Al Virofb8e4442007-08-23 03:04:12 -040082 __be64 flit[TX_DESC_FLITS];
Divy Le Ray4d22de32007-01-18 22:04:14 -050083};
84
85struct rx_desc {
86 __be32 addr_lo;
87 __be32 len_gen;
88 __be32 gen2;
89 __be32 addr_hi;
90};
91
92struct tx_sw_desc { /* SW state per Tx descriptor */
93 struct sk_buff *skb;
Divy Le Ray23561c92007-11-16 11:22:05 -080094 u8 eop; /* set if last descriptor for packet */
95 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
96 u8 fragidx; /* first page fragment associated with descriptor */
97 s8 sflit; /* start flit of first SGL entry in descriptor */
Divy Le Ray4d22de32007-01-18 22:04:14 -050098};
99
Divy Le Raycf992af2007-05-30 21:10:47 -0700100struct rx_sw_desc { /* SW state per Rx descriptor */
Divy Le Raye0994eb2007-02-24 16:44:17 -0800101 union {
102 struct sk_buff *skb;
Divy Le Raycf992af2007-05-30 21:10:47 -0700103 struct fl_pg_chunk pg_chunk;
104 };
105 DECLARE_PCI_UNMAP_ADDR(dma_addr);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500106};
107
108struct rsp_desc { /* response queue descriptor */
109 struct rss_header rss_hdr;
110 __be32 flags;
111 __be32 len_cq;
112 u8 imm_data[47];
113 u8 intr_gen;
114};
115
Divy Le Ray4d22de32007-01-18 22:04:14 -0500116/*
Divy Le Ray99d7cf32007-02-24 16:44:06 -0800117 * Holds unmapping information for Tx packets that need deferred unmapping.
118 * This structure lives at skb->head and must be allocated by callers.
119 */
120struct deferred_unmap_info {
121 struct pci_dev *pdev;
122 dma_addr_t addr[MAX_SKB_FRAGS + 1];
123};
124
125/*
Divy Le Ray4d22de32007-01-18 22:04:14 -0500126 * Maps a number of flits to the number of Tx descriptors that can hold them.
127 * The formula is
128 *
129 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
130 *
131 * HW allows up to 4 descriptors to be combined into a WR.
132 */
133static u8 flit_desc_map[] = {
134 0,
135#if SGE_NUM_GENBITS == 1
136 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
137 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
138 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
139 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
140#elif SGE_NUM_GENBITS == 2
141 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
142 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
143 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
144 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
145#else
146# error "SGE_NUM_GENBITS must be 1 or 2"
147#endif
148};
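/*
 * Illustrative self-check (a sketch, not compiled into the driver): the
 * table above is simply a precomputed form of the formula in the comment,
 * assuming WR_FLITS keeps its usual relationship to TX_DESC_FLITS and
 * SGE_NUM_GENBITS.
 */
#if 0
static void check_flit_desc_map(void)
{
	unsigned int flits;

	for (flits = 2; flits < ARRAY_SIZE(flit_desc_map); flits++)
		BUG_ON(flit_desc_map[flits] !=
		       1 + (flits - 2) / (WR_FLITS - 1));
}
#endif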
149
150static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
151{
152 return container_of(q, struct sge_qset, fl[qidx]);
153}
154
155static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
156{
157 return container_of(q, struct sge_qset, rspq);
158}
159
160static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
161{
162 return container_of(q, struct sge_qset, txq[qidx]);
163}
164
165/**
166 * refill_rspq - replenish an SGE response queue
167 * @adapter: the adapter
168 * @q: the response queue to replenish
169 * @credits: how many new responses to make available
170 *
171 * Replenishes a response queue by making the supplied number of responses
172 * available to HW.
173 */
174static inline void refill_rspq(struct adapter *adapter,
175 const struct sge_rspq *q, unsigned int credits)
176{
177 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
178 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
179}
180
181/**
182 * need_skb_unmap - does the platform need unmapping of sk_buffs?
183 *
184 * Returns true if the platfrom needs sk_buff unmapping. The compiler
185 * optimizes away unecessary code if this returns true.
186 */
187static inline int need_skb_unmap(void)
188{
189 /*
190 * This structure is used to tell if the platfrom needs buffer
191 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
192 */
193 struct dummy {
194 DECLARE_PCI_UNMAP_ADDR(addr);
195 };
196
197 return sizeof(struct dummy) != 0;
198}
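/*
 * Minimal standalone sketch of the trick used above (not driver code): on
 * platforms where DECLARE_PCI_UNMAP_ADDR() expands to nothing, the dummy
 * struct is empty, its size is 0 under GNU C, and need_skb_unmap() becomes
 * a compile-time 0, so gcc removes the callers' unmapping branches.
 */
#if 0
struct without_addr { };			/* sizeof() == 0 under gcc */

static void example_caller(void)
{
	if (sizeof(struct without_addr) != 0)	/* constant-folded to 0 */
		expensive_unmap_path();		/* hypothetical helper; dead code */
}
#endif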
199
200/**
201 * unmap_skb - unmap a packet main body and its page fragments
202 * @skb: the packet
203 * @q: the Tx queue containing Tx descriptors for the packet
204 * @cidx: index of Tx descriptor
205 * @pdev: the PCI device
206 *
207 * Unmap the main body of an sk_buff and its page fragments, if any.
208 * Because of the fairly complicated structure of our SGLs and the desire
Divy Le Ray23561c92007-11-16 11:22:05 -0800209 * to conserve space for metadata, the information necessary to unmap an
210 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
211 * descriptors (the physical addresses of the various data buffers), and
212 * the SW descriptor state (assorted indices). The send functions
213 * initialize the indices for the first packet descriptor so we can unmap
214 * the buffers held in the first Tx descriptor here, and we have enough
215 * information at this point to set the state for the next Tx descriptor.
216 *
217 * Note that it is possible to clean up the first descriptor of a packet
218 * before the send routines have written the next descriptors, but this
219 * race does not cause any problem. We just end up writing the unmapping
220 * info for the descriptor first.
Divy Le Ray4d22de32007-01-18 22:04:14 -0500221 */
222static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
223 unsigned int cidx, struct pci_dev *pdev)
224{
225 const struct sg_ent *sgp;
Divy Le Ray23561c92007-11-16 11:22:05 -0800226 struct tx_sw_desc *d = &q->sdesc[cidx];
227 int nfrags, frag_idx, curflit, j = d->addr_idx;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500228
Divy Le Ray23561c92007-11-16 11:22:05 -0800229 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
230 frag_idx = d->fragidx;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500231
Divy Le Ray23561c92007-11-16 11:22:05 -0800232 if (frag_idx == 0 && skb_headlen(skb)) {
233 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
234 skb_headlen(skb), PCI_DMA_TODEVICE);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500235 j = 1;
236 }
237
Divy Le Ray23561c92007-11-16 11:22:05 -0800238 curflit = d->sflit + 1 + j;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500239 nfrags = skb_shinfo(skb)->nr_frags;
240
241 while (frag_idx < nfrags && curflit < WR_FLITS) {
242 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
243 skb_shinfo(skb)->frags[frag_idx].size,
244 PCI_DMA_TODEVICE);
245 j ^= 1;
246 if (j == 0) {
247 sgp++;
248 curflit++;
249 }
250 curflit++;
251 frag_idx++;
252 }
253
Divy Le Ray23561c92007-11-16 11:22:05 -0800254 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
255 d = cidx + 1 == q->size ? q->sdesc : d + 1;
256 d->fragidx = frag_idx;
257 d->addr_idx = j;
258 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
Divy Le Ray4d22de32007-01-18 22:04:14 -0500259 }
260}
261
262/**
263 * free_tx_desc - reclaims Tx descriptors and their buffers
264 * @adapter: the adapter
265 * @q: the Tx queue to reclaim descriptors from
266 * @n: the number of descriptors to reclaim
267 *
268 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
269 * Tx buffers. Called with the Tx queue lock held.
270 */
271static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
272 unsigned int n)
273{
274 struct tx_sw_desc *d;
275 struct pci_dev *pdev = adapter->pdev;
276 unsigned int cidx = q->cidx;
277
Divy Le Ray99d7cf32007-02-24 16:44:06 -0800278 const int need_unmap = need_skb_unmap() &&
279 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
280
Divy Le Ray4d22de32007-01-18 22:04:14 -0500281 d = &q->sdesc[cidx];
282 while (n--) {
283 if (d->skb) { /* an SGL is present */
Divy Le Ray99d7cf32007-02-24 16:44:06 -0800284 if (need_unmap)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500285 unmap_skb(d->skb, q, cidx, pdev);
Divy Le Ray23561c92007-11-16 11:22:05 -0800286 if (d->eop)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500287 kfree_skb(d->skb);
288 }
289 ++d;
290 if (++cidx == q->size) {
291 cidx = 0;
292 d = q->sdesc;
293 }
294 }
295 q->cidx = cidx;
296}
297
298/**
299 * reclaim_completed_tx - reclaims completed Tx descriptors
300 * @adapter: the adapter
301 * @q: the Tx queue to reclaim completed descriptors from
302 *
303 * Reclaims Tx descriptors that the SGE has indicated it has processed,
304 * and frees the associated buffers if possible. Called with the Tx
305 * queue's lock held.
306 */
307static inline void reclaim_completed_tx(struct adapter *adapter,
308 struct sge_txq *q)
309{
310 unsigned int reclaim = q->processed - q->cleaned;
311
312 if (reclaim) {
313 free_tx_desc(adapter, q, reclaim);
314 q->cleaned += reclaim;
315 q->in_use -= reclaim;
316 }
317}
318
319/**
320 * should_restart_tx - are there enough resources to restart a Tx queue?
321 * @q: the Tx queue
322 *
323 * Checks if there are enough descriptors to restart a suspended Tx queue.
324 */
325static inline int should_restart_tx(const struct sge_txq *q)
326{
327 unsigned int r = q->processed - q->cleaned;
328
329 return q->in_use - r < (q->size >> 1);
330}
331
332/**
333 * free_rx_bufs - free the Rx buffers on an SGE free list
334 * @pdev: the PCI device associated with the adapter
335 * @rxq: the SGE free list to clean up
336 *
337 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
338 * this queue should be stopped before calling this function.
339 */
340static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
341{
342 unsigned int cidx = q->cidx;
343
344 while (q->credits--) {
345 struct rx_sw_desc *d = &q->sdesc[cidx];
346
347 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
348 q->buf_size, PCI_DMA_FROMDEVICE);
Divy Le Raycf992af2007-05-30 21:10:47 -0700349 if (q->use_pages) {
350 put_page(d->pg_chunk.page);
351 d->pg_chunk.page = NULL;
Divy Le Raye0994eb2007-02-24 16:44:17 -0800352 } else {
Divy Le Raycf992af2007-05-30 21:10:47 -0700353 kfree_skb(d->skb);
354 d->skb = NULL;
Divy Le Raye0994eb2007-02-24 16:44:17 -0800355 }
Divy Le Ray4d22de32007-01-18 22:04:14 -0500356 if (++cidx == q->size)
357 cidx = 0;
358 }
Divy Le Raye0994eb2007-02-24 16:44:17 -0800359
Divy Le Raycf992af2007-05-30 21:10:47 -0700360 if (q->pg_chunk.page) {
361 __free_page(q->pg_chunk.page);
362 q->pg_chunk.page = NULL;
363 }
Divy Le Ray4d22de32007-01-18 22:04:14 -0500364}
365
366/**
367 * add_one_rx_buf - add a packet buffer to a free-buffer list
Divy Le Raycf992af2007-05-30 21:10:47 -0700368 * @va: buffer start VA
Divy Le Ray4d22de32007-01-18 22:04:14 -0500369 * @len: the buffer length
370 * @d: the HW Rx descriptor to write
371 * @sd: the SW Rx descriptor to write
372 * @gen: the generation bit value
373 * @pdev: the PCI device associated with the adapter
374 *
375 * Add a buffer of the given length to the supplied HW and SW Rx
376 * descriptors.
377 */
Divy Le Raycf992af2007-05-30 21:10:47 -0700378static inline void add_one_rx_buf(void *va, unsigned int len,
Divy Le Ray4d22de32007-01-18 22:04:14 -0500379 struct rx_desc *d, struct rx_sw_desc *sd,
380 unsigned int gen, struct pci_dev *pdev)
381{
382 dma_addr_t mapping;
383
Divy Le Raye0994eb2007-02-24 16:44:17 -0800384 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500385 pci_unmap_addr_set(sd, dma_addr, mapping);
386
387 d->addr_lo = cpu_to_be32(mapping);
388 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
389 wmb();
390 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
391 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
392}
393
Divy Le Raycf992af2007-05-30 21:10:47 -0700394static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
395{
396 if (!q->pg_chunk.page) {
397 q->pg_chunk.page = alloc_page(gfp);
398 if (unlikely(!q->pg_chunk.page))
399 return -ENOMEM;
400 q->pg_chunk.va = page_address(q->pg_chunk.page);
401 q->pg_chunk.offset = 0;
402 }
403 sd->pg_chunk = q->pg_chunk;
404
405 q->pg_chunk.offset += q->buf_size;
406 if (q->pg_chunk.offset == PAGE_SIZE)
407 q->pg_chunk.page = NULL;
408 else {
409 q->pg_chunk.va += q->buf_size;
410 get_page(q->pg_chunk.page);
411 }
412 return 0;
413}
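/*
 * Worked example of the chunk carving above (illustrative sketch, assuming
 * 4KB pages and the 2KB FL0_PG_CHUNK_SIZE default):
 *
 *   alloc_page()           page refcount 1, offset 0
 *   1st chunk handed out   offset -> 2048, get_page() -> refcount 2
 *   2nd chunk handed out   offset -> 4096 == PAGE_SIZE, page forgotten
 *
 * Each outstanding chunk therefore owns exactly one page reference, dropped
 * by put_page() in free_rx_bufs() or when the skb using the chunk is freed.
 */
#if 0
static inline unsigned int example_chunks_per_page(unsigned int buf_size)
{
	return PAGE_SIZE / buf_size;		/* 2 in the example above */
}
#endif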
414
Divy Le Ray4d22de32007-01-18 22:04:14 -0500415/**
416 * refill_fl - refill an SGE free-buffer list
417 * @adapter: the adapter
418 * @q: the free-list to refill
419 * @n: the number of new buffers to allocate
420 * @gfp: the gfp flags for allocating new buffers
421 *
422 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
423 * allocated with the supplied gfp flags. The caller must assure that
424 * @n does not exceed the queue's capacity.
425 */
426static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
427{
Divy Le Raycf992af2007-05-30 21:10:47 -0700428 void *buf_start;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500429 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
430 struct rx_desc *d = &q->desc[q->pidx];
431
432 while (n--) {
Divy Le Raycf992af2007-05-30 21:10:47 -0700433 if (q->use_pages) {
434 if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
435nomem: q->alloc_failed++;
Divy Le Raye0994eb2007-02-24 16:44:17 -0800436 break;
437 }
Divy Le Raycf992af2007-05-30 21:10:47 -0700438 buf_start = sd->pg_chunk.va;
Divy Le Raye0994eb2007-02-24 16:44:17 -0800439 } else {
Divy Le Raycf992af2007-05-30 21:10:47 -0700440 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
Divy Le Raye0994eb2007-02-24 16:44:17 -0800441
Divy Le Raycf992af2007-05-30 21:10:47 -0700442 if (!skb)
443 goto nomem;
Divy Le Raye0994eb2007-02-24 16:44:17 -0800444
Divy Le Raycf992af2007-05-30 21:10:47 -0700445 sd->skb = skb;
446 buf_start = skb->data;
Divy Le Raye0994eb2007-02-24 16:44:17 -0800447 }
448
Divy Le Raycf992af2007-05-30 21:10:47 -0700449 add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
450 adap->pdev);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500451 d++;
452 sd++;
453 if (++q->pidx == q->size) {
454 q->pidx = 0;
455 q->gen ^= 1;
456 sd = q->sdesc;
457 d = q->desc;
458 }
459 q->credits++;
460 }
461
462 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
463}
464
465static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
466{
467 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
468}
469
470/**
471 * recycle_rx_buf - recycle a receive buffer
472 * @adapter: the adapter
473 * @q: the SGE free list
474 * @idx: index of buffer to recycle
475 *
476 * Recycles the specified buffer on the given free list by adding it at
477 * the next available slot on the list.
478 */
479static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
480 unsigned int idx)
481{
482 struct rx_desc *from = &q->desc[idx];
483 struct rx_desc *to = &q->desc[q->pidx];
484
Divy Le Raycf992af2007-05-30 21:10:47 -0700485 q->sdesc[q->pidx] = q->sdesc[idx];
Divy Le Ray4d22de32007-01-18 22:04:14 -0500486 to->addr_lo = from->addr_lo; /* already big endian */
487 to->addr_hi = from->addr_hi; /* likewise */
488 wmb();
489 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
490 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
491 q->credits++;
492
493 if (++q->pidx == q->size) {
494 q->pidx = 0;
495 q->gen ^= 1;
496 }
497 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
498}
499
500/**
501 * alloc_ring - allocate resources for an SGE descriptor ring
502 * @pdev: the PCI device
503 * @nelem: the number of descriptors
504 * @elem_size: the size of each descriptor
505 * @sw_size: the size of the SW state associated with each ring element
506 * @phys: the physical address of the allocated ring
507 * @metadata: address of the array holding the SW state for the ring
508 *
509 * Allocates resources for an SGE descriptor ring, such as Tx queues,
510 * free buffer lists, or response queues. Each SGE ring requires
511 * space for its HW descriptors plus, optionally, space for the SW state
512 * associated with each HW entry (the metadata). The function returns
513 * three values: the virtual address for the HW ring (the return value
514 * of the function), the physical address of the HW ring, and the address
515 * of the SW ring.
516 */
517static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
Divy Le Raye0994eb2007-02-24 16:44:17 -0800518 size_t sw_size, dma_addr_t * phys, void *metadata)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500519{
520 size_t len = nelem * elem_size;
521 void *s = NULL;
522 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
523
524 if (!p)
525 return NULL;
526 if (sw_size) {
527 s = kcalloc(nelem, sw_size, GFP_KERNEL);
528
529 if (!s) {
530 dma_free_coherent(&pdev->dev, len, p, *phys);
531 return NULL;
532 }
533 }
534 if (metadata)
535 *(void **)metadata = s;
536 memset(p, 0, len);
537 return p;
538}
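/*
 * Usage sketch (hedged; the real call sites are in the queue-set setup code
 * later in this file): allocate a free list's HW descriptor ring and its SW
 * state in one call, keeping all three results.
 */
#if 0
static int example_alloc_fl_ring(struct adapter *adap, struct sge_fl *fl)
{
	fl->desc = alloc_ring(adap->pdev, fl->size, sizeof(struct rx_desc),
			      sizeof(struct rx_sw_desc), &fl->phys_addr,
			      &fl->sdesc);
	return fl->desc ? 0 : -ENOMEM;
}
#endif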
539
540/**
541 * free_qset - free the resources of an SGE queue set
542 * @adapter: the adapter owning the queue set
543 * @q: the queue set
544 *
545 * Release the HW and SW resources associated with an SGE queue set, such
546 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
547 * queue set must be quiesced prior to calling this.
548 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -0700549static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500550{
551 int i;
552 struct pci_dev *pdev = adapter->pdev;
553
554 if (q->tx_reclaim_timer.function)
555 del_timer_sync(&q->tx_reclaim_timer);
556
557 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
558 if (q->fl[i].desc) {
559 spin_lock(&adapter->sge.reg_lock);
560 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
561 spin_unlock(&adapter->sge.reg_lock);
562 free_rx_bufs(pdev, &q->fl[i]);
563 kfree(q->fl[i].sdesc);
564 dma_free_coherent(&pdev->dev,
565 q->fl[i].size *
566 sizeof(struct rx_desc), q->fl[i].desc,
567 q->fl[i].phys_addr);
568 }
569
570 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
571 if (q->txq[i].desc) {
572 spin_lock(&adapter->sge.reg_lock);
573 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
574 spin_unlock(&adapter->sge.reg_lock);
575 if (q->txq[i].sdesc) {
576 free_tx_desc(adapter, &q->txq[i],
577 q->txq[i].in_use);
578 kfree(q->txq[i].sdesc);
579 }
580 dma_free_coherent(&pdev->dev,
581 q->txq[i].size *
582 sizeof(struct tx_desc),
583 q->txq[i].desc, q->txq[i].phys_addr);
584 __skb_queue_purge(&q->txq[i].sendq);
585 }
586
587 if (q->rspq.desc) {
588 spin_lock(&adapter->sge.reg_lock);
589 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
590 spin_unlock(&adapter->sge.reg_lock);
591 dma_free_coherent(&pdev->dev,
592 q->rspq.size * sizeof(struct rsp_desc),
593 q->rspq.desc, q->rspq.phys_addr);
594 }
595
Divy Le Ray4d22de32007-01-18 22:04:14 -0500596 memset(q, 0, sizeof(*q));
597}
598
599/**
600 * init_qset_cntxt - initialize an SGE queue set context info
601 * @qs: the queue set
602 * @id: the queue set id
603 *
604 * Initializes the TIDs and context ids for the queues of a queue set.
605 */
606static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
607{
608 qs->rspq.cntxt_id = id;
609 qs->fl[0].cntxt_id = 2 * id;
610 qs->fl[1].cntxt_id = 2 * id + 1;
611 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
612 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
613 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
614 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
615 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
616}
617
618/**
619 * sgl_len - calculates the size of an SGL of the given capacity
620 * @n: the number of SGL entries
621 *
622 * Calculates the number of flits needed for a scatter/gather list that
623 * can hold the given number of entries.
624 */
625static inline unsigned int sgl_len(unsigned int n)
626{
627 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
628 return (3 * n) / 2 + (n & 1);
629}
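/*
 * Worked examples (sketch only, not compiled): two SGL entries fit in the
 * 3 flits of one sg_ent, and a trailing odd entry costs 2 flits, hence the
 * formula above.
 */
#if 0
static void sgl_len_examples(void)
{
	BUG_ON(sgl_len(1) != 2);	/* one length flit + one address flit */
	BUG_ON(sgl_len(2) != 3);	/* one full sg_ent */
	BUG_ON(sgl_len(3) != 5);	/* 3 + 2 */
	BUG_ON(sgl_len(16) != 24);
}
#endif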
630
631/**
632 * flits_to_desc - returns the num of Tx descriptors for the given flits
633 * @n: the number of flits
634 *
635 * Calculates the number of Tx descriptors needed for the supplied number
636 * of flits.
637 */
638static inline unsigned int flits_to_desc(unsigned int n)
639{
640 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
641 return flit_desc_map[n];
642}
643
644/**
Divy Le Raycf992af2007-05-30 21:10:47 -0700645 * get_packet - return the next ingress packet buffer from a free list
646 * @adap: the adapter that received the packet
647 * @fl: the SGE free list holding the packet
648 * @len: the packet length including any SGE padding
649 * @drop_thres: # of remaining buffers before we start dropping packets
650 *
651 * Get the next packet from a free list and complete setup of the
652 * sk_buff. If the packet is small we make a copy and recycle the
653 * original buffer, otherwise we use the original buffer itself. If a
654 * positive drop threshold is supplied packets are dropped and their
655 * buffers recycled if (a) the number of remaining buffers is under the
656 * threshold and the packet is too big to copy, or (b) the packet should
657 * be copied but there is no memory for the copy.
658 */
659static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
660 unsigned int len, unsigned int drop_thres)
661{
662 struct sk_buff *skb = NULL;
663 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
664
665 prefetch(sd->skb->data);
666 fl->credits--;
667
668 if (len <= SGE_RX_COPY_THRES) {
669 skb = alloc_skb(len, GFP_ATOMIC);
670 if (likely(skb != NULL)) {
671 __skb_put(skb, len);
672 pci_dma_sync_single_for_cpu(adap->pdev,
673 pci_unmap_addr(sd, dma_addr), len,
674 PCI_DMA_FROMDEVICE);
675 memcpy(skb->data, sd->skb->data, len);
676 pci_dma_sync_single_for_device(adap->pdev,
677 pci_unmap_addr(sd, dma_addr), len,
678 PCI_DMA_FROMDEVICE);
679 } else if (!drop_thres)
680 goto use_orig_buf;
681recycle:
682 recycle_rx_buf(adap, fl, fl->cidx);
683 return skb;
684 }
685
686 if (unlikely(fl->credits < drop_thres))
687 goto recycle;
688
689use_orig_buf:
690 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
691 fl->buf_size, PCI_DMA_FROMDEVICE);
692 skb = sd->skb;
693 skb_put(skb, len);
694 __refill_fl(adap, fl);
695 return skb;
696}
697
698/**
699 * get_packet_pg - return the next ingress packet buffer from a free list
700 * @adap: the adapter that received the packet
701 * @fl: the SGE free list holding the packet
702 * @len: the packet length including any SGE padding
703 * @drop_thres: # of remaining buffers before we start dropping packets
704 *
705 * Get the next packet from a free list populated with page chunks.
706 * If the packet is small we make a copy and recycle the original buffer,
707 * otherwise we attach the original buffer as a page fragment to a fresh
708 * sk_buff. If a positive drop threshold is supplied packets are dropped
709 * and their buffers recycled if (a) the number of remaining buffers is
710 * under the threshold and the packet is too big to copy, or (b) there's
711 * no system memory.
712 *
713 * Note: this function is similar to @get_packet but deals with Rx buffers
714 * that are page chunks rather than sk_buffs.
715 */
716static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
717 unsigned int len, unsigned int drop_thres)
718{
719 struct sk_buff *skb = NULL;
720 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
721
722 if (len <= SGE_RX_COPY_THRES) {
723 skb = alloc_skb(len, GFP_ATOMIC);
724 if (likely(skb != NULL)) {
725 __skb_put(skb, len);
726 pci_dma_sync_single_for_cpu(adap->pdev,
727 pci_unmap_addr(sd, dma_addr), len,
728 PCI_DMA_FROMDEVICE);
729 memcpy(skb->data, sd->pg_chunk.va, len);
730 pci_dma_sync_single_for_device(adap->pdev,
731 pci_unmap_addr(sd, dma_addr), len,
732 PCI_DMA_FROMDEVICE);
733 } else if (!drop_thres)
734 return NULL;
735recycle:
736 fl->credits--;
737 recycle_rx_buf(adap, fl, fl->cidx);
738 return skb;
739 }
740
741 if (unlikely(fl->credits <= drop_thres))
742 goto recycle;
743
744 skb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
745 if (unlikely(!skb)) {
746 if (!drop_thres)
747 return NULL;
748 goto recycle;
749 }
750
751 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
752 fl->buf_size, PCI_DMA_FROMDEVICE);
753 __skb_put(skb, SGE_RX_PULL_LEN);
754 memcpy(skb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
755 skb_fill_page_desc(skb, 0, sd->pg_chunk.page,
756 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
757 len - SGE_RX_PULL_LEN);
758 skb->len = len;
759 skb->data_len = len - SGE_RX_PULL_LEN;
760 skb->truesize += skb->data_len;
761
762 fl->credits--;
763 /*
764 * We do not refill FLs here, we let the caller do it to overlap a
765 * prefetch.
766 */
767 return skb;
768}
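/*
 * Illustrative sketch of the skb built above for a large packet: the first
 * SGE_RX_PULL_LEN bytes are copied into the linear area and the page chunk
 * is attached as fragment 0, so the resulting layout satisfies:
 */
#if 0
static void example_check_pg_skb(const struct sk_buff *skb, unsigned int len)
{
	BUG_ON(skb_headlen(skb) != SGE_RX_PULL_LEN);	/* 128-byte pull   */
	BUG_ON(skb->data_len != len - SGE_RX_PULL_LEN);	/* rest in frags[0] */
	BUG_ON(skb->len != len);
}
#endif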
769
770/**
Divy Le Ray4d22de32007-01-18 22:04:14 -0500771 * get_imm_packet - return the next ingress packet buffer from a response
772 * @resp: the response descriptor containing the packet data
773 *
774 * Return a packet containing the immediate data of the given response.
775 */
776static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
777{
778 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
779
780 if (skb) {
781 __skb_put(skb, IMMED_PKT_SIZE);
Arnaldo Carvalho de Melo27d7ff42007-03-31 11:55:19 -0300782 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
Divy Le Ray4d22de32007-01-18 22:04:14 -0500783 }
784 return skb;
785}
786
787/**
788 * calc_tx_descs - calculate the number of Tx descriptors for a packet
789 * @skb: the packet
790 *
791 * Returns the number of Tx descriptors needed for the given Ethernet
792 * packet. Ethernet packets require addition of WR and CPL headers.
793 */
794static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
795{
796 unsigned int flits;
797
798 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
799 return 1;
800
801 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
802 if (skb_shinfo(skb)->gso_size)
803 flits++;
804 return flits_to_desc(flits);
805}
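/*
 * Worked example (illustrative only): a TSO packet with a linear header and
 * 3 page fragments needs sgl_len(3 + 1) + 2 + 1 = 9 flits, which fits in a
 * single Tx descriptor; 16 fragments push it to 29 flits and two descriptors.
 */
#if 0
static void calc_tx_descs_example(void)
{
	BUG_ON(flits_to_desc(sgl_len(3 + 1) + 3) != 1);	/* 9 flits  */
	BUG_ON(flits_to_desc(sgl_len(16 + 1) + 3) != 2);	/* 29 flits */
}
#endif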
806
807/**
808 * make_sgl - populate a scatter/gather list for a packet
809 * @skb: the packet
810 * @sgp: the SGL to populate
811 * @start: start address of skb main body data to include in the SGL
812 * @len: length of skb main body data to include in the SGL
813 * @pdev: the PCI device
814 *
815 * Generates a scatter/gather list for the buffers that make up a packet
816 * and returns the SGL size in 8-byte words. The caller must size the SGL
817 * appropriately.
818 */
819static inline unsigned int make_sgl(const struct sk_buff *skb,
820 struct sg_ent *sgp, unsigned char *start,
821 unsigned int len, struct pci_dev *pdev)
822{
823 dma_addr_t mapping;
824 unsigned int i, j = 0, nfrags;
825
826 if (len) {
827 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
828 sgp->len[0] = cpu_to_be32(len);
829 sgp->addr[0] = cpu_to_be64(mapping);
830 j = 1;
831 }
832
833 nfrags = skb_shinfo(skb)->nr_frags;
834 for (i = 0; i < nfrags; i++) {
835 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
836
837 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
838 frag->size, PCI_DMA_TODEVICE);
839 sgp->len[j] = cpu_to_be32(frag->size);
840 sgp->addr[j] = cpu_to_be64(mapping);
841 j ^= 1;
842 if (j == 0)
843 ++sgp;
844 }
845 if (j)
846 sgp->len[j] = 0;
847 return ((nfrags + (len != 0)) * 3) / 2 + j;
848}
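/*
 * Note (illustrative): because j above ends up equal to the number of SGL
 * entries modulo 2, the value returned is simply sgl_len() of the number of
 * buffers mapped, i.e. the linear part (if any) plus the page fragments.
 */
#if 0
	/* e.g. non-empty linear part plus 5 fragments => 6 entries => 9 flits */
	BUG_ON(sgl_len(5 + 1) != 9);
#endif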
849
850/**
851 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
852 * @adap: the adapter
853 * @q: the Tx queue
854 *
855 * Ring the doorbell if a Tx queue is asleep. There is a natural race
856 * where the HW may go to sleep just after we check; in that case the
857 * interrupt handler will detect the outstanding Tx packet
858 * and ring the doorbell for us.
859 *
860 * When GTS is disabled we unconditionally ring the doorbell.
861 */
862static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
863{
864#if USE_GTS
865 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
866 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
867 set_bit(TXQ_LAST_PKT_DB, &q->flags);
868 t3_write_reg(adap, A_SG_KDOORBELL,
869 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
870 }
871#else
872 wmb(); /* write descriptors before telling HW */
873 t3_write_reg(adap, A_SG_KDOORBELL,
874 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
875#endif
876}
877
878static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
879{
880#if SGE_NUM_GENBITS == 2
881 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
882#endif
883}
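/*
 * Conceptual sketch (an interpretation, not taken from HW documentation):
 * the generation bits let the SGE tell freshly written descriptors from
 * stale ones left over from the previous pass around the ring, because the
 * producer flips its generation value on every wrap:
 */
#if 0
	if (++q->pidx == q->size) {	/* producer index wraps ...     */
		q->pidx = 0;
		q->gen ^= 1;		/* ... and the generation flips */
	}
#endif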
884
885/**
886 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
887 * @ndesc: number of Tx descriptors spanned by the SGL
888 * @skb: the packet corresponding to the WR
889 * @d: first Tx descriptor to be written
890 * @pidx: index of above descriptors
891 * @q: the SGE Tx queue
892 * @sgl: the SGL
893 * @flits: number of flits to the start of the SGL in the first descriptor
894 * @sgl_flits: the SGL size in flits
895 * @gen: the Tx descriptor generation
896 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
897 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
898 *
899 * Write a work request header and an associated SGL. If the SGL is
900 * small enough to fit into one Tx descriptor it has already been written
901 * and we just need to write the WR header. Otherwise we distribute the
902 * SGL across the number of descriptors it spans.
903 */
904static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
905 struct tx_desc *d, unsigned int pidx,
906 const struct sge_txq *q,
907 const struct sg_ent *sgl,
908 unsigned int flits, unsigned int sgl_flits,
Al Virofb8e4442007-08-23 03:04:12 -0400909 unsigned int gen, __be32 wr_hi,
910 __be32 wr_lo)
Divy Le Ray4d22de32007-01-18 22:04:14 -0500911{
912 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
913 struct tx_sw_desc *sd = &q->sdesc[pidx];
914
915 sd->skb = skb;
916 if (need_skb_unmap()) {
Divy Le Ray23561c92007-11-16 11:22:05 -0800917 sd->fragidx = 0;
918 sd->addr_idx = 0;
919 sd->sflit = flits;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500920 }
921
922 if (likely(ndesc == 1)) {
Divy Le Ray23561c92007-11-16 11:22:05 -0800923 sd->eop = 1;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500924 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
925 V_WR_SGLSFLT(flits)) | wr_hi;
926 wmb();
927 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
928 V_WR_GEN(gen)) | wr_lo;
929 wr_gen2(d, gen);
930 } else {
931 unsigned int ogen = gen;
932 const u64 *fp = (const u64 *)sgl;
933 struct work_request_hdr *wp = wrp;
934
935 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
936 V_WR_SGLSFLT(flits)) | wr_hi;
937
938 while (sgl_flits) {
939 unsigned int avail = WR_FLITS - flits;
940
941 if (avail > sgl_flits)
942 avail = sgl_flits;
943 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
944 sgl_flits -= avail;
945 ndesc--;
946 if (!sgl_flits)
947 break;
948
949 fp += avail;
950 d++;
Divy Le Ray23561c92007-11-16 11:22:05 -0800951 sd->eop = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500952 sd++;
953 if (++pidx == q->size) {
954 pidx = 0;
955 gen ^= 1;
956 d = q->desc;
957 sd = q->sdesc;
958 }
959
960 sd->skb = skb;
961 wrp = (struct work_request_hdr *)d;
962 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
963 V_WR_SGLSFLT(1)) | wr_hi;
964 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
965 sgl_flits + 1)) |
966 V_WR_GEN(gen)) | wr_lo;
967 wr_gen2(d, gen);
968 flits = 1;
969 }
Divy Le Ray23561c92007-11-16 11:22:05 -0800970 sd->eop = 1;
Divy Le Ray4d22de32007-01-18 22:04:14 -0500971 wrp->wr_hi |= htonl(F_WR_EOP);
972 wmb();
973 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
974 wr_gen2((struct tx_desc *)wp, ogen);
975 WARN_ON(ndesc != 0);
976 }
977}
978
979/**
980 * write_tx_pkt_wr - write a TX_PKT work request
981 * @adap: the adapter
982 * @skb: the packet to send
983 * @pi: the egress interface
984 * @pidx: index of the first Tx descriptor to write
985 * @gen: the generation value to use
986 * @q: the Tx queue
987 * @ndesc: number of descriptors the packet will occupy
988 * @compl: the value of the COMPL bit to use
989 *
990 * Generate a TX_PKT work request to send the supplied packet.
991 */
992static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
993 const struct port_info *pi,
994 unsigned int pidx, unsigned int gen,
995 struct sge_txq *q, unsigned int ndesc,
996 unsigned int compl)
997{
998 unsigned int flits, sgl_flits, cntrl, tso_info;
999 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1000 struct tx_desc *d = &q->desc[pidx];
1001 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1002
1003 cpl->len = htonl(skb->len | 0x80000000);
1004 cntrl = V_TXPKT_INTF(pi->port_id);
1005
1006 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1007 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1008
1009 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1010 if (tso_info) {
1011 int eth_type;
1012 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1013
1014 d->flit[2] = 0;
1015 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1016 hdr->cntrl = htonl(cntrl);
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001017 eth_type = skb_network_offset(skb) == ETH_HLEN ?
Divy Le Ray4d22de32007-01-18 22:04:14 -05001018 CPL_ETH_II : CPL_ETH_II_VLAN;
1019 tso_info |= V_LSO_ETH_TYPE(eth_type) |
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001020 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07001021 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001022 hdr->lso_info = htonl(tso_info);
1023 flits = 3;
1024 } else {
1025 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1026 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1027 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1028 cpl->cntrl = htonl(cntrl);
1029
1030 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1031 q->sdesc[pidx].skb = NULL;
1032 if (!skb->data_len)
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03001033 skb_copy_from_linear_data(skb, &d->flit[2],
1034 skb->len);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001035 else
1036 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1037
1038 flits = (skb->len + 7) / 8 + 2;
1039 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1040 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1041 | F_WR_SOP | F_WR_EOP | compl);
1042 wmb();
1043 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1044 V_WR_TID(q->token));
1045 wr_gen2(d, gen);
1046 kfree_skb(skb);
1047 return;
1048 }
1049
1050 flits = 2;
1051 }
1052
1053 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1054 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001055
1056 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1057 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1058 htonl(V_WR_TID(q->token)));
1059}
1060
1061/**
1062 * eth_xmit - add a packet to the Ethernet Tx queue
1063 * @skb: the packet
1064 * @dev: the egress net device
1065 *
1066 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1067 */
1068int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1069{
1070 unsigned int ndesc, pidx, credits, gen, compl;
1071 const struct port_info *pi = netdev_priv(dev);
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001072 struct adapter *adap = pi->adapter;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001073 struct sge_qset *qs = pi->qs;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001074 struct sge_txq *q = &qs->txq[TXQ_ETH];
1075
1076 /*
1077 * The chip min packet length is 9 octets but play safe and reject
1078 * anything shorter than an Ethernet header.
1079 */
1080 if (unlikely(skb->len < ETH_HLEN)) {
1081 dev_kfree_skb(skb);
1082 return NETDEV_TX_OK;
1083 }
1084
1085 spin_lock(&q->lock);
1086 reclaim_completed_tx(adap, q);
1087
1088 credits = q->size - q->in_use;
1089 ndesc = calc_tx_descs(skb);
1090
1091 if (unlikely(credits < ndesc)) {
1092 if (!netif_queue_stopped(dev)) {
1093 netif_stop_queue(dev);
1094 set_bit(TXQ_ETH, &qs->txq_stopped);
1095 q->stops++;
1096 dev_err(&adap->pdev->dev,
1097 "%s: Tx ring %u full while queue awake!\n",
1098 dev->name, q->cntxt_id & 7);
1099 }
1100 spin_unlock(&q->lock);
1101 return NETDEV_TX_BUSY;
1102 }
1103
1104 q->in_use += ndesc;
1105 if (unlikely(credits - ndesc < q->stop_thres)) {
1106 q->stops++;
1107 netif_stop_queue(dev);
1108 set_bit(TXQ_ETH, &qs->txq_stopped);
1109#if !USE_GTS
1110 if (should_restart_tx(q) &&
1111 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1112 q->restarts++;
1113 netif_wake_queue(dev);
1114 }
1115#endif
1116 }
1117
1118 gen = q->gen;
1119 q->unacked += ndesc;
1120 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1121 q->unacked &= 7;
1122 pidx = q->pidx;
1123 q->pidx += ndesc;
1124 if (q->pidx >= q->size) {
1125 q->pidx -= q->size;
1126 q->gen ^= 1;
1127 }
1128
1129 /* update port statistics */
1130 if (skb->ip_summed == CHECKSUM_COMPLETE)
1131 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1132 if (skb_shinfo(skb)->gso_size)
1133 qs->port_stats[SGE_PSTAT_TSO]++;
1134 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1135 qs->port_stats[SGE_PSTAT_VLANINS]++;
1136
1137 dev->trans_start = jiffies;
1138 spin_unlock(&q->lock);
1139
1140 /*
1141 * We do not use Tx completion interrupts to free DMAd Tx packets.
1142 * This is good for performance but means that we rely on new Tx
1143 * packets arriving to run the destructors of completed packets,
1144 * which open up space in their sockets' send queues. Sometimes
1145 * we do not get such new packets causing Tx to stall. A single
1146 * UDP transmitter is a good example of this situation. We have
1147 * a clean up timer that periodically reclaims completed packets
1148 * but it doesn't run often enough (nor do we want it to) to prevent
1149 * lengthy stalls. A solution to this problem is to run the
1150 * destructor early, after the packet is queued but before it's DMAd.
1151 * A downside is that we lie to socket memory accounting, but the amount
1152 * of extra memory is reasonable (limited by the number of Tx
1153 * descriptors), the packets do actually get freed quickly by new
1154 * packets almost always, and for protocols like TCP that wait for
1155 * acks to really free up the data the extra memory is even less.
1156 * On the positive side we run the destructors on the sending CPU
1157 * rather than on a potentially different completing CPU, usually a
1158 * good thing. We also run them without holding our Tx queue lock,
1159 * unlike what reclaim_completed_tx() would otherwise do.
1160 *
1161 * Run the destructor before telling the DMA engine about the packet
1162 * to make sure it doesn't complete and get freed prematurely.
1163 */
1164 if (likely(!skb_shared(skb)))
1165 skb_orphan(skb);
1166
1167 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1168 check_ring_tx_db(adap, q);
1169 return NETDEV_TX_OK;
1170}
1171
1172/**
1173 * write_imm - write a packet into a Tx descriptor as immediate data
1174 * @d: the Tx descriptor to write
1175 * @skb: the packet
1176 * @len: the length of packet data to write as immediate data
1177 * @gen: the generation bit value to write
1178 *
1179 * Writes a packet as immediate data into a Tx descriptor. The packet
1180 * contains a work request at its beginning. We must write the packet
Divy Le Ray27186dc2007-08-21 20:49:15 -07001181 * carefully so the SGE doesn't read it accidentally before it's written
1182 * in its entirety.
Divy Le Ray4d22de32007-01-18 22:04:14 -05001183 */
1184static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1185 unsigned int len, unsigned int gen)
1186{
1187 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1188 struct work_request_hdr *to = (struct work_request_hdr *)d;
1189
Divy Le Ray27186dc2007-08-21 20:49:15 -07001190 if (likely(!skb->data_len))
1191 memcpy(&to[1], &from[1], len - sizeof(*from));
1192 else
1193 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1194
Divy Le Ray4d22de32007-01-18 22:04:14 -05001195 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1196 V_WR_BCNTLFLT(len & 7));
1197 wmb();
1198 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1199 V_WR_LEN((len + 7) / 8));
1200 wr_gen2(d, gen);
1201 kfree_skb(skb);
1202}
1203
1204/**
1205 * check_desc_avail - check descriptor availability on a send queue
1206 * @adap: the adapter
1207 * @q: the send queue
1208 * @skb: the packet needing the descriptors
1209 * @ndesc: the number of Tx descriptors needed
1210 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1211 *
1212 * Checks if the requested number of Tx descriptors is available on an
1213 * SGE send queue. If the queue is already suspended or not enough
1214 * descriptors are available the packet is queued for later transmission.
1215 * Must be called with the Tx queue locked.
1216 *
1217 * Returns 0 if enough descriptors are available, 1 if there aren't
1218 * enough descriptors and the packet has been queued, and 2 if the caller
1219 * needs to retry because there weren't enough descriptors at the
1220 * beginning of the call but some freed up in the mean time.
1221 */
1222static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1223 struct sk_buff *skb, unsigned int ndesc,
1224 unsigned int qid)
1225{
1226 if (unlikely(!skb_queue_empty(&q->sendq))) {
1227 addq_exit:__skb_queue_tail(&q->sendq, skb);
1228 return 1;
1229 }
1230 if (unlikely(q->size - q->in_use < ndesc)) {
1231 struct sge_qset *qs = txq_to_qset(q, qid);
1232
1233 set_bit(qid, &qs->txq_stopped);
1234 smp_mb__after_clear_bit();
1235
1236 if (should_restart_tx(q) &&
1237 test_and_clear_bit(qid, &qs->txq_stopped))
1238 return 2;
1239
1240 q->stops++;
1241 goto addq_exit;
1242 }
1243 return 0;
1244}
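/*
 * Sketch of the calling convention described above (the real users are
 * ctrl_xmit() and ofld_xmit() below); fragment of a hypothetical sender:
 */
#if 0
 again:	reclaim_completed_tx(adap, q);
	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
	if (ret == 2)
		goto again;		/* descriptors freed up meanwhile, retry */
	if (ret == 1)
		return NET_XMIT_CN;	/* packet queued on q->sendq for later */
	/* ret == 0: enough room, write the work request now */
#endif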
1245
1246/**
1247 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1248 * @q: the SGE control Tx queue
1249 *
1250 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1251 * that send only immediate data (presently just the control queues) and
1252 * thus do not have any sk_buffs to release.
1253 */
1254static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1255{
1256 unsigned int reclaim = q->processed - q->cleaned;
1257
1258 q->in_use -= reclaim;
1259 q->cleaned += reclaim;
1260}
1261
1262static inline int immediate(const struct sk_buff *skb)
1263{
Divy Le Ray27186dc2007-08-21 20:49:15 -07001264 return skb->len <= WR_LEN;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001265}
1266
1267/**
1268 * ctrl_xmit - send a packet through an SGE control Tx queue
1269 * @adap: the adapter
1270 * @q: the control queue
1271 * @skb: the packet
1272 *
1273 * Send a packet through an SGE control Tx queue. Packets sent through
1274 * a control queue must fit entirely as immediate data in a single Tx
1275 * descriptor and have no page fragments.
1276 */
1277static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1278 struct sk_buff *skb)
1279{
1280 int ret;
1281 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1282
1283 if (unlikely(!immediate(skb))) {
1284 WARN_ON(1);
1285 dev_kfree_skb(skb);
1286 return NET_XMIT_SUCCESS;
1287 }
1288
1289 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1290 wrp->wr_lo = htonl(V_WR_TID(q->token));
1291
1292 spin_lock(&q->lock);
1293 again:reclaim_completed_tx_imm(q);
1294
1295 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1296 if (unlikely(ret)) {
1297 if (ret == 1) {
1298 spin_unlock(&q->lock);
1299 return NET_XMIT_CN;
1300 }
1301 goto again;
1302 }
1303
1304 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1305
1306 q->in_use++;
1307 if (++q->pidx >= q->size) {
1308 q->pidx = 0;
1309 q->gen ^= 1;
1310 }
1311 spin_unlock(&q->lock);
1312 wmb();
1313 t3_write_reg(adap, A_SG_KDOORBELL,
1314 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1315 return NET_XMIT_SUCCESS;
1316}
1317
1318/**
1319 * restart_ctrlq - restart a suspended control queue
1320 * @qs: the queue set containing the control queue
1321 *
1322 * Resumes transmission on a suspended Tx control queue.
1323 */
1324static void restart_ctrlq(unsigned long data)
1325{
1326 struct sk_buff *skb;
1327 struct sge_qset *qs = (struct sge_qset *)data;
1328 struct sge_txq *q = &qs->txq[TXQ_CTRL];
Divy Le Ray4d22de32007-01-18 22:04:14 -05001329
1330 spin_lock(&q->lock);
1331 again:reclaim_completed_tx_imm(q);
1332
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001333 while (q->in_use < q->size &&
1334 (skb = __skb_dequeue(&q->sendq)) != NULL) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001335
1336 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1337
1338 if (++q->pidx >= q->size) {
1339 q->pidx = 0;
1340 q->gen ^= 1;
1341 }
1342 q->in_use++;
1343 }
1344
1345 if (!skb_queue_empty(&q->sendq)) {
1346 set_bit(TXQ_CTRL, &qs->txq_stopped);
1347 smp_mb__after_clear_bit();
1348
1349 if (should_restart_tx(q) &&
1350 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1351 goto again;
1352 q->stops++;
1353 }
1354
1355 spin_unlock(&q->lock);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001356 t3_write_reg(qs->adap, A_SG_KDOORBELL,
Divy Le Ray4d22de32007-01-18 22:04:14 -05001357 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1358}
1359
Divy Le Ray14ab9892007-01-30 19:43:50 -08001360/*
1361 * Send a management message through control queue 0
1362 */
1363int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1364{
1365 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1366}
1367
Divy Le Ray4d22de32007-01-18 22:04:14 -05001368/**
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001369 * deferred_unmap_destructor - unmap a packet when it is freed
1370 * @skb: the packet
1371 *
1372 * This is the packet destructor used for Tx packets that need to remain
1373 * mapped until they are freed rather than until their Tx descriptors are
1374 * freed.
1375 */
1376static void deferred_unmap_destructor(struct sk_buff *skb)
1377{
1378 int i;
1379 const dma_addr_t *p;
1380 const struct skb_shared_info *si;
1381 const struct deferred_unmap_info *dui;
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001382
1383 dui = (struct deferred_unmap_info *)skb->head;
1384 p = dui->addr;
1385
Divy Le Ray23561c92007-11-16 11:22:05 -08001386 if (skb->tail - skb->transport_header)
1387 pci_unmap_single(dui->pdev, *p++,
1388 skb->tail - skb->transport_header,
1389 PCI_DMA_TODEVICE);
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001390
1391 si = skb_shinfo(skb);
1392 for (i = 0; i < si->nr_frags; i++)
1393 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1394 PCI_DMA_TODEVICE);
1395}
1396
1397static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1398 const struct sg_ent *sgl, int sgl_flits)
1399{
1400 dma_addr_t *p;
1401 struct deferred_unmap_info *dui;
1402
1403 dui = (struct deferred_unmap_info *)skb->head;
1404 dui->pdev = pdev;
1405 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1406 *p++ = be64_to_cpu(sgl->addr[0]);
1407 *p++ = be64_to_cpu(sgl->addr[1]);
1408 }
1409 if (sgl_flits)
1410 *p = be64_to_cpu(sgl->addr[0]);
1411}
1412
1413/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05001414 * write_ofld_wr - write an offload work request
1415 * @adap: the adapter
1416 * @skb: the packet to send
1417 * @q: the Tx queue
1418 * @pidx: index of the first Tx descriptor to write
1419 * @gen: the generation value to use
1420 * @ndesc: number of descriptors the packet will occupy
1421 *
1422 * Write an offload work request to send the supplied packet. The packet
1423 * data already carry the work request with most fields populated.
1424 */
1425static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1426 struct sge_txq *q, unsigned int pidx,
1427 unsigned int gen, unsigned int ndesc)
1428{
1429 unsigned int sgl_flits, flits;
1430 struct work_request_hdr *from;
1431 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1432 struct tx_desc *d = &q->desc[pidx];
1433
1434 if (immediate(skb)) {
1435 q->sdesc[pidx].skb = NULL;
1436 write_imm(d, skb, skb->len, gen);
1437 return;
1438 }
1439
1440 /* Only TX_DATA builds SGLs */
1441
1442 from = (struct work_request_hdr *)skb->data;
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001443 memcpy(&d->flit[1], &from[1],
1444 skb_transport_offset(skb) - sizeof(*from));
Divy Le Ray4d22de32007-01-18 22:04:14 -05001445
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001446 flits = skb_transport_offset(skb) / 8;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001447 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001448 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001449 skb->tail - skb->transport_header,
Divy Le Ray4d22de32007-01-18 22:04:14 -05001450 adap->pdev);
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001451 if (need_skb_unmap()) {
1452 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1453 skb->destructor = deferred_unmap_destructor;
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001454 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001455
1456 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1457 gen, from->wr_hi, from->wr_lo);
1458}
1459
1460/**
1461 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1462 * @skb: the packet
1463 *
1464 * Returns the number of Tx descriptors needed for the given offload
1465 * packet. These packets are already fully constructed.
1466 */
1467static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1468{
Divy Le Ray27186dc2007-08-21 20:49:15 -07001469 unsigned int flits, cnt;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001470
Divy Le Ray27186dc2007-08-21 20:49:15 -07001471 if (skb->len <= WR_LEN)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001472 return 1; /* packet fits as immediate data */
1473
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001474 flits = skb_transport_offset(skb) / 8; /* headers */
Divy Le Ray27186dc2007-08-21 20:49:15 -07001475 cnt = skb_shinfo(skb)->nr_frags;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001476 if (skb->tail != skb->transport_header)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001477 cnt++;
1478 return flits_to_desc(flits + sgl_len(cnt));
1479}
1480
1481/**
1482 * ofld_xmit - send a packet through an offload queue
1483 * @adap: the adapter
1484 * @q: the Tx offload queue
1485 * @skb: the packet
1486 *
1487 * Send an offload packet through an SGE offload queue.
1488 */
1489static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1490 struct sk_buff *skb)
1491{
1492 int ret;
1493 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1494
1495 spin_lock(&q->lock);
1496 again:reclaim_completed_tx(adap, q);
1497
1498 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1499 if (unlikely(ret)) {
1500 if (ret == 1) {
1501 skb->priority = ndesc; /* save for restart */
1502 spin_unlock(&q->lock);
1503 return NET_XMIT_CN;
1504 }
1505 goto again;
1506 }
1507
1508 gen = q->gen;
1509 q->in_use += ndesc;
1510 pidx = q->pidx;
1511 q->pidx += ndesc;
1512 if (q->pidx >= q->size) {
1513 q->pidx -= q->size;
1514 q->gen ^= 1;
1515 }
1516 spin_unlock(&q->lock);
1517
1518 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1519 check_ring_tx_db(adap, q);
1520 return NET_XMIT_SUCCESS;
1521}
1522
1523/**
1524 * restart_offloadq - restart a suspended offload queue
1525 * @qs: the queue set containing the offload queue
1526 *
1527 * Resumes transmission on a suspended Tx offload queue.
1528 */
1529static void restart_offloadq(unsigned long data)
1530{
1531 struct sk_buff *skb;
1532 struct sge_qset *qs = (struct sge_qset *)data;
1533 struct sge_txq *q = &qs->txq[TXQ_OFLD];
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001534 const struct port_info *pi = netdev_priv(qs->netdev);
1535 struct adapter *adap = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001536
1537 spin_lock(&q->lock);
1538 again:reclaim_completed_tx(adap, q);
1539
1540 while ((skb = skb_peek(&q->sendq)) != NULL) {
1541 unsigned int gen, pidx;
1542 unsigned int ndesc = skb->priority;
1543
1544 if (unlikely(q->size - q->in_use < ndesc)) {
1545 set_bit(TXQ_OFLD, &qs->txq_stopped);
1546 smp_mb__after_clear_bit();
1547
1548 if (should_restart_tx(q) &&
1549 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1550 goto again;
1551 q->stops++;
1552 break;
1553 }
1554
1555 gen = q->gen;
1556 q->in_use += ndesc;
1557 pidx = q->pidx;
1558 q->pidx += ndesc;
1559 if (q->pidx >= q->size) {
1560 q->pidx -= q->size;
1561 q->gen ^= 1;
1562 }
1563 __skb_unlink(skb, &q->sendq);
1564 spin_unlock(&q->lock);
1565
1566 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1567 spin_lock(&q->lock);
1568 }
1569 spin_unlock(&q->lock);
1570
1571#if USE_GTS
1572 set_bit(TXQ_RUNNING, &q->flags);
1573 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1574#endif
1575 t3_write_reg(adap, A_SG_KDOORBELL,
1576 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1577}
1578
1579/**
1580 * queue_set - return the queue set a packet should use
1581 * @skb: the packet
1582 *
1583 * Maps a packet to the SGE queue set it should use. The desired queue
1584 * set is carried in bits 1-3 in the packet's priority.
1585 */
1586static inline int queue_set(const struct sk_buff *skb)
1587{
1588 return skb->priority >> 1;
1589}
1590
1591/**
1592 * is_ctrl_pkt - return whether an offload packet is a control packet
1593 * @skb: the packet
1594 *
1595 * Determines whether an offload packet should use an OFLD or a CTRL
1596 * Tx queue. This is indicated by bit 0 in the packet's priority.
1597 */
1598static inline int is_ctrl_pkt(const struct sk_buff *skb)
1599{
1600 return skb->priority & 1;
1601}
1602
1603/**
1604 * t3_offload_tx - send an offload packet
1605 * @tdev: the offload device to send to
1606 * @skb: the packet
1607 *
1608 * Sends an offload packet. We use the packet priority to select the
1609 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1610 * should be sent as regular or control, bits 1-3 select the queue set.
1611 */
1612int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1613{
1614 struct adapter *adap = tdev2adap(tdev);
1615 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1616
1617 if (unlikely(is_ctrl_pkt(skb)))
1618 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1619
1620 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1621}
1622
1623/**
1624 * offload_enqueue - add an offload packet to an SGE offload receive queue
1625 * @q: the SGE response queue
1626 * @skb: the packet
1627 *
1628 * Add a new offload packet to an SGE response queue's offload packet
1629 * queue. If the packet is the first on the queue it schedules the RX
1630 * softirq to process the queue.
1631 */
1632static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1633{
1634 skb->next = skb->prev = NULL;
1635 if (q->rx_tail)
1636 q->rx_tail->next = skb;
1637 else {
1638 struct sge_qset *qs = rspq_to_qset(q);
1639
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001640 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001641 q->rx_head = skb;
1642 }
1643 q->rx_tail = skb;
1644}
1645
1646/**
1647 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1648 * @tdev: the offload device that will be receiving the packets
1649 * @q: the SGE response queue that assembled the bundle
1650 * @skbs: the partial bundle
1651 * @n: the number of packets in the bundle
1652 *
1653 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1654 */
1655static inline void deliver_partial_bundle(struct t3cdev *tdev,
1656 struct sge_rspq *q,
1657 struct sk_buff *skbs[], int n)
1658{
1659 if (n) {
1660 q->offload_bundles++;
1661 tdev->recv(tdev, skbs, n);
1662 }
1663}
1664
1665/**
1666 * ofld_poll - NAPI handler for offload packets in interrupt mode
1667 * @dev: the network device doing the polling
1668 * @budget: polling budget
1669 *
1670 * The NAPI handler for offload packets when a response queue is serviced
1671 * by the hard interrupt handler, i.e., when it's operating in non-polling
1672 * mode. Creates small packet batches and sends them through the offload
1673 * receive handler. Batches need to be of modest size as we do prefetches
1674 * on the packets in each.
1675 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001676static int ofld_poll(struct napi_struct *napi, int budget)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001677{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001678 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001679 struct sge_rspq *q = &qs->rspq;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001680 struct adapter *adapter = qs->adap;
1681 int work_done = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001682
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001683 while (work_done < budget) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001684 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1685 int ngathered;
1686
1687 spin_lock_irq(&q->lock);
1688 head = q->rx_head;
1689 if (!head) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001690 napi_complete(napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001691 spin_unlock_irq(&q->lock);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001692 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001693 }
1694
1695 tail = q->rx_tail;
1696 q->rx_head = q->rx_tail = NULL;
1697 spin_unlock_irq(&q->lock);
1698
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001699 for (ngathered = 0; work_done < budget && head; work_done++) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001700 prefetch(head->data);
1701 skbs[ngathered] = head;
1702 head = head->next;
1703 skbs[ngathered]->next = NULL;
1704 if (++ngathered == RX_BUNDLE_SIZE) {
1705 q->offload_bundles++;
1706 adapter->tdev.recv(&adapter->tdev, skbs,
1707 ngathered);
1708 ngathered = 0;
1709 }
1710 }
1711 if (head) { /* splice remaining packets back onto Rx queue */
1712 spin_lock_irq(&q->lock);
1713 tail->next = q->rx_head;
1714 if (!q->rx_head)
1715 q->rx_tail = tail;
1716 q->rx_head = head;
1717 spin_unlock_irq(&q->lock);
1718 }
1719 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1720 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001721
1722 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001723}
1724
1725/**
1726 * rx_offload - process a received offload packet
1727 * @tdev: the offload device receiving the packet
1728 * @rq: the response queue that received the packet
1729 * @skb: the packet
1730 * @rx_gather: a gather list of packets if we are building a bundle
1731 * @gather_idx: index of the next available slot in the bundle
1732 *
1733 * Process an ingress offload packet and add it to the offload ingress
1734 * queue. Returns the index of the next available slot in the bundle.
1735 */
1736static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1737 struct sk_buff *skb, struct sk_buff *rx_gather[],
1738 unsigned int gather_idx)
1739{
1740 rq->offload_pkts++;
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001741 skb_reset_mac_header(skb);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001742 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001743 skb_reset_transport_header(skb);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001744
1745 if (rq->polling) {
1746 rx_gather[gather_idx++] = skb;
1747 if (gather_idx == RX_BUNDLE_SIZE) {
1748 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1749 gather_idx = 0;
1750 rq->offload_bundles++;
1751 }
1752 } else
1753 offload_enqueue(rq, skb);
1754
1755 return gather_idx;
1756}
1757
1758/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05001759 * restart_tx - check whether to restart suspended Tx queues
1760 * @qs: the queue set to resume
1761 *
1762 * Restarts suspended Tx queues of an SGE queue set if they have enough
1763 * free resources to resume operation.
1764 */
1765static void restart_tx(struct sge_qset *qs)
1766{
1767 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1768 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1769 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1770 qs->txq[TXQ_ETH].restarts++;
1771 if (netif_running(qs->netdev))
1772 netif_wake_queue(qs->netdev);
1773 }
1774
1775 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1776 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1777 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1778 qs->txq[TXQ_OFLD].restarts++;
1779 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1780 }
1781 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1782 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1783 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1784 qs->txq[TXQ_CTRL].restarts++;
1785 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1786 }
1787}
1788
1789/**
1790 * rx_eth - process an ingress ethernet packet
1791 * @adap: the adapter
1792 * @rq: the response queue that received the packet
1793 * @skb: the packet
1794 * @pad: amount of padding at the start of the buffer
1795 *
1796 * Process an ingress ethernet packet and deliver it to the stack.
1797 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1798 * if it was immediate data in a response.
1799 */
1800static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1801 struct sk_buff *skb, int pad)
1802{
1803 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1804 struct port_info *pi;
1805
Divy Le Ray4d22de32007-01-18 22:04:14 -05001806 skb_pull(skb, sizeof(*p) + pad);
Arnaldo Carvalho de Melo4c13eb62007-04-25 17:40:23 -07001807 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
Divy Le Raye360b562007-05-30 10:01:29 -07001808 skb->dev->last_rx = jiffies;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001809 pi = netdev_priv(skb->dev);
1810 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1811 !p->fragment) {
1812 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1813 skb->ip_summed = CHECKSUM_UNNECESSARY;
1814 } else
1815 skb->ip_summed = CHECKSUM_NONE;
1816
1817 if (unlikely(p->vlan_valid)) {
1818 struct vlan_group *grp = pi->vlan_grp;
1819
1820 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1821 if (likely(grp))
1822 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1823 rq->polling);
1824 else
1825 dev_kfree_skb_any(skb);
1826 } else if (rq->polling)
1827 netif_receive_skb(skb);
1828 else
1829 netif_rx(skb);
1830}
1831
1832/**
1833 * handle_rsp_cntrl_info - handles control information in a response
1834 * @qs: the queue set corresponding to the response
1835 * @flags: the response control flags
Divy Le Ray4d22de32007-01-18 22:04:14 -05001836 *
1837 * Handles the control information of an SGE response, such as GTS
1838 * indications and completion credits for the queue set's Tx queues.
Divy Le Ray6195c712007-01-30 19:43:56 -08001839 * HW coalesces credits; we don't do any extra SW coalescing.
Divy Le Ray4d22de32007-01-18 22:04:14 -05001840 */
Divy Le Ray6195c712007-01-30 19:43:56 -08001841static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001842{
1843 unsigned int credits;
1844
1845#if USE_GTS
1846 if (flags & F_RSPD_TXQ0_GTS)
1847 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1848#endif
1849
Divy Le Ray4d22de32007-01-18 22:04:14 -05001850 credits = G_RSPD_TXQ0_CR(flags);
1851 if (credits)
1852 qs->txq[TXQ_ETH].processed += credits;
1853
Divy Le Ray6195c712007-01-30 19:43:56 -08001854 credits = G_RSPD_TXQ2_CR(flags);
1855 if (credits)
1856 qs->txq[TXQ_CTRL].processed += credits;
1857
Divy Le Ray4d22de32007-01-18 22:04:14 -05001858# if USE_GTS
1859 if (flags & F_RSPD_TXQ1_GTS)
1860 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1861# endif
Divy Le Ray6195c712007-01-30 19:43:56 -08001862 credits = G_RSPD_TXQ1_CR(flags);
1863 if (credits)
1864 qs->txq[TXQ_OFLD].processed += credits;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001865}
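
/*
 * Illustrative note: a response's flags word carries up to three completion
 * credit counts, one per Tx queue of the set, decoded exactly as above:
 *
 *	G_RSPD_TXQ0_CR(flags)	-> credits for TXQ_ETH
 *	G_RSPD_TXQ1_CR(flags)	-> credits for TXQ_OFLD
 *	G_RSPD_TXQ2_CR(flags)	-> credits for TXQ_CTRL
 *
 * Such credit/GTS-only responses, with no packet data attached, are the
 * "pure" responses handled separately in process_pure_responses() below.
 */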
1866
1867/**
1868 * check_ring_db - check if we need to ring any doorbells
1869 * @adapter: the adapter
1870 * @qs: the queue set whose Tx queues are to be examined
1871 * @sleeping: indicates which Tx queue sent GTS
1872 *
1873 * Checks if some of a queue set's Tx queues need to ring their doorbells
1874 * to resume transmission after idling while they still have unprocessed
1875 * descriptors.
1876 */
1877static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1878 unsigned int sleeping)
1879{
1880 if (sleeping & F_RSPD_TXQ0_GTS) {
1881 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1882
1883 if (txq->cleaned + txq->in_use != txq->processed &&
1884 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1885 set_bit(TXQ_RUNNING, &txq->flags);
1886 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1887 V_EGRCNTX(txq->cntxt_id));
1888 }
1889 }
1890
1891 if (sleeping & F_RSPD_TXQ1_GTS) {
1892 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1893
1894 if (txq->cleaned + txq->in_use != txq->processed &&
1895 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1896 set_bit(TXQ_RUNNING, &txq->flags);
1897 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1898 V_EGRCNTX(txq->cntxt_id));
1899 }
1900 }
1901}
1902
1903/**
1904 * is_new_response - check if a response is newly written
1905 * @r: the response descriptor
1906 * @q: the response queue
1907 *
1908 * Returns true if a response descriptor contains a yet unprocessed
1909 * response.
1910 */
1911static inline int is_new_response(const struct rsp_desc *r,
1912 const struct sge_rspq *q)
1913{
1914 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1915}
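
/*
 * Illustrative sketch, not part of the driver: the generation bit lets the
 * driver detect new entries without a producer index from the hardware.
 * Each time the consumer index wraps, q->gen is flipped (see the wrap
 * handling in process_responses()), and the hardware writes the matching
 * generation into each descriptor's intr_gen field, so a consumer loop is
 * simply:
 *
 *	while (is_new_response(&q->desc[q->cidx], q)) {
 *		... consume q->desc[q->cidx] ...
 *		if (++q->cidx == q->size) {
 *			q->cidx = 0;
 *			q->gen ^= 1;
 *		}
 *	}
 */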
1916
1917#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1918#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1919 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1920 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1921 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1922
1923/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
1924#define NOMEM_INTR_DELAY 2500
1925
1926/**
1927 * process_responses - process responses from an SGE response queue
1928 * @adap: the adapter
1929 * @qs: the queue set to which the response queue belongs
1930 * @budget: how many responses can be processed in this round
1931 *
1932 * Process responses from an SGE response queue up to the supplied budget.
1933 * Responses include received packets as well as credits and other events
1934 * for the queues that belong to the response queue's queue set.
1935 * A negative budget is effectively unlimited.
1936 *
1937 * Additionally choose the interrupt holdoff time for the next interrupt
1938 * on this queue. If the system is under memory shortage use a fairly
1939 * long delay to help recovery.
1940 */
1941static int process_responses(struct adapter *adap, struct sge_qset *qs,
1942 int budget)
1943{
1944 struct sge_rspq *q = &qs->rspq;
1945 struct rsp_desc *r = &q->desc[q->cidx];
1946 int budget_left = budget;
Divy Le Ray6195c712007-01-30 19:43:56 -08001947 unsigned int sleeping = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001948 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1949 int ngathered = 0;
1950
1951 q->next_holdoff = q->holdoff_tmr;
1952
1953 while (likely(budget_left && is_new_response(r, q))) {
Divy Le Raye0994eb2007-02-24 16:44:17 -08001954 int eth, ethpad = 2;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001955 struct sk_buff *skb = NULL;
1956 u32 len, flags = ntohl(r->flags);
1957 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1958
1959 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1960
1961 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1962 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1963 if (!skb)
1964 goto no_mem;
1965
1966 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1967 skb->data[0] = CPL_ASYNC_NOTIF;
1968 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1969 q->async_notif++;
1970 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1971 skb = get_imm_packet(r);
1972 if (unlikely(!skb)) {
Divy Le Raycf992af2007-05-30 21:10:47 -07001973no_mem:
Divy Le Ray4d22de32007-01-18 22:04:14 -05001974 q->next_holdoff = NOMEM_INTR_DELAY;
1975 q->nomem++;
1976 /* consume one credit since we tried */
1977 budget_left--;
1978 break;
1979 }
1980 q->imm_data++;
Divy Le Raye0994eb2007-02-24 16:44:17 -08001981 ethpad = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001982 } else if ((len = ntohl(r->len_cq)) != 0) {
Divy Le Raycf992af2007-05-30 21:10:47 -07001983 struct sge_fl *fl;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001984
Divy Le Raycf992af2007-05-30 21:10:47 -07001985 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1986 if (fl->use_pages) {
1987 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
Divy Le Raye0994eb2007-02-24 16:44:17 -08001988
Divy Le Raycf992af2007-05-30 21:10:47 -07001989 prefetch(addr);
1990#if L1_CACHE_BYTES < 128
1991 prefetch(addr + L1_CACHE_BYTES);
1992#endif
Divy Le Raye0994eb2007-02-24 16:44:17 -08001993 __refill_fl(adap, fl);
1994
Divy Le Raycf992af2007-05-30 21:10:47 -07001995 skb = get_packet_pg(adap, fl, G_RSPD_LEN(len),
1996 eth ? SGE_RX_DROP_THRES : 0);
1997 } else
Divy Le Raye0994eb2007-02-24 16:44:17 -08001998 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1999 eth ? SGE_RX_DROP_THRES : 0);
Divy Le Raycf992af2007-05-30 21:10:47 -07002000 if (unlikely(!skb)) {
2001 if (!eth)
2002 goto no_mem;
2003 q->rx_drops++;
2004 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2005 __skb_pull(skb, 2);
Divy Le Raye0994eb2007-02-24 16:44:17 -08002006
Divy Le Ray4d22de32007-01-18 22:04:14 -05002007 if (++fl->cidx == fl->size)
2008 fl->cidx = 0;
2009 } else
2010 q->pure_rsps++;
2011
2012 if (flags & RSPD_CTRL_MASK) {
2013 sleeping |= flags & RSPD_GTS_MASK;
Divy Le Ray6195c712007-01-30 19:43:56 -08002014 handle_rsp_cntrl_info(qs, flags);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002015 }
2016
2017 r++;
2018 if (unlikely(++q->cidx == q->size)) {
2019 q->cidx = 0;
2020 q->gen ^= 1;
2021 r = q->desc;
2022 }
2023 prefetch(r);
2024
2025 if (++q->credits >= (q->size / 4)) {
2026 refill_rspq(adap, q, q->credits);
2027 q->credits = 0;
2028 }
2029
Divy Le Raycf992af2007-05-30 21:10:47 -07002030 if (likely(skb != NULL)) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05002031 if (eth)
2032 rx_eth(adap, q, skb, ethpad);
2033 else {
Divy Le Raycf992af2007-05-30 21:10:47 -07002034 /* Preserve the RSS info in csum & priority */
2035 skb->csum = rss_hi;
2036 skb->priority = rss_lo;
2037 ngathered = rx_offload(&adap->tdev, q, skb,
2038 offload_skbs,
Divy Le Raye0994eb2007-02-24 16:44:17 -08002039 ngathered);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002040 }
2041 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002042 --budget_left;
2043 }
2044
Divy Le Ray4d22de32007-01-18 22:04:14 -05002045 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2046 if (sleeping)
2047 check_ring_db(adap, qs, sleeping);
2048
2049 smp_mb(); /* commit Tx queue .processed updates */
2050 if (unlikely(qs->txq_stopped != 0))
2051 restart_tx(qs);
2052
2053 budget -= budget_left;
2054 return budget;
2055}
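
/*
 * Note: the value returned above is budget - budget_left, i.e. the number
 * of response-queue entries consumed in this call; the NAPI handler below
 * compares it against its budget to decide whether to complete polling and
 * re-arm the interrupt.
 */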
2056
2057static inline int is_pure_response(const struct rsp_desc *r)
2058{
2059 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2060
2061 return (n | r->len_cq) == 0;
2062}
2063
2064/**
2065 * napi_rx_handler - the NAPI handler for Rx processing
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002066 * @napi: the napi instance
Divy Le Ray4d22de32007-01-18 22:04:14 -05002067 * @budget: how many packets we can process in this round
2068 *
2069 * Handler for new data events when using NAPI.
2070 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002071static int napi_rx_handler(struct napi_struct *napi, int budget)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002072{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002073 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2074 struct adapter *adap = qs->adap;
2075 int work_done = process_responses(adap, qs, budget);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002076
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002077 if (likely(work_done < budget)) {
2078 napi_complete(napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002079
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002080 /*
2081 * Because we don't atomically flush the following
2082 * write it is possible that in very rare cases it can
2083 * reach the device in a way that races with a new
2084 * response being written plus an error interrupt
2085 * causing the NAPI interrupt handler below to return
2086 * unhandled status to the OS. To protect against
2087 * this would require flushing the write and doing
2088 * both the write and the flush with interrupts off.
2089 * Way too expensive and unjustifiable given the
2090 * rarity of the race.
2091 *
2092 * The race cannot happen at all with MSI-X.
2093 */
2094 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2095 V_NEWTIMER(qs->rspq.next_holdoff) |
2096 V_NEWINDEX(qs->rspq.cidx));
2097 }
2098 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002099}
2100
2101/*
2102 * Returns true if the device is already scheduled for polling.
2103 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002104static inline int napi_is_scheduled(struct napi_struct *napi)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002105{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002106 return test_bit(NAPI_STATE_SCHED, &napi->state);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002107}
2108
2109/**
2110 * process_pure_responses - process pure responses from a response queue
2111 * @adap: the adapter
2112 * @qs: the queue set owning the response queue
2113 * @r: the first pure response to process
2114 *
2115 * A simpler version of process_responses() that handles only pure (i.e.,
2116 * non data-carrying) responses. Such responses are too lightweight to
2117 * justify calling a softirq under NAPI, so we handle them specially in
2118 * the interrupt handler. The function is called with a pointer to a
2119 * response, which the caller must ensure is a valid pure response.
2120 *
2121 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2122 */
2123static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2124 struct rsp_desc *r)
2125{
2126 struct sge_rspq *q = &qs->rspq;
Divy Le Ray6195c712007-01-30 19:43:56 -08002127 unsigned int sleeping = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002128
2129 do {
2130 u32 flags = ntohl(r->flags);
2131
2132 r++;
2133 if (unlikely(++q->cidx == q->size)) {
2134 q->cidx = 0;
2135 q->gen ^= 1;
2136 r = q->desc;
2137 }
2138 prefetch(r);
2139
2140 if (flags & RSPD_CTRL_MASK) {
2141 sleeping |= flags & RSPD_GTS_MASK;
Divy Le Ray6195c712007-01-30 19:43:56 -08002142 handle_rsp_cntrl_info(qs, flags);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002143 }
2144
2145 q->pure_rsps++;
2146 if (++q->credits >= (q->size / 4)) {
2147 refill_rspq(adap, q, q->credits);
2148 q->credits = 0;
2149 }
2150 } while (is_new_response(r, q) && is_pure_response(r));
2151
Divy Le Ray4d22de32007-01-18 22:04:14 -05002152 if (sleeping)
2153 check_ring_db(adap, qs, sleeping);
2154
2155 smp_mb(); /* commit Tx queue .processed updates */
2156 if (unlikely(qs->txq_stopped != 0))
2157 restart_tx(qs);
2158
2159 return is_new_response(r, q);
2160}
2161
2162/**
2163 * handle_responses - decide what to do with new responses in NAPI mode
2164 * @adap: the adapter
2165 * @q: the response queue
2166 *
2167 * This is used by the NAPI interrupt handlers to decide what to do with
2168 * new SGE responses. If there are no new responses it returns -1. If
2169 * there are new responses and they are pure (i.e., non-data carrying)
2170 * it handles them straight in hard interrupt context as they are very
2171 * cheap and don't deliver any packets. Finally, if there are any data
2172 * signaling responses it schedules the NAPI handler. Returns 1 if it
2173 * schedules NAPI, 0 if all new responses were pure.
2174 *
2175 * The caller must ascertain NAPI is not already running.
2176 */
2177static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2178{
2179 struct sge_qset *qs = rspq_to_qset(q);
2180 struct rsp_desc *r = &q->desc[q->cidx];
2181
2182 if (!is_new_response(r, q))
2183 return -1;
2184 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2185 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2186 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2187 return 0;
2188 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002189 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002190 return 1;
2191}
2192
2193/*
2194 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2195 * (i.e., response queue serviced in hard interrupt).
2196 */
2197irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2198{
2199 struct sge_qset *qs = cookie;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002200 struct adapter *adap = qs->adap;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002201 struct sge_rspq *q = &qs->rspq;
2202
2203 spin_lock(&q->lock);
2204 if (process_responses(adap, qs, -1) == 0)
2205 q->unhandled_irqs++;
2206 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2207 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2208 spin_unlock(&q->lock);
2209 return IRQ_HANDLED;
2210}
2211
2212/*
2213 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2214 * (i.e., response queue serviced by NAPI polling).
2215 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -07002216static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002217{
2218 struct sge_qset *qs = cookie;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002219 struct sge_rspq *q = &qs->rspq;
2220
2221 spin_lock(&q->lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002222
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002223 if (handle_responses(qs->adap, q) < 0)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002224 q->unhandled_irqs++;
2225 spin_unlock(&q->lock);
2226 return IRQ_HANDLED;
2227}
2228
2229/*
2230 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2231 * SGE response queues as well as error and other async events as they all use
2232 * the same MSI vector. We use one SGE response queue per port in this mode
2233 * and protect all response queues with queue 0's lock.
2234 */
2235static irqreturn_t t3_intr_msi(int irq, void *cookie)
2236{
2237 int new_packets = 0;
2238 struct adapter *adap = cookie;
2239 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2240
2241 spin_lock(&q->lock);
2242
2243 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2244 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2245 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2246 new_packets = 1;
2247 }
2248
2249 if (adap->params.nports == 2 &&
2250 process_responses(adap, &adap->sge.qs[1], -1)) {
2251 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2252
2253 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2254 V_NEWTIMER(q1->next_holdoff) |
2255 V_NEWINDEX(q1->cidx));
2256 new_packets = 1;
2257 }
2258
2259 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2260 q->unhandled_irqs++;
2261
2262 spin_unlock(&q->lock);
2263 return IRQ_HANDLED;
2264}
2265
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002266static int rspq_check_napi(struct sge_qset *qs)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002267{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002268 struct sge_rspq *q = &qs->rspq;
2269
2270 if (!napi_is_scheduled(&qs->napi) &&
2271 is_new_response(&q->desc[q->cidx], q)) {
2272 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002273 return 1;
2274 }
2275 return 0;
2276}
2277
2278/*
2279 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2280 * by NAPI polling). Handles data events from SGE response queues as well as
2281 * error and other async events as they all use the same MSI vector. We use
2282 * one SGE response queue per port in this mode and protect all response
2283 * queues with queue 0's lock.
2284 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -07002285static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002286{
2287 int new_packets;
2288 struct adapter *adap = cookie;
2289 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2290
2291 spin_lock(&q->lock);
2292
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002293 new_packets = rspq_check_napi(&adap->sge.qs[0]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002294 if (adap->params.nports == 2)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002295 new_packets += rspq_check_napi(&adap->sge.qs[1]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002296 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2297 q->unhandled_irqs++;
2298
2299 spin_unlock(&q->lock);
2300 return IRQ_HANDLED;
2301}
2302
2303/*
2304 * A helper function that processes responses and issues GTS.
2305 */
2306static inline int process_responses_gts(struct adapter *adap,
2307 struct sge_rspq *rq)
2308{
2309 int work;
2310
2311 work = process_responses(adap, rspq_to_qset(rq), -1);
2312 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2313 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2314 return work;
2315}
2316
2317/*
2318 * The legacy INTx interrupt handler. This needs to handle data events from
2319 * SGE response queues as well as error and other async events as they all use
2320 * the same interrupt pin. We use one SGE response queue per port in this mode
2321 * and protect all response queues with queue 0's lock.
2322 */
2323static irqreturn_t t3_intr(int irq, void *cookie)
2324{
2325 int work_done, w0, w1;
2326 struct adapter *adap = cookie;
2327 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2328 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2329
2330 spin_lock(&q0->lock);
2331
2332 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2333 w1 = adap->params.nports == 2 &&
2334 is_new_response(&q1->desc[q1->cidx], q1);
2335
2336 if (likely(w0 | w1)) {
2337 t3_write_reg(adap, A_PL_CLI, 0);
2338 t3_read_reg(adap, A_PL_CLI); /* flush */
2339
2340 if (likely(w0))
2341 process_responses_gts(adap, q0);
2342
2343 if (w1)
2344 process_responses_gts(adap, q1);
2345
2346 work_done = w0 | w1;
2347 } else
2348 work_done = t3_slow_intr_handler(adap);
2349
2350 spin_unlock(&q0->lock);
2351 return IRQ_RETVAL(work_done != 0);
2352}
2353
2354/*
2355 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2356 * Handles data events from SGE response queues as well as error and other
2357 * async events as they all use the same interrupt pin. We use one SGE
2358 * response queue per port in this mode and protect all response queues with
2359 * queue 0's lock.
2360 */
2361static irqreturn_t t3b_intr(int irq, void *cookie)
2362{
2363 u32 map;
2364 struct adapter *adap = cookie;
2365 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2366
2367 t3_write_reg(adap, A_PL_CLI, 0);
2368 map = t3_read_reg(adap, A_SG_DATA_INTR);
2369
2370 if (unlikely(!map)) /* shared interrupt, most likely */
2371 return IRQ_NONE;
2372
2373 spin_lock(&q0->lock);
2374
2375 if (unlikely(map & F_ERRINTR))
2376 t3_slow_intr_handler(adap);
2377
2378 if (likely(map & 1))
2379 process_responses_gts(adap, q0);
2380
2381 if (map & 2)
2382 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2383
2384 spin_unlock(&q0->lock);
2385 return IRQ_HANDLED;
2386}
2387
2388/*
2389 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2390 * Handles data events from SGE response queues as well as error and other
2391 * async events as they all use the same interrupt pin. We use one SGE
2392 * response queue per port in this mode and protect all response queues with
2393 * queue 0's lock.
2394 */
2395static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2396{
2397 u32 map;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002398 struct adapter *adap = cookie;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002399 struct sge_qset *qs0 = &adap->sge.qs[0];
2400 struct sge_rspq *q0 = &qs0->rspq;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002401
2402 t3_write_reg(adap, A_PL_CLI, 0);
2403 map = t3_read_reg(adap, A_SG_DATA_INTR);
2404
2405 if (unlikely(!map)) /* shared interrupt, most likely */
2406 return IRQ_NONE;
2407
2408 spin_lock(&q0->lock);
2409
2410 if (unlikely(map & F_ERRINTR))
2411 t3_slow_intr_handler(adap);
2412
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002413 if (likely(map & 1))
2414 napi_schedule(&qs0->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002415
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002416 if (map & 2)
2417 napi_schedule(&adap->sge.qs[1].napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002418
2419 spin_unlock(&q0->lock);
2420 return IRQ_HANDLED;
2421}
2422
2423/**
2424 * t3_intr_handler - select the top-level interrupt handler
2425 * @adap: the adapter
2426 * @polling: whether using NAPI to service response queues
2427 *
2428 * Selects the top-level interrupt handler based on the type of interrupts
2429 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2430 * response queues.
2431 */
Jeff Garzik7c239972007-10-19 03:12:20 -04002432irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002433{
2434 if (adap->flags & USING_MSIX)
2435 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2436 if (adap->flags & USING_MSI)
2437 return polling ? t3_intr_msi_napi : t3_intr_msi;
2438 if (adap->params.rev > 0)
2439 return polling ? t3b_intr_napi : t3b_intr;
2440 return t3_intr;
2441}
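
/*
 * Illustrative sketch, not part of this file: the top-level driver passes
 * the handler returned here straight to request_irq(), with the adapter as
 * the cookie for the MSI/INTx cases (MSI-X instead registers one vector per
 * queue set with the queue set as the cookie).  Roughly, with the IRQ flags
 * and name chosen purely for illustration:
 *
 *	err = request_irq(adap->pdev->irq, t3_intr_handler(adap, polling),
 *			  IRQF_SHARED, "cxgb3", adap);
 */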
2442
2443/**
2444 * t3_sge_err_intr_handler - SGE async event interrupt handler
2445 * @adapter: the adapter
2446 *
2447 * Interrupt handler for SGE asynchronous (non-data) events.
2448 */
2449void t3_sge_err_intr_handler(struct adapter *adapter)
2450{
2451 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2452
2453 if (status & F_RSPQCREDITOVERFOW)
2454 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2455
2456 if (status & F_RSPQDISABLED) {
2457 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2458
2459 CH_ALERT(adapter,
2460 "packet delivered to disabled response queue "
2461 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2462 }
2463
Divy Le Ray6e3f03b2007-08-21 20:49:10 -07002464 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2465 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
2466 status & F_HIPIODRBDROPERR ? "high" : "low");
2467
Divy Le Ray4d22de32007-01-18 22:04:14 -05002468 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2469 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2470 t3_fatal_err(adapter);
2471}
2472
2473/**
2474 * sge_timer_cb - perform periodic maintenance of an SGE qset
2475 * @data: the SGE queue set to maintain
2476 *
2477 * Runs periodically from a timer to perform maintenance of an SGE queue
2478 * set. It performs two tasks:
2479 *
2480 * a) Cleans up any completed Tx descriptors that may still be pending.
2481 * Normal descriptor cleanup happens when new packets are added to a Tx
2482 * queue so this timer is relatively infrequent and does any cleanup only
2483 * if the Tx queue has not seen any new packets in a while. We make a
2484 * best effort attempt to reclaim descriptors, in that we don't wait
2485 * around if we cannot get a queue's lock (which most likely is because
2486 * someone else is queueing new packets and so will also handle the clean
2487 * up). Since control queues use immediate data exclusively we don't
2488 * bother cleaning them up here.
2489 *
2490 * b) Replenishes Rx queues that have run out due to memory shortage.
2491 * Normally new Rx buffers are added when existing ones are consumed but
2492 * when out of memory a queue can become empty. We try to add only a few
2493 * buffers here, the queue will be replenished fully as these new buffers
2494 * are used up if memory shortage has subsided.
2495 */
2496static void sge_timer_cb(unsigned long data)
2497{
2498 spinlock_t *lock;
2499 struct sge_qset *qs = (struct sge_qset *)data;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002500 struct adapter *adap = qs->adap;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002501
2502 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2503 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2504 spin_unlock(&qs->txq[TXQ_ETH].lock);
2505 }
2506 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2507 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2508 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2509 }
2510 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002511 &adap->sge.qs[0].rspq.lock;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002512 if (spin_trylock_irq(lock)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002513 if (!napi_is_scheduled(&qs->napi)) {
Divy Le Raybae73f42007-02-24 16:44:12 -08002514 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2515
Divy Le Ray4d22de32007-01-18 22:04:14 -05002516 if (qs->fl[0].credits < qs->fl[0].size)
2517 __refill_fl(adap, &qs->fl[0]);
2518 if (qs->fl[1].credits < qs->fl[1].size)
2519 __refill_fl(adap, &qs->fl[1]);
Divy Le Raybae73f42007-02-24 16:44:12 -08002520
2521 if (status & (1 << qs->rspq.cntxt_id)) {
2522 qs->rspq.starved++;
2523 if (qs->rspq.credits) {
2524 refill_rspq(adap, &qs->rspq, 1);
2525 qs->rspq.credits--;
2526 qs->rspq.restarted++;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002527 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
Divy Le Raybae73f42007-02-24 16:44:12 -08002528 1 << qs->rspq.cntxt_id);
2529 }
2530 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002531 }
2532 spin_unlock_irq(lock);
2533 }
2534 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2535}
2536
2537/**
2538 * t3_update_qset_coalesce - update coalescing settings for a queue set
2539 * @qs: the SGE queue set
2540 * @p: new queue set parameters
2541 *
2542 * Update the coalescing settings for an SGE queue set. Nothing is done
2543 * if the queue set is not initialized yet.
2544 */
2545void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2546{
Divy Le Ray4d22de32007-01-18 22:04:14 -05002547 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2548 qs->rspq.polling = p->polling;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002549 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002550}
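
/*
 * Illustrative note: holdoff_tmr is programmed in SGE timer ticks, which
 * t3_sge_init() below sets to 0.1 us (A_SG_TIMER_TICK = core ticks per usec
 * divided by 10), hence the "* 10" above.  For example, the 5 us default
 * chosen in t3_sge_prep() becomes a holdoff_tmr of 50, and a request of
 * 0 us is clamped to a single tick.
 */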
2551
2552/**
2553 * t3_sge_alloc_qset - initialize an SGE queue set
2554 * @adapter: the adapter
2555 * @id: the queue set id
2556 * @nports: how many Ethernet ports will be using this queue set
2557 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2558 * @p: configuration parameters for this queue set
2559 * @ntxq: number of Tx queues for the queue set
2560 * @netdev: net device associated with this queue set
2561 *
2562 * Allocate resources and initialize an SGE queue set. A queue set
2563 * comprises a response queue, two Rx free-buffer queues, and up to 3
2564 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2565 * queue, offload queue, and control queue.
2566 */
2567int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2568 int irq_vec_idx, const struct qset_params *p,
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002569 int ntxq, struct net_device *dev)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002570{
2571 int i, ret = -ENOMEM;
2572 struct sge_qset *q = &adapter->sge.qs[id];
2573
2574 init_qset_cntxt(q, id);
2575 init_timer(&q->tx_reclaim_timer);
2576 q->tx_reclaim_timer.data = (unsigned long)q;
2577 q->tx_reclaim_timer.function = sge_timer_cb;
2578
2579 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2580 sizeof(struct rx_desc),
2581 sizeof(struct rx_sw_desc),
2582 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2583 if (!q->fl[0].desc)
2584 goto err;
2585
2586 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2587 sizeof(struct rx_desc),
2588 sizeof(struct rx_sw_desc),
2589 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2590 if (!q->fl[1].desc)
2591 goto err;
2592
2593 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2594 sizeof(struct rsp_desc), 0,
2595 &q->rspq.phys_addr, NULL);
2596 if (!q->rspq.desc)
2597 goto err;
2598
2599 for (i = 0; i < ntxq; ++i) {
2600 /*
2601 * The control queue always uses immediate data so does not
2602 * need to keep track of any sk_buffs.
2603 */
2604 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2605
2606 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2607 sizeof(struct tx_desc), sz,
2608 &q->txq[i].phys_addr,
2609 &q->txq[i].sdesc);
2610 if (!q->txq[i].desc)
2611 goto err;
2612
2613 q->txq[i].gen = 1;
2614 q->txq[i].size = p->txq_size[i];
2615 spin_lock_init(&q->txq[i].lock);
2616 skb_queue_head_init(&q->txq[i].sendq);
2617 }
2618
2619 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2620 (unsigned long)q);
2621 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2622 (unsigned long)q);
2623
2624 q->fl[0].gen = q->fl[1].gen = 1;
2625 q->fl[0].size = p->fl_size;
2626 q->fl[1].size = p->jumbo_size;
2627
2628 q->rspq.gen = 1;
2629 q->rspq.size = p->rspq_size;
2630 spin_lock_init(&q->rspq.lock);
2631
2632 q->txq[TXQ_ETH].stop_thres = nports *
2633 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2634
Divy Le Raycf992af2007-05-30 21:10:47 -07002635#if FL0_PG_CHUNK_SIZE > 0
2636 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002637#else
Divy Le Raycf992af2007-05-30 21:10:47 -07002638 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
Divy Le Raye0994eb2007-02-24 16:44:17 -08002639#endif
Divy Le Raycf992af2007-05-30 21:10:47 -07002640 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
2641 q->fl[1].buf_size = is_offload(adapter) ?
2642 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2643 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002644
2645 spin_lock(&adapter->sge.reg_lock);
2646
2647 /* FL threshold comparison uses < */
2648 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2649 q->rspq.phys_addr, q->rspq.size,
2650 q->fl[0].buf_size, 1, 0);
2651 if (ret)
2652 goto err_unlock;
2653
2654 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2655 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2656 q->fl[i].phys_addr, q->fl[i].size,
2657 q->fl[i].buf_size, p->cong_thres, 1,
2658 0);
2659 if (ret)
2660 goto err_unlock;
2661 }
2662
2663 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2664 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2665 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2666 1, 0);
2667 if (ret)
2668 goto err_unlock;
2669
2670 if (ntxq > 1) {
2671 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2672 USE_GTS, SGE_CNTXT_OFLD, id,
2673 q->txq[TXQ_OFLD].phys_addr,
2674 q->txq[TXQ_OFLD].size, 0, 1, 0);
2675 if (ret)
2676 goto err_unlock;
2677 }
2678
2679 if (ntxq > 2) {
2680 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2681 SGE_CNTXT_CTRL, id,
2682 q->txq[TXQ_CTRL].phys_addr,
2683 q->txq[TXQ_CTRL].size,
2684 q->txq[TXQ_CTRL].token, 1, 0);
2685 if (ret)
2686 goto err_unlock;
2687 }
2688
2689 spin_unlock(&adapter->sge.reg_lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002690
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002691 q->adap = adapter;
2692 q->netdev = dev;
2693 t3_update_qset_coalesce(q, p);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002694
2695 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2696 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2697 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2698
2699 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2700 V_NEWTIMER(q->rspq.holdoff_tmr));
2701
2702 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2703 return 0;
2704
2705 err_unlock:
2706 spin_unlock(&adapter->sge.reg_lock);
2707 err:
2708 t3_free_qset(adapter, q);
2709 return ret;
2710}
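
/*
 * Illustrative sketch, not part of this file: the main driver calls this
 * once per queue set after t3_sge_prep() has filled in the defaults.
 * Assuming "sge_par" is the struct sge_params prepared there, "dev" the
 * port's net_device and "vec" the chosen IRQ vector index, allocating the
 * first queue set with all three Tx queues would look roughly like:
 *
 *	err = t3_sge_alloc_qset(adap, 0, 1, vec, &sge_par->qset[0], 3, dev);
 *	if (err)
 *		goto out_free;
 */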
2711
2712/**
2713 * t3_free_sge_resources - free SGE resources
2714 * @adap: the adapter
2715 *
2716 * Frees resources used by the SGE queue sets.
2717 */
2718void t3_free_sge_resources(struct adapter *adap)
2719{
2720 int i;
2721
2722 for (i = 0; i < SGE_QSETS; ++i)
2723 t3_free_qset(adap, &adap->sge.qs[i]);
2724}
2725
2726/**
2727 * t3_sge_start - enable SGE
2728 * @adap: the adapter
2729 *
2730 * Enables the SGE for DMAs. This is the last step in starting packet
2731 * transfers.
2732 */
2733void t3_sge_start(struct adapter *adap)
2734{
2735 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2736}
2737
2738/**
2739 * t3_sge_stop - disable SGE operation
2740 * @adap: the adapter
2741 *
2742 * Disables the DMA engine. This can be called in emergencies (e.g.,
2743 * from error interrupts) or from normal process context. In the latter
2744 * case it also disables any pending queue restart tasklets. Note that
2745 * if it is called in interrupt context it cannot disable the restart
2746 * tasklets as it cannot wait; however, the tasklets will have no effect
2747 * since the doorbells are disabled and the driver will call this again
2748 * later from process context, at which time the tasklets will be stopped
2749 * if they are still running.
2750 */
2751void t3_sge_stop(struct adapter *adap)
2752{
2753 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2754 if (!in_interrupt()) {
2755 int i;
2756
2757 for (i = 0; i < SGE_QSETS; ++i) {
2758 struct sge_qset *qs = &adap->sge.qs[i];
2759
2760 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2761 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2762 }
2763 }
2764}
2765
2766/**
2767 * t3_sge_init - initialize SGE
2768 * @adap: the adapter
2769 * @p: the SGE parameters
2770 *
2771 * Performs SGE initialization needed every time after a chip reset.
2772 * We do not initialize any of the queue sets here, instead the driver
2773 * top-level must request those individually. We also do not enable DMA
2774 * here, that should be done after the queues have been set up.
2775 */
2776void t3_sge_init(struct adapter *adap, struct sge_params *p)
2777{
2778 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2779
2780 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2781 F_CQCRDTCTRL |
2782 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2783 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2784#if SGE_NUM_GENBITS == 1
2785 ctrl |= F_EGRGENCTRL;
2786#endif
2787 if (adap->params.rev > 0) {
2788 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2789 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2790 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2791 }
2792 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2793 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2794 V_LORCQDRBTHRSH(512));
2795 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2796 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
Divy Le Ray6195c712007-01-30 19:43:56 -08002797 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
Divy Le Ray4d22de32007-01-18 22:04:14 -05002798 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2799 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2800 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2801 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2802 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2803 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2804}
2805
2806/**
2807 * t3_sge_prep - one-time SGE initialization
2808 * @adap: the associated adapter
2809 * @p: SGE parameters
2810 *
2811 * Performs one-time initialization of SGE SW state. Includes determining
2812 * defaults for the assorted SGE parameters, which admins can change until
2813 * they are used to initialize the SGE.
2814 */
2815void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2816{
2817 int i;
2818
2819 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2820 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2821
2822 for (i = 0; i < SGE_QSETS; ++i) {
2823 struct qset_params *q = p->qset + i;
2824
2825 q->polling = adap->params.rev > 0;
2826 q->coalesce_usecs = 5;
2827 q->rspq_size = 1024;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002828 q->fl_size = 1024;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002829 q->jumbo_size = 512;
2830 q->txq_size[TXQ_ETH] = 1024;
2831 q->txq_size[TXQ_OFLD] = 1024;
2832 q->txq_size[TXQ_CTRL] = 256;
2833 q->cong_thres = 0;
2834 }
2835
2836 spin_lock_init(&adap->sge.reg_lock);
2837}
2838
2839/**
2840 * t3_get_desc - dump an SGE descriptor for debugging purposes
2841 * @qs: the queue set
2842 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
2843 * @idx: the descriptor index in the queue
2844 * @data: where to dump the descriptor contents
2845 *
2846 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2847 * size of the descriptor.
2848 */
2849int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2850 unsigned char *data)
2851{
2852 if (qnum >= 6)
2853 return -EINVAL;
2854
2855 if (qnum < 3) {
2856 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2857 return -EINVAL;
2858 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2859 return sizeof(struct tx_desc);
2860 }
2861
2862 if (qnum == 3) {
2863 if (!qs->rspq.desc || idx >= qs->rspq.size)
2864 return -EINVAL;
2865 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2866 return sizeof(struct rsp_desc);
2867 }
2868
2869 qnum -= 4;
2870 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2871 return -EINVAL;
2872 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2873 return sizeof(struct rx_desc);
2874}