/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include <net/arp.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

#define SGE_RX_COPY_THRES  256
#define SGE_RX_PULL_LEN    128

/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE  2048
#define FL0_PG_ORDER 0
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
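
/*
 * Worked example of the resulting layout (assuming 4KB pages): FL0 carves
 * each order-0 page into two 2KB chunks, while FL1 allocates order-1 (8KB)
 * compound pages and hands each out as a single 8KB jumbo buffer.  On
 * platforms with pages larger than 8KB, FL1 instead carves 16KB chunks out
 * of order-0 pages.
 */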

#define SGE_RX_DROP_THRES 16

/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	__be64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	u8 eop;       /* set if last descriptor for packet */
	u8 addr_idx;  /* buffer index of first SGL entry in descriptor */
	u8 fragidx;   /* first page fragment associated with descriptor */
	s8 sflit;     /* start flit of first SGL entry in descriptor */
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct fl_pg_chunk pg_chunk;
	};
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
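
/*
 * Worked example (with SGE_NUM_GENBITS == 2, where WR_FLITS is 15):
 * a 20-flit SGL needs 1 + (20 - 2) / (15 - 1) = 2 descriptors, matching
 * flit_desc_map[20] == 2 above.  The map simply caches this arithmetic
 * for a table lookup in flits_to_desc().
 */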

static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 * refill_rspq - replenish an SGE response queue
 * @adapter: the adapter
 * @q: the response queue to replenish
 * @credits: how many new responses to make available
 *
 * Replenishes a response queue by making the supplied number of responses
 * available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	rmb();
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 * Returns true if the platform needs sk_buff unmapping.  The compiler
 * optimizes away unnecessary code (the unmap paths) if this returns false.
 */
static inline int need_skb_unmap(void)
{
	/*
	 * This structure is used to tell if the platform needs buffer
	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
	 */
	struct dummy {
		DECLARE_PCI_UNMAP_ADDR(addr);
	};

	return sizeof(struct dummy) != 0;
}
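
/*
 * Illustrative sketch (not part of the driver): on platforms that track
 * unmap state, DECLARE_PCI_UNMAP_ADDR(addr) expands roughly to
 *
 *	dma_addr_t addr;
 *
 * so sizeof(struct dummy) != 0 and need_skb_unmap() is a compile-time 1.
 * On platforms that need no unmap bookkeeping it expands to nothing, the
 * dummy struct is empty (GCC gives it size 0), and every
 * "if (need_skb_unmap()) ..." branch below is dead code the compiler drops.
 */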

/**
 * unmap_skb - unmap a packet main body and its page fragments
 * @skb: the packet
 * @q: the Tx queue containing Tx descriptors for the packet
 * @cidx: index of Tx descriptor
 * @pdev: the PCI device
 *
 * Unmap the main body of an sk_buff and its page fragments, if any.
 * Because of the fairly complicated structure of our SGLs and the desire
 * to conserve space for metadata, the information necessary to unmap an
 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 * descriptors (the physical addresses of the various data buffers), and
 * the SW descriptor state (assorted indices).  The send functions
 * initialize the indices for the first packet descriptor so we can unmap
 * the buffers held in the first Tx descriptor here, and we have enough
 * information at this point to set the state for the next Tx descriptor.
 *
 * Note that it is possible to clean up the first descriptor of a packet
 * before the send routines have written the next descriptors, but this
 * race does not cause any problem.  We just end up writing the unmapping
 * info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct tx_sw_desc *d = &q->sdesc[cidx];
	int nfrags, frag_idx, curflit, j = d->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
	frag_idx = d->fragidx;

	if (frag_idx == 0 && skb_headlen(skb)) {
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		j = 1;
	}

	curflit = d->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_shinfo(skb)->frags[frag_idx].size,
			       PCI_DMA_TODEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {	/* SGL continues into next Tx descriptor */
		d = cidx + 1 == q->size ? q->sdesc : d + 1;
		d->fragidx = frag_idx;
		d->addr_idx = j;
		d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
	}
}

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adapter: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->eop)
				kfree_skb(d->skb);
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 * reclaim_completed_tx - reclaims completed Tx descriptors
 * @adapter: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  Called with the Tx
 * queue's lock held.
 */
static inline void reclaim_completed_tx(struct adapter *adapter,
					struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
}

/**
 * should_restart_tx - are there enough resources to restart a Tx queue?
 * @q: the Tx queue
 *
 * Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @pdev: the PCI device associated with the adapter
 * @q: the SGE free list to clean up
 *
 * Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 * this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);
		if (q->use_pages) {
			if (d->pg_chunk.page)
				put_page(d->pg_chunk.page);
			d->pg_chunk.page = NULL;
		} else {
			kfree_skb(d->skb);
			d->skb = NULL;
		}
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->pg_chunk.page) {
		__free_pages(q->pg_chunk.page, q->order);
		q->pg_chunk.page = NULL;
	}
}

/**
 * add_one_rx_buf - add a packet buffer to a free-buffer list
 * @va: buffer start VA
 * @len: the buffer length
 * @d: the HW Rx descriptor to write
 * @sd: the SW Rx descriptor to write
 * @gen: the generation bit value
 * @pdev: the PCI device associated with the adapter
 *
 * Add a buffer of the given length to the supplied HW and SW Rx
 * descriptors.
 */
static inline int add_one_rx_buf(void *va, unsigned int len,
				 struct rx_desc *d, struct rx_sw_desc *sd,
				 unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
		return -ENOMEM;

	pci_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
			  unsigned int order)
{
	if (!q->pg_chunk.page) {
		q->pg_chunk.page = alloc_pages(gfp, order);
		if (unlikely(!q->pg_chunk.page))
			return -ENOMEM;
		q->pg_chunk.va = page_address(q->pg_chunk.page);
		q->pg_chunk.offset = 0;
	}
	sd->pg_chunk = q->pg_chunk;

	q->pg_chunk.offset += q->buf_size;
	if (q->pg_chunk.offset == (PAGE_SIZE << order))
		q->pg_chunk.page = NULL;
	else {
		q->pg_chunk.va += q->buf_size;
		get_page(q->pg_chunk.page);
	}
	return 0;
}
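
/*
 * Reference-count walkthrough (illustrative, for FL0 with 4KB pages and
 * 2KB chunks): alloc_pages() returns the page with refcount 1, which the
 * first chunk inherits; handing out the second (last) chunk clears
 * q->pg_chunk.page without a get_page(), so each outstanding chunk owns
 * exactly one reference and the page is freed only once every chunk has
 * been put_page()'d, either by free_rx_bufs() above or by the skb that
 * the chunk ends up attached to.
 */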

/**
 * refill_fl - refill an SGE free-buffer list
 * @adap: the adapter
 * @q: the free-list to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for allocating new buffers
 *
 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must ensure that
 * @n does not exceed the queue's capacity.
 */
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	void *buf_start;
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	unsigned int count = 0;

	while (n--) {
		int err;

		if (q->use_pages) {
			if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
nomem:				q->alloc_failed++;
				break;
			}
			buf_start = sd->pg_chunk.va;
		} else {
			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);

			if (!skb)
				goto nomem;

			sd->skb = skb;
			buf_start = skb->data;
		}

		err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
				     adap->pdev);
		if (unlikely(err)) {
			if (!q->use_pages) {
				kfree_skb(sd->skb);
				sd->skb = NULL;
			}
			break;
		}

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		q->credits++;
		count++;
	}
	wmb();
	if (likely(count))
		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));

	return count;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(16U, fl->size - fl->credits),
		  GFP_ATOMIC | __GFP_COMP);
}

/**
 * recycle_rx_buf - recycle a receive buffer
 * @adap: the adapter
 * @q: the SGE free list
 * @idx: index of buffer to recycle
 *
 * Recycles the specified buffer on the given free list by adding it at
 * the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
	q->credits++;

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}

/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @pdev: the PCI device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the physical address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size && metadata) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
		*(void **)metadata = s;
	}
	memset(p, 0, len);
	return p;
}
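
/*
 * Usage sketch (illustrative only; the real call sites live in the qset
 * setup code, outside this excerpt).  Allocating a free list of @size
 * descriptors might look like:
 *
 *	q->fl[0].desc = alloc_ring(adap->pdev, size,
 *				   sizeof(struct rx_desc),
 *				   sizeof(struct rx_sw_desc),
 *				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
 *
 * On success the HW ring is zeroed and DMA-coherent, phys_addr holds its
 * bus address, and sdesc points at the kcalloc'ed SW state array.
 */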

/**
 * t3_reset_qset - reset an SGE qset
 * @q: the queue set
 *
 * Reset the qset structure.  The NAPI structure is preserved in the event
 * of the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
{
	if (q->adap &&
	    !(q->adap->flags & NAPI_INIT)) {
		memset(q, 0, sizeof(*q));
		return;
	}

	q->adap = NULL;
	memset(&q->rspq, 0, sizeof(q->rspq));
	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
	q->txq_stopped = 0;
	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
	q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
}


/**
 * t3_free_qset - free the resources of an SGE queue set
 * @adapter: the adapter owning the queue set
 * @q: the queue set
 *
 * Release the HW and SW resources associated with an SGE queue set, such
 * as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 * queue set must be quiesced prior to calling this.
 */
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock_irq(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock_irq(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock_irq(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock_irq(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	t3_reset_qset(q);
}

/**
 * init_qset_cntxt - initialize an SGE queue set context info
 * @qs: the queue set
 * @id: the queue set id
 *
 * Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}
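
/*
 * Worked example (illustrative): for queue set id 1 this yields rspq
 * cntxt_id 1, fl[0]/fl[1] cntxt_ids 2 and 3, and Tx context ids
 * FW_TUNNEL_SGEEC_START + 1, FW_OFLD_SGEEC_START + 1 and
 * FW_CTRL_SGEEC_START + 1 for the ETH, OFLD and CTRL queues respectively.
 */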

/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
	return (3 * n) / 2 + (n & 1);
}
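
/*
 * Example: a pair of SGL entries packs into 3 flits (two 64-bit addresses
 * plus two 32-bit lengths), and an odd trailing entry rounds up to 2 flits,
 * so sgl_len(2) = 3, sgl_len(3) = 5 and sgl_len(4) = 6.
 */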

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Calculates the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}

/**
 * get_packet - return the next ingress packet buffer from a free list
 * @adap: the adapter that received the packet
 * @fl: the SGE free list holding the packet
 * @len: the packet length including any SGE padding
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list and complete setup of the
 * sk_buff.  If the packet is small we make a copy and recycle the
 * original buffer, otherwise we use the original buffer itself.  If a
 * positive drop threshold is supplied packets are dropped and their
 * buffers recycled if (a) the number of remaining buffers is under the
 * threshold and the packet is too big to copy, or (b) the packet should
 * be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->skb->data);
	fl->credits--;

	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
					    pci_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
			memcpy(skb->data, sd->skb->data, len);
			pci_dma_sync_single_for_device(adap->pdev,
					    pci_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;
recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	if (unlikely(fl->credits < drop_thres))
		goto recycle;

use_orig_buf:
	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
}

/**
 * get_packet_pg - return the next ingress packet buffer from a free list
 * @adap: the adapter that received the packet
 * @fl: the SGE free list holding the packet
 * @q: the response queue the packet arrived on
 * @len: the packet length including any SGE padding
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list populated with page chunks.
 * If the packet is small we make a copy and recycle the original buffer,
 * otherwise we attach the original buffer as a page fragment to a fresh
 * sk_buff.  If a positive drop threshold is supplied packets are dropped
 * and their buffers recycled if (a) the number of remaining buffers is
 * under the threshold and the packet is too big to copy, or (b) there's
 * no system memory.
 *
 * Note: this function is similar to @get_packet but deals with Rx buffers
 * that are page chunks rather than sk_buffs.
 */
static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
				     struct sge_rspq *q, unsigned int len,
				     unsigned int drop_thres)
{
	struct sk_buff *newskb, *skb;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	newskb = skb = q->pg_skb;

	if (!skb && (len <= SGE_RX_COPY_THRES)) {
		newskb = alloc_skb(len, GFP_ATOMIC);
		if (likely(newskb != NULL)) {
			__skb_put(newskb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
					    pci_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
			memcpy(newskb->data, sd->pg_chunk.va, len);
			pci_dma_sync_single_for_device(adap->pdev,
					    pci_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			return NULL;
recycle:
		fl->credits--;
		recycle_rx_buf(adap, fl, fl->cidx);
		q->rx_recycle_buf++;
		return newskb;
	}

	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
		goto recycle;

	if (!skb)
		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
	if (unlikely(!newskb)) {
		if (!drop_thres)
			return NULL;
		goto recycle;
	}

	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	if (!skb) {
		__skb_put(newskb, SGE_RX_PULL_LEN);
		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
				   len - SGE_RX_PULL_LEN);
		newskb->len = len;
		newskb->data_len = len - SGE_RX_PULL_LEN;
	} else {
		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
				   sd->pg_chunk.page,
				   sd->pg_chunk.offset, len);
		newskb->len += len;
		newskb->data_len += len;
	}
	newskb->truesize += newskb->data_len;

	fl->credits--;
	/*
	 * We do not refill FLs here, we let the caller do it to overlap a
	 * prefetch.
	 */
	return newskb;
}

/**
 * get_imm_packet - return the next ingress packet buffer from a response
 * @resp: the response descriptor containing the packet data
 *
 * Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 * calc_tx_descs - calculate the number of Tx descriptors for a packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given Ethernet
 * packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}
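
/*
 * Worked example: a non-TSO skb with linear data and two page fragments
 * needs sgl_len(2 + 1) + 2 = 7 flits (the WR and CPL headers take 2 flits,
 * the 3-entry SGL takes 5), and flits_to_desc(7) == 1, so it fits in a
 * single Tx descriptor.  A TSO skb adds one flit for the LSO header.
 */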

/**
 * make_sgl - populate a scatter/gather list for a packet
 * @skb: the packet
 * @sgp: the SGL to populate
 * @start: start address of skb main body data to include in the SGL
 * @len: length of skb main body data to include in the SGL
 * @pdev: the PCI device
 *
 * Generates a scatter/gather list for the buffers that make up a packet
 * and returns the SGL size in 8-byte words.  The caller must size the SGL
 * appropriately.
 */
static inline unsigned int make_sgl(const struct sk_buff *skb,
				    struct sg_ent *sgp, unsigned char *start,
				    unsigned int len, struct pci_dev *pdev)
{
	dma_addr_t mapping;
	unsigned int i, j = 0, nfrags;

	if (len) {
		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[0] = cpu_to_be64(mapping);
		j = 1;
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = pci_map_page(pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		sgp->len[j] = cpu_to_be32(frag->size);
		sgp->addr[j] = cpu_to_be64(mapping);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}
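
/*
 * SGL layout sketch: each struct sg_ent packs two buffers as
 * { __be32 len[2]; __be64 addr[2]; }, i.e. 3 flits per pair, with @j
 * toggling between the two slots.  The return value replicates the
 * sgl_len() arithmetic for (nfrags + one-if-linear-data) entries, plus
 * one extra flit when an odd trailing entry leaves len[1] to be zeroed.
 */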

/**
 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 *
 * Ring the doorbell if a Tx queue is asleep.  There is a natural race
 * where the HW may go to sleep just after we checked; in that case the
 * interrupt handler will detect the outstanding TX packet and ring the
 * doorbell for us.
 *
 * When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	wmb();			/* write descriptors before telling HW */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}

static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}
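
/*
 * Generation-bit note (illustrative): the SGE tells valid from stale
 * descriptors in the ring by a generation value that flips each time the
 * producer index wraps (see the q->gen ^= 1 updates in the xmit paths).
 * With SGE_NUM_GENBITS == 2 the last flit of each descriptor carries a
 * second copy of the generation, written here after the descriptor body
 * so the HW never treats a half-written descriptor as valid.
 */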

/**
 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
 * @ndesc: number of Tx descriptors spanned by the SGL
 * @skb: the packet corresponding to the WR
 * @d: first Tx descriptor to be written
 * @pidx: index of above descriptors
 * @q: the SGE Tx queue
 * @sgl: the SGL
 * @flits: number of flits to the start of the SGL in the first descriptor
 * @sgl_flits: the SGL size in flits
 * @gen: the Tx descriptor generation
 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 * Write a work request header and an associated SGL.  If the SGL is
 * small enough to fit into one Tx descriptor it has already been written
 * and we just need to write the WR header.  Otherwise we distribute the
 * SGL across the number of descriptors it spans.
 */
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
			     struct tx_desc *d, unsigned int pidx,
			     const struct sge_txq *q,
			     const struct sg_ent *sgl,
			     unsigned int flits, unsigned int sgl_flits,
			     unsigned int gen, __be32 wr_hi,
			     __be32 wr_lo)
{
	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
	struct tx_sw_desc *sd = &q->sdesc[pidx];

	sd->skb = skb;
	if (need_skb_unmap()) {
		sd->fragidx = 0;
		sd->addr_idx = 0;
		sd->sflit = flits;
	}

	if (likely(ndesc == 1)) {
		sd->eop = 1;
		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;
		wmb();
		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
				   V_WR_GEN(gen)) | wr_lo;
		wr_gen2(d, gen);
	} else {
		unsigned int ogen = gen;
		const u64 *fp = (const u64 *)sgl;
		struct work_request_hdr *wp = wrp;

		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;

		while (sgl_flits) {
			unsigned int avail = WR_FLITS - flits;

			if (avail > sgl_flits)
				avail = sgl_flits;
			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
			sgl_flits -= avail;
			ndesc--;
			if (!sgl_flits)
				break;

			fp += avail;
			d++;
			sd->eop = 0;
			sd++;
			if (++pidx == q->size) {
				pidx = 0;
				gen ^= 1;
				d = q->desc;
				sd = q->sdesc;
			}

			sd->skb = skb;
			wrp = (struct work_request_hdr *)d;
			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
					   V_WR_SGLSFLT(1)) | wr_hi;
			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
							sgl_flits + 1)) |
					   V_WR_GEN(gen)) | wr_lo;
			wr_gen2(d, gen);
			flits = 1;
		}
		sd->eop = 1;
		wrp->wr_hi |= htonl(F_WR_EOP);
		wmb();
		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
		wr_gen2((struct tx_desc *)wp, ogen);
		WARN_ON(ndesc != 0);
	}
}

/**
 * write_tx_pkt_wr - write a TX_PKT work request
 * @adap: the adapter
 * @skb: the packet to send
 * @pi: the egress interface
 * @pidx: index of the first Tx descriptor to write
 * @gen: the generation value to use
 * @q: the Tx queue
 * @ndesc: number of descriptors the packet will occupy
 * @compl: the value of the COMPL bit to use
 *
 * Generate a TX_PKT work request to send the supplied packet.
 */
static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
			    const struct port_info *pi,
			    unsigned int pidx, unsigned int gen,
			    struct sge_txq *q, unsigned int ndesc,
			    unsigned int compl)
{
	unsigned int flits, sgl_flits, cntrl, tso_info;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];
	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;

	cpl->len = htonl(skb->len | 0x80000000);
	cntrl = V_TXPKT_INTF(pi->port_id);

	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));

	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
	if (tso_info) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;

		d->flit[2] = 0;
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
		hdr->cntrl = htonl(cntrl);
		eth_type = skb_network_offset(skb) == ETH_HLEN ?
		    CPL_ETH_II : CPL_ETH_II_VLAN;
		tso_info |= V_LSO_ETH_TYPE(eth_type) |
		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
		hdr->lso_info = htonl(tso_info);
		flits = 3;
	} else {
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
		cpl->cntrl = htonl(cntrl);

		if (skb->len <= WR_LEN - sizeof(*cpl)) {
			q->sdesc[pidx].skb = NULL;
			if (!skb->data_len)
				skb_copy_from_linear_data(skb, &d->flit[2],
							  skb->len);
			else
				skb_copy_bits(skb, 0, &d->flit[2], skb->len);

			flits = (skb->len + 7) / 8 + 2;
			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
					      | F_WR_SOP | F_WR_EOP | compl);
			wmb();
			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
					      V_WR_TID(q->token));
			wr_gen2(d, gen);
			kfree_skb(skb);
			return;
		}

		flits = 2;
	}

	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
			 htonl(V_WR_TID(q->token)));
}

static inline void t3_stop_tx_queue(struct netdev_queue *txq,
				    struct sge_qset *qs, struct sge_txq *q)
{
	netif_tx_stop_queue(txq);
	set_bit(TXQ_ETH, &qs->txq_stopped);
	q->stops++;
}

/**
 * t3_eth_xmit - add a packet to the Ethernet Tx queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
 */
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int qidx;
	unsigned int ndesc, pidx, credits, gen, compl;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct netdev_queue *txq;
	struct sge_qset *qs;
	struct sge_txq *q;

	/*
	 * The chip min packet length is 9 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	qidx = skb_get_queue_mapping(skb);
	qs = &pi->qs[qidx];
	q = &qs->txq[TXQ_ETH];
	txq = netdev_get_tx_queue(dev, qidx);

	spin_lock(&q->lock);
	reclaim_completed_tx(adap, q);

	credits = q->size - q->in_use;
	ndesc = calc_tx_descs(skb);

	if (unlikely(credits < ndesc)) {
		t3_stop_tx_queue(txq, qs, q);
		dev_err(&adap->pdev->dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, q->cntxt_id & 7);
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	q->in_use += ndesc;
	if (unlikely(credits - ndesc < q->stop_thres)) {
		t3_stop_tx_queue(txq, qs, q);

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
			q->restarts++;
			netif_tx_wake_queue(txq);
		}
	}

	gen = q->gen;
	q->unacked += ndesc;
	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
	q->unacked &= 7;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}

	/* update port statistics */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
	if (skb_shinfo(skb)->gso_size)
		qs->port_stats[SGE_PSTAT_TSO]++;
	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		qs->port_stats[SGE_PSTAT_VLANINS]++;

	dev->trans_start = jiffies;
	spin_unlock(&q->lock);

	/*
	 * We do not use Tx completion interrupts to free DMAd Tx packets.
	 * This is good for performance but means that we rely on new Tx
	 * packets arriving to run the destructors of completed packets,
	 * which open up space in their sockets' send queues.  Sometimes
	 * we do not get such new packets, causing Tx to stall.  A single
	 * UDP transmitter is a good example of this situation.  We have
	 * a clean up timer that periodically reclaims completed packets
	 * but it doesn't run often enough (nor do we want it to) to prevent
	 * lengthy stalls.  A solution to this problem is to run the
	 * destructor early, after the packet is queued but before it's DMAd.
	 * A downside is that we lie to socket memory accounting, but the
	 * amount of extra memory is reasonable (limited by the number of Tx
	 * descriptors), the packets almost always do get freed quickly by
	 * new packets, and for protocols like TCP that wait for acks to
	 * really free up the data the extra memory is even less.
	 * On the positive side we run the destructors on the sending CPU
	 * rather than on a potentially different completing CPU, usually a
	 * good thing.  We also run them without holding our Tx queue lock,
	 * unlike what reclaim_completed_tx() would otherwise do.
	 *
	 * Run the destructor before telling the DMA engine about the packet
	 * to make sure it doesn't complete and get freed prematurely.
	 */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
	check_ring_tx_db(adap, q);
	return NETDEV_TX_OK;
}

/**
 * write_imm - write a packet into a Tx descriptor as immediate data
 * @d: the Tx descriptor to write
 * @skb: the packet
 * @len: the length of packet data to write as immediate data
 * @gen: the generation bit value to write
 *
 * Writes a packet as immediate data into a Tx descriptor.  The packet
 * contains a work request at its beginning.  We must write the packet
 * carefully so the SGE doesn't read it accidentally before it's written
 * in its entirety.
 */
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
			     unsigned int len, unsigned int gen)
{
	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
	struct work_request_hdr *to = (struct work_request_hdr *)d;

	if (likely(!skb->data_len))
		memcpy(&to[1], &from[1], len - sizeof(*from));
	else
		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));

	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
					V_WR_BCNTLFLT(len & 7));
	wmb();
	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
					V_WR_LEN((len + 7) / 8));
	wr_gen2(d, gen);
	kfree_skb(skb);
}

/**
 * check_desc_avail - check descriptor availability on a send queue
 * @adap: the adapter
 * @q: the send queue
 * @skb: the packet needing the descriptors
 * @ndesc: the number of Tx descriptors needed
 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 * Checks if the requested number of Tx descriptors is available on an
 * SGE send queue.  If the queue is already suspended or not enough
 * descriptors are available the packet is queued for later transmission.
 * Must be called with the Tx queue locked.
 *
 * Returns 0 if enough descriptors are available, 1 if there aren't
 * enough descriptors and the packet has been queued, and 2 if the caller
 * needs to retry because there weren't enough descriptors at the
 * beginning of the call but some freed up in the meantime.
 */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
				   struct sk_buff *skb, unsigned int ndesc,
				   unsigned int qid)
{
	if (unlikely(!skb_queue_empty(&q->sendq))) {
addq_exit:	__skb_queue_tail(&q->sendq, skb);
		return 1;
	}
	if (unlikely(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		set_bit(qid, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			return 2;

		q->stops++;
		goto addq_exit;
	}
	return 0;
}
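
/*
 * Calling-convention sketch (this is the idiom used by ctrl_xmit() and
 * ofld_xmit() below, shown here only for orientation):
 *
 *	spin_lock(&q->lock);
 * again:	reclaim_completed_tx(adap, q);
 *	ret = check_desc_avail(adap, q, skb, ndesc, qid);
 *	if (ret == 1)		// packet queued on q->sendq for later
 *		return NET_XMIT_CN after unlocking;
 *	if (ret == 2)		// descriptors freed up, retry
 *		goto again;
 *	// ret == 0: claim ndesc descriptors and write the WR
 */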

/**
 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 * @q: the SGE control Tx queue
 *
 * This is a variant of reclaim_completed_tx() that is used for Tx queues
 * that send only immediate data (presently just the control queues) and
 * thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	q->in_use -= reclaim;
	q->cleaned += reclaim;
}

static inline int immediate(const struct sk_buff *skb)
{
	return skb->len <= WR_LEN;
}

/**
 * ctrl_xmit - send a packet through an SGE control Tx queue
 * @adap: the adapter
 * @q: the control queue
 * @skb: the packet
 *
 * Send a packet through an SGE control Tx queue.  Packets sent through
 * a control queue must fit entirely as immediate data in a single Tx
 * descriptor and have no page fragments.
 */
static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;

	if (unlikely(!immediate(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
	wrp->wr_lo = htonl(V_WR_TID(q->token));

	spin_lock(&q->lock);
again:	reclaim_completed_tx_imm(q);

	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
	if (unlikely(ret)) {
		if (ret == 1) {
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

	q->in_use++;
	if (++q->pidx >= q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	return NET_XMIT_SUCCESS;
}

/**
 * restart_ctrlq - restart a suspended control queue
 * @qs: the queue set containing the control queue
 *
 * Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_CTRL];

	spin_lock(&q->lock);
again:	reclaim_completed_tx_imm(q);

	while (q->in_use < q->size &&
	       (skb = __skb_dequeue(&q->sendq)) != NULL) {

		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

		if (++q->pidx >= q->size) {
			q->pidx = 0;
			q->gen ^= 1;
		}
		q->in_use++;
	}

	if (!skb_queue_empty(&q->sendq)) {
		set_bit(TXQ_CTRL, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
			goto again;
		q->stops++;
	}

	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(qs->adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/*
 * Send a management message through control queue 0
 */
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
	local_bh_enable();

	return ret;
}

/**
 * deferred_unmap_destructor - unmap a packet when it is freed
 * @skb: the packet
 *
 * This is the packet destructor used for Tx packets that need to remain
 * mapped until they are freed rather than until their Tx descriptors are
 * freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	int i;
	const dma_addr_t *p;
	const struct skb_shared_info *si;
	const struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	p = dui->addr;

	if (skb->tail - skb->transport_header)
		pci_unmap_single(dui->pdev, *p++,
				 skb->tail - skb->transport_header,
				 PCI_DMA_TODEVICE);

	si = skb_shinfo(skb);
	for (i = 0; i < si->nr_frags; i++)
		pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
			       PCI_DMA_TODEVICE);
}

static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
				     const struct sg_ent *sgl, int sgl_flits)
{
	dma_addr_t *p;
	struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	dui->pdev = pdev;
	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
		*p++ = be64_to_cpu(sgl->addr[0]);
		*p++ = be64_to_cpu(sgl->addr[1]);
	}
	if (sgl_flits)
		*p = be64_to_cpu(sgl->addr[0]);
}
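
/*
 * Flow sketch: write_ofld_wr() below calls setup_deferred_unmapping()
 * right after make_sgl(), copying the DMA addresses out of the SGL into
 * the deferred_unmap_info at skb->head while they are still contiguous,
 * and then points skb->destructor at deferred_unmap_destructor().  The
 * buffers therefore stay mapped until the skb itself is freed, not merely
 * until its Tx descriptors are reclaimed.
 */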

/**
 * write_ofld_wr - write an offload work request
 * @adap: the adapter
 * @skb: the packet to send
 * @q: the Tx queue
 * @pidx: index of the first Tx descriptor to write
 * @gen: the generation value to use
 * @ndesc: number of descriptors the packet will occupy
 *
 * Write an offload work request to send the supplied packet.  The packet
 * data already carry the work request with most fields populated.
 */
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
			  struct sge_txq *q, unsigned int pidx,
			  unsigned int gen, unsigned int ndesc)
{
	unsigned int sgl_flits, flits;
	struct work_request_hdr *from;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];

	if (immediate(skb)) {
		q->sdesc[pidx].skb = NULL;
		write_imm(d, skb, skb->len, gen);
		return;
	}

	/* Only TX_DATA builds SGLs */

	from = (struct work_request_hdr *)skb->data;
	memcpy(&d->flit[1], &from[1],
	       skb_transport_offset(skb) - sizeof(*from));

	flits = skb_transport_offset(skb) / 8;
	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
			     skb->tail - skb->transport_header,
			     adap->pdev);
	if (need_skb_unmap()) {
		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
		skb->destructor = deferred_unmap_destructor;
	}

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
			 gen, from->wr_hi, from->wr_lo);
}
1535
1536/**
1537 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1538 * @skb: the packet
1539 *
1540 * Returns the number of Tx descriptors needed for the given offload
1541 * packet. These packets are already fully constructed.
1542 */
1543static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1544{
Divy Le Ray27186dc2007-08-21 20:49:15 -07001545 unsigned int flits, cnt;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001546
Divy Le Ray27186dc2007-08-21 20:49:15 -07001547 if (skb->len <= WR_LEN)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001548 return 1; /* packet fits as immediate data */
1549
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001550 flits = skb_transport_offset(skb) / 8; /* headers */
Divy Le Ray27186dc2007-08-21 20:49:15 -07001551 cnt = skb_shinfo(skb)->nr_frags;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001552 if (skb->tail != skb->transport_header)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001553 cnt++;
1554 return flits_to_desc(flits + sgl_len(cnt));
1555}
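
/*
 * A worked example for calc_tx_descs_ofld(): a packet longer than
 * WR_LEN whose headers end at skb_transport_offset() == 40 needs
 * flits = 40 / 8 = 5 header flits. With two page fragments and no
 * linear data past the transport header, cnt = 2; an SGL packs two
 * buffers into each 3-flit sg_ent, so sgl_len(2) = 3 and the packet
 * occupies flits_to_desc(5 + 3) descriptors.
 */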
1556
1557/**
1558 * ofld_xmit - send a packet through an offload queue
1559 * @adap: the adapter
1560 * @q: the Tx offload queue
1561 * @skb: the packet
1562 *
1563 * Send an offload packet through an SGE offload queue.
1564 */
1565static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1566 struct sk_buff *skb)
1567{
1568 int ret;
1569 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1570
1571 spin_lock(&q->lock);
1572 again:reclaim_completed_tx(adap, q);
1573
1574 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1575 if (unlikely(ret)) {
1576 if (ret == 1) {
1577 skb->priority = ndesc; /* save for restart */
1578 spin_unlock(&q->lock);
1579 return NET_XMIT_CN;
1580 }
1581 goto again;
1582 }
1583
1584 gen = q->gen;
1585 q->in_use += ndesc;
1586 pidx = q->pidx;
1587 q->pidx += ndesc;
1588 if (q->pidx >= q->size) {
1589 q->pidx -= q->size;
1590 q->gen ^= 1;
1591 }
1592 spin_unlock(&q->lock);
1593
1594 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1595 check_ring_tx_db(adap, q);
1596 return NET_XMIT_SUCCESS;
1597}
1598
1599/**
1600 * restart_offloadq - restart a suspended offload queue
1601 * @qs: the queue set containing the offload queue
1602 *
1603 * Resumes transmission on a suspended Tx offload queue.
1604 */
1605static void restart_offloadq(unsigned long data)
1606{
1607 struct sk_buff *skb;
1608 struct sge_qset *qs = (struct sge_qset *)data;
1609 struct sge_txq *q = &qs->txq[TXQ_OFLD];
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001610 const struct port_info *pi = netdev_priv(qs->netdev);
1611 struct adapter *adap = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001612
1613 spin_lock(&q->lock);
1614 again:reclaim_completed_tx(adap, q);
1615
1616 while ((skb = skb_peek(&q->sendq)) != NULL) {
1617 unsigned int gen, pidx;
1618 unsigned int ndesc = skb->priority;
1619
1620 if (unlikely(q->size - q->in_use < ndesc)) {
1621 set_bit(TXQ_OFLD, &qs->txq_stopped);
1622 smp_mb__after_clear_bit();
1623
1624 if (should_restart_tx(q) &&
1625 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1626 goto again;
1627 q->stops++;
1628 break;
1629 }
1630
1631 gen = q->gen;
1632 q->in_use += ndesc;
1633 pidx = q->pidx;
1634 q->pidx += ndesc;
1635 if (q->pidx >= q->size) {
1636 q->pidx -= q->size;
1637 q->gen ^= 1;
1638 }
1639 __skb_unlink(skb, &q->sendq);
1640 spin_unlock(&q->lock);
1641
1642 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1643 spin_lock(&q->lock);
1644 }
1645 spin_unlock(&q->lock);
1646
1647#if USE_GTS
1648 set_bit(TXQ_RUNNING, &q->flags);
1649 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1650#endif
Divy Le Rayafefce62007-11-16 11:22:21 -08001651 wmb();
Divy Le Ray4d22de32007-01-18 22:04:14 -05001652 t3_write_reg(adap, A_SG_KDOORBELL,
1653 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1654}
1655
1656/**
1657 * queue_set - return the queue set a packet should use
1658 * @skb: the packet
1659 *
1660 * Maps a packet to the SGE queue set it should use. The desired queue
1661 * set is carried in bits 1-3 in the packet's priority.
1662 */
1663static inline int queue_set(const struct sk_buff *skb)
1664{
1665 return skb->priority >> 1;
1666}
1667
1668/**
1669 * is_ctrl_pkt - return whether an offload packet is a control packet
1670 * @skb: the packet
1671 *
1672 * Determines whether an offload packet should use an OFLD or a CTRL
1673 * Tx queue. This is indicated by bit 0 in the packet's priority.
1674 */
1675static inline int is_ctrl_pkt(const struct sk_buff *skb)
1676{
1677 return skb->priority & 1;
1678}
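
/*
 * How a sender encodes the choice decoded by queue_set() and
 * is_ctrl_pkt(): bit 0 selects the CTRL vs OFLD queue, bits 1-3 the
 * queue set. A hypothetical packet aimed at queue set 2's control
 * queue would be marked as:
 *
 *	skb->priority = (2 << 1) | 1;
 */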
1679
1680/**
1681 * t3_offload_tx - send an offload packet
1682 * @tdev: the offload device to send to
1683 * @skb: the packet
1684 *
1685 * Sends an offload packet. We use the packet priority to select the
1686 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1687 * should be sent as regular or control, bits 1-3 select the queue set.
1688 */
1689int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1690{
1691 struct adapter *adap = tdev2adap(tdev);
1692 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1693
1694 if (unlikely(is_ctrl_pkt(skb)))
1695 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1696
1697 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1698}
1699
1700/**
1701 * offload_enqueue - add an offload packet to an SGE offload receive queue
1702 * @q: the SGE response queue
1703 * @skb: the packet
1704 *
1705 * Add a new offload packet to an SGE response queue's offload packet
1706 * queue. If the packet is the first on the queue it schedules the RX
1707 * softirq to process the queue.
1708 */
1709static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1710{
David S. Miller147e70e2008-09-22 01:29:52 -07001711 int was_empty = skb_queue_empty(&q->rx_queue);
1712
1713 __skb_queue_tail(&q->rx_queue, skb);
1714
1715 if (was_empty) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001716 struct sge_qset *qs = rspq_to_qset(q);
1717
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001718 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001719 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001720}
1721
1722/**
1723 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1724 * @tdev: the offload device that will be receiving the packets
1725 * @q: the SGE response queue that assembled the bundle
1726 * @skbs: the partial bundle
1727 * @n: the number of packets in the bundle
1728 *
1729 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1730 */
1731static inline void deliver_partial_bundle(struct t3cdev *tdev,
1732 struct sge_rspq *q,
1733 struct sk_buff *skbs[], int n)
1734{
1735 if (n) {
1736 q->offload_bundles++;
1737 tdev->recv(tdev, skbs, n);
1738 }
1739}
1740
1741/**
1742 * ofld_poll - NAPI handler for offload packets in interrupt mode
1743 * @dev: the network device doing the polling
1744 * @budget: polling budget
1745 *
1746 * The NAPI handler for offload packets when a response queue is serviced
1747 * by the hard interrupt handler, i.e., when it's operating in non-polling
1748 * mode. Creates small packet batches and sends them through the offload
1749 * receive handler. Batches need to be of modest size as we do prefetches
1750 * on the packets in each.
1751 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001752static int ofld_poll(struct napi_struct *napi, int budget)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001753{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001754 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001755 struct sge_rspq *q = &qs->rspq;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001756 struct adapter *adapter = qs->adap;
1757 int work_done = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001758
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001759 while (work_done < budget) {
David S. Miller147e70e2008-09-22 01:29:52 -07001760 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1761 struct sk_buff_head queue;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001762 int ngathered;
1763
1764 spin_lock_irq(&q->lock);
David S. Miller147e70e2008-09-22 01:29:52 -07001765 __skb_queue_head_init(&queue);
1766 skb_queue_splice_init(&q->rx_queue, &queue);
1767 if (skb_queue_empty(&queue)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001768 napi_complete(napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001769 spin_unlock_irq(&q->lock);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001770 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001771 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001772 spin_unlock_irq(&q->lock);
1773
David S. Miller147e70e2008-09-22 01:29:52 -07001774 ngathered = 0;
1775 skb_queue_walk_safe(&queue, skb, tmp) {
1776 if (work_done >= budget)
1777 break;
1778 work_done++;
1779
1780 __skb_unlink(skb, &queue);
1781 prefetch(skb->data);
1782 skbs[ngathered] = skb;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001783 if (++ngathered == RX_BUNDLE_SIZE) {
1784 q->offload_bundles++;
1785 adapter->tdev.recv(&adapter->tdev, skbs,
1786 ngathered);
1787 ngathered = 0;
1788 }
1789 }
David S. Miller147e70e2008-09-22 01:29:52 -07001790 if (!skb_queue_empty(&queue)) {
1791 /* splice remaining packets back onto Rx queue */
Divy Le Ray4d22de32007-01-18 22:04:14 -05001792 spin_lock_irq(&q->lock);
David S. Miller147e70e2008-09-22 01:29:52 -07001793 skb_queue_splice(&queue, &q->rx_queue);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001794 spin_unlock_irq(&q->lock);
1795 }
1796 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1797 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001798
1799 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001800}
1801
1802/**
1803 * rx_offload - process a received offload packet
1804 * @tdev: the offload device receiving the packet
1805 * @rq: the response queue that received the packet
1806 * @skb: the packet
1807 * @rx_gather: a gather list of packets if we are building a bundle
1808 * @gather_idx: index of the next available slot in the bundle
1809 *
 1810 * Process an ingress offload packet and add it to the offload ingress
1811 * queue. Returns the index of the next available slot in the bundle.
1812 */
1813static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1814 struct sk_buff *skb, struct sk_buff *rx_gather[],
1815 unsigned int gather_idx)
1816{
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001817 skb_reset_mac_header(skb);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001818 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001819 skb_reset_transport_header(skb);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001820
1821 if (rq->polling) {
1822 rx_gather[gather_idx++] = skb;
1823 if (gather_idx == RX_BUNDLE_SIZE) {
1824 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1825 gather_idx = 0;
1826 rq->offload_bundles++;
1827 }
1828 } else
1829 offload_enqueue(rq, skb);
1830
1831 return gather_idx;
1832}
1833
1834/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05001835 * restart_tx - check whether to restart suspended Tx queues
1836 * @qs: the queue set to resume
1837 *
1838 * Restarts suspended Tx queues of an SGE queue set if they have enough
1839 * free resources to resume operation.
1840 */
1841static void restart_tx(struct sge_qset *qs)
1842{
1843 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1844 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1845 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1846 qs->txq[TXQ_ETH].restarts++;
1847 if (netif_running(qs->netdev))
Divy Le Ray82ad3322008-12-16 01:09:39 -08001848 netif_tx_wake_queue(qs->tx_q);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001849 }
1850
1851 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1852 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1853 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1854 qs->txq[TXQ_OFLD].restarts++;
1855 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1856 }
1857 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1858 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1859 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1860 qs->txq[TXQ_CTRL].restarts++;
1861 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1862 }
1863}
1864
1865/**
Karen Xiea109a5b2008-12-18 22:56:20 -08001866 * cxgb3_arp_process - process an ARP request probing a private IP address
1867 * @adapter: the adapter
1868 * @skb: the skbuff containing the ARP request
1869 *
1870 * Check if the ARP request is probing the private IP address
 1871 * dedicated to iSCSI and generate an ARP reply if so.
1872 */
1873static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb)
1874{
1875 struct net_device *dev = skb->dev;
1876 struct port_info *pi;
1877 struct arphdr *arp;
1878 unsigned char *arp_ptr;
1879 unsigned char *sha;
1880 __be32 sip, tip;
1881
1882 if (!dev)
1883 return;
1884
1885 skb_reset_network_header(skb);
1886 arp = arp_hdr(skb);
1887
1888 if (arp->ar_op != htons(ARPOP_REQUEST))
1889 return;
1890
1891 arp_ptr = (unsigned char *)(arp + 1);
1892 sha = arp_ptr;
1893 arp_ptr += dev->addr_len;
1894 memcpy(&sip, arp_ptr, sizeof(sip));
1895 arp_ptr += sizeof(sip);
1896 arp_ptr += dev->addr_len;
1897 memcpy(&tip, arp_ptr, sizeof(tip));
1898
1899 pi = netdev_priv(dev);
1900 if (tip != pi->iscsi_ipv4addr)
1901 return;
1902
1903 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1904 dev->dev_addr, sha);
1905
1906}
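
/*
 * The pointer walk above follows the on-wire ARP payload layout for
 * Ethernet/IPv4: sender MAC (dev->addr_len bytes), sender IP (4),
 * target MAC (dev->addr_len), target IP (4). Only the sender MAC/IP
 * and the target IP are extracted; the target MAC is skipped.
 */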
1907
1908static inline int is_arp(struct sk_buff *skb)
1909{
1910 return skb->protocol == htons(ETH_P_ARP);
1911}
1912
1913/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05001914 * rx_eth - process an ingress ethernet packet
1915 * @adap: the adapter
1916 * @rq: the response queue that received the packet
1917 * @skb: the packet
1918 * @pad: amount of padding at the start of the buffer
1919 *
 1920 * Process an ingress ethernet packet and deliver it to the stack.
1921 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1922 * if it was immediate data in a response.
1923 */
1924static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
Divy Le Rayb47385b2008-05-21 18:56:26 -07001925 struct sk_buff *skb, int pad, int lro)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001926{
1927 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
Divy Le Rayb47385b2008-05-21 18:56:26 -07001928 struct sge_qset *qs = rspq_to_qset(rq);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001929 struct port_info *pi;
1930
Divy Le Ray4d22de32007-01-18 22:04:14 -05001931 skb_pull(skb, sizeof(*p) + pad);
Arnaldo Carvalho de Melo4c13eb62007-04-25 17:40:23 -07001932 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001933 pi = netdev_priv(skb->dev);
Roland Dreier47fd23f2009-01-11 00:19:36 -08001934 if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) &&
Divy Le Ray4d22de32007-01-18 22:04:14 -05001935 !p->fragment) {
Karen Xiea109a5b2008-12-18 22:56:20 -08001936 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001937 skb->ip_summed = CHECKSUM_UNNECESSARY;
1938 } else
1939 skb->ip_summed = CHECKSUM_NONE;
David S. Miller0c8dfc82009-01-27 16:22:32 -08001940 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001941
1942 if (unlikely(p->vlan_valid)) {
1943 struct vlan_group *grp = pi->vlan_grp;
1944
Divy Le Rayb47385b2008-05-21 18:56:26 -07001945 qs->port_stats[SGE_PSTAT_VLANEX]++;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001946 if (likely(grp))
Divy Le Rayb47385b2008-05-21 18:56:26 -07001947 if (lro)
Herbert Xu7be2df42009-01-21 14:39:13 -08001948 vlan_gro_receive(&qs->napi, grp,
1949 ntohs(p->vlan), skb);
Karen Xiea109a5b2008-12-18 22:56:20 -08001950 else {
1951 if (unlikely(pi->iscsi_ipv4addr &&
1952 is_arp(skb))) {
1953 unsigned short vtag = ntohs(p->vlan) &
1954 VLAN_VID_MASK;
1955 skb->dev = vlan_group_get_device(grp,
1956 vtag);
1957 cxgb3_arp_process(adap, skb);
1958 }
Divy Le Rayb47385b2008-05-21 18:56:26 -07001959 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1960 rq->polling);
Karen Xiea109a5b2008-12-18 22:56:20 -08001961 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001962 else
1963 dev_kfree_skb_any(skb);
Divy Le Rayb47385b2008-05-21 18:56:26 -07001964 } else if (rq->polling) {
1965 if (lro)
Herbert Xu7be2df42009-01-21 14:39:13 -08001966 napi_gro_receive(&qs->napi, skb);
Karen Xiea109a5b2008-12-18 22:56:20 -08001967 else {
1968 if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
1969 cxgb3_arp_process(adap, skb);
Divy Le Rayb47385b2008-05-21 18:56:26 -07001970 netif_receive_skb(skb);
Karen Xiea109a5b2008-12-18 22:56:20 -08001971 }
Divy Le Rayb47385b2008-05-21 18:56:26 -07001972 } else
Divy Le Ray4d22de32007-01-18 22:04:14 -05001973 netif_rx(skb);
1974}
1975
Divy Le Rayb47385b2008-05-21 18:56:26 -07001976static inline int is_eth_tcp(u32 rss)
1977{
1978 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
1979}
1980
1981/**
Divy Le Rayb47385b2008-05-21 18:56:26 -07001982 * lro_add_page - add a page chunk to an LRO session
1983 * @adap: the adapter
1984 * @qs: the associated queue set
1985 * @fl: the free list containing the page chunk to add
1986 * @len: packet length
1987 * @complete: Indicates the last fragment of a frame
1988 *
1989 * Add a received packet contained in a page chunk to an existing LRO
1990 * session.
1991 */
1992static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
1993 struct sge_fl *fl, int len, int complete)
1994{
1995 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1996 struct cpl_rx_pkt *cpl;
Herbert Xu7be2df42009-01-21 14:39:13 -08001997 struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags;
1998 int nr_frags = qs->lro_frag_tbl.nr_frags;
1999 int frag_len = qs->lro_frag_tbl.len;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002000 int offset = 0;
2001
2002 if (!nr_frags) {
2003 offset = 2 + sizeof(struct cpl_rx_pkt);
2004 qs->lro_va = cpl = sd->pg_chunk.va + 2;
2005 }
2006
2007 fl->credits--;
2008
2009 len -= offset;
2010 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
2011 fl->buf_size, PCI_DMA_FROMDEVICE);
2012
2013 rx_frag += nr_frags;
2014 rx_frag->page = sd->pg_chunk.page;
2015 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2016 rx_frag->size = len;
2017 frag_len += len;
Herbert Xu7be2df42009-01-21 14:39:13 -08002018 qs->lro_frag_tbl.nr_frags++;
2019 qs->lro_frag_tbl.len = frag_len;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002020
2021 if (!complete)
2022 return;
2023
Herbert Xu7be2df42009-01-21 14:39:13 -08002024 qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002025 cpl = qs->lro_va;
2026
2027 if (unlikely(cpl->vlan_valid)) {
2028 struct net_device *dev = qs->netdev;
2029 struct port_info *pi = netdev_priv(dev);
2030 struct vlan_group *grp = pi->vlan_grp;
2031
2032 if (likely(grp != NULL)) {
Herbert Xu7be2df42009-01-21 14:39:13 -08002033 vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan),
2034 &qs->lro_frag_tbl);
2035 goto out;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002036 }
2037 }
Herbert Xu7be2df42009-01-21 14:39:13 -08002038 napi_gro_frags(&qs->napi, &qs->lro_frag_tbl);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002039
Herbert Xu7be2df42009-01-21 14:39:13 -08002040out:
2041 qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002042}
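
/*
 * Offset note for lro_add_page(): only the first chunk of a frame has
 * the 2-byte Rx pad and the CPL header in front of the payload, hence
 * offset = 2 + sizeof(struct cpl_rx_pkt) when the fragment table is
 * empty; continuation chunks are pure payload and use offset 0.
 */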
2043
Divy Le Ray4d22de32007-01-18 22:04:14 -05002044/**
2045 * handle_rsp_cntrl_info - handles control information in a response
2046 * @qs: the queue set corresponding to the response
2047 * @flags: the response control flags
Divy Le Ray4d22de32007-01-18 22:04:14 -05002048 *
2049 * Handles the control information of an SGE response, such as GTS
2050 * indications and completion credits for the queue set's Tx queues.
Divy Le Ray6195c712007-01-30 19:43:56 -08002051 * HW coalesces credits; we don't do any extra SW coalescing.
Divy Le Ray4d22de32007-01-18 22:04:14 -05002052 */
Divy Le Ray6195c712007-01-30 19:43:56 -08002053static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002054{
2055 unsigned int credits;
2056
2057#if USE_GTS
2058 if (flags & F_RSPD_TXQ0_GTS)
2059 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2060#endif
2061
Divy Le Ray4d22de32007-01-18 22:04:14 -05002062 credits = G_RSPD_TXQ0_CR(flags);
2063 if (credits)
2064 qs->txq[TXQ_ETH].processed += credits;
2065
Divy Le Ray6195c712007-01-30 19:43:56 -08002066 credits = G_RSPD_TXQ2_CR(flags);
2067 if (credits)
2068 qs->txq[TXQ_CTRL].processed += credits;
2069
Divy Le Ray4d22de32007-01-18 22:04:14 -05002070# if USE_GTS
2071 if (flags & F_RSPD_TXQ1_GTS)
2072 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2073# endif
Divy Le Ray6195c712007-01-30 19:43:56 -08002074 credits = G_RSPD_TXQ1_CR(flags);
2075 if (credits)
2076 qs->txq[TXQ_OFLD].processed += credits;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002077}
2078
2079/**
2080 * check_ring_db - check if we need to ring any doorbells
2081 * @adapter: the adapter
2082 * @qs: the queue set whose Tx queues are to be examined
2083 * @sleeping: indicates which Tx queue sent GTS
2084 *
2085 * Checks if some of a queue set's Tx queues need to ring their doorbells
2086 * to resume transmission after idling while they still have unprocessed
2087 * descriptors.
2088 */
2089static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2090 unsigned int sleeping)
2091{
2092 if (sleeping & F_RSPD_TXQ0_GTS) {
2093 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2094
2095 if (txq->cleaned + txq->in_use != txq->processed &&
2096 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2097 set_bit(TXQ_RUNNING, &txq->flags);
2098 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2099 V_EGRCNTX(txq->cntxt_id));
2100 }
2101 }
2102
2103 if (sleeping & F_RSPD_TXQ1_GTS) {
2104 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2105
2106 if (txq->cleaned + txq->in_use != txq->processed &&
2107 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2108 set_bit(TXQ_RUNNING, &txq->flags);
2109 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2110 V_EGRCNTX(txq->cntxt_id));
2111 }
2112 }
2113}
2114
2115/**
2116 * is_new_response - check if a response is newly written
2117 * @r: the response descriptor
2118 * @q: the response queue
2119 *
2120 * Returns true if a response descriptor contains a yet unprocessed
2121 * response.
2122 */
2123static inline int is_new_response(const struct rsp_desc *r,
2124 const struct sge_rspq *q)
2125{
2126 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2127}
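
/*
 * Generation-bit sketch: q->gen starts at 1 and flips each time the
 * software index wraps (see process_responses() below), while the
 * hardware stamps its current generation into each descriptor's
 * intr_gen field. A matching F_RSPD_GEN2 bit therefore identifies a
 * descriptor written since software last visited that slot.
 */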
2128
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002129static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2130{
2131 q->pg_skb = NULL;
2132 q->rx_recycle_buf = 0;
2133}
2134
Divy Le Ray4d22de32007-01-18 22:04:14 -05002135#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2136#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2137 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2138 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2139 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2140
2141/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2142#define NOMEM_INTR_DELAY 2500
2143
2144/**
2145 * process_responses - process responses from an SGE response queue
2146 * @adap: the adapter
2147 * @qs: the queue set to which the response queue belongs
2148 * @budget: how many responses can be processed in this round
2149 *
2150 * Process responses from an SGE response queue up to the supplied budget.
2151 * Responses include received packets as well as credits and other events
2152 * for the queues that belong to the response queue's queue set.
2153 * A negative budget is effectively unlimited.
2154 *
2155 * Additionally choose the interrupt holdoff time for the next interrupt
2156 * on this queue. If the system is under memory shortage use a fairly
2157 * long delay to help recovery.
2158 */
2159static int process_responses(struct adapter *adap, struct sge_qset *qs,
2160 int budget)
2161{
2162 struct sge_rspq *q = &qs->rspq;
2163 struct rsp_desc *r = &q->desc[q->cidx];
2164 int budget_left = budget;
Divy Le Ray6195c712007-01-30 19:43:56 -08002165 unsigned int sleeping = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002166 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2167 int ngathered = 0;
2168
2169 q->next_holdoff = q->holdoff_tmr;
2170
2171 while (likely(budget_left && is_new_response(r, q))) {
Divy Le Rayb47385b2008-05-21 18:56:26 -07002172 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002173 struct sk_buff *skb = NULL;
2174 u32 len, flags = ntohl(r->flags);
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002175 __be32 rss_hi = *(const __be32 *)r,
2176 rss_lo = r->rss_hdr.rss_hash_val;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002177
2178 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2179
2180 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2181 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2182 if (!skb)
2183 goto no_mem;
2184
2185 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2186 skb->data[0] = CPL_ASYNC_NOTIF;
2187 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2188 q->async_notif++;
2189 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2190 skb = get_imm_packet(r);
2191 if (unlikely(!skb)) {
Divy Le Raycf992af2007-05-30 21:10:47 -07002192no_mem:
Divy Le Ray4d22de32007-01-18 22:04:14 -05002193 q->next_holdoff = NOMEM_INTR_DELAY;
2194 q->nomem++;
2195 /* consume one credit since we tried */
2196 budget_left--;
2197 break;
2198 }
2199 q->imm_data++;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002200 ethpad = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002201 } else if ((len = ntohl(r->len_cq)) != 0) {
Divy Le Raycf992af2007-05-30 21:10:47 -07002202 struct sge_fl *fl;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002203
Divy Le Rayb47385b2008-05-21 18:56:26 -07002204 if (eth)
2205 lro = qs->lro_enabled && is_eth_tcp(rss_hi);
2206
Divy Le Raycf992af2007-05-30 21:10:47 -07002207 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2208 if (fl->use_pages) {
2209 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002210
Divy Le Raycf992af2007-05-30 21:10:47 -07002211 prefetch(addr);
2212#if L1_CACHE_BYTES < 128
2213 prefetch(addr + L1_CACHE_BYTES);
2214#endif
Divy Le Raye0994eb2007-02-24 16:44:17 -08002215 __refill_fl(adap, fl);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002216 if (lro > 0) {
2217 lro_add_page(adap, qs, fl,
2218 G_RSPD_LEN(len),
2219 flags & F_RSPD_EOP);
2220 goto next_fl;
2221 }
Divy Le Raye0994eb2007-02-24 16:44:17 -08002222
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002223 skb = get_packet_pg(adap, fl, q,
2224 G_RSPD_LEN(len),
2225 eth ?
2226 SGE_RX_DROP_THRES : 0);
2227 q->pg_skb = skb;
Divy Le Raycf992af2007-05-30 21:10:47 -07002228 } else
Divy Le Raye0994eb2007-02-24 16:44:17 -08002229 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2230 eth ? SGE_RX_DROP_THRES : 0);
Divy Le Raycf992af2007-05-30 21:10:47 -07002231 if (unlikely(!skb)) {
2232 if (!eth)
2233 goto no_mem;
2234 q->rx_drops++;
2235 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2236 __skb_pull(skb, 2);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002237next_fl:
Divy Le Ray4d22de32007-01-18 22:04:14 -05002238 if (++fl->cidx == fl->size)
2239 fl->cidx = 0;
2240 } else
2241 q->pure_rsps++;
2242
2243 if (flags & RSPD_CTRL_MASK) {
2244 sleeping |= flags & RSPD_GTS_MASK;
Divy Le Ray6195c712007-01-30 19:43:56 -08002245 handle_rsp_cntrl_info(qs, flags);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002246 }
2247
2248 r++;
2249 if (unlikely(++q->cidx == q->size)) {
2250 q->cidx = 0;
2251 q->gen ^= 1;
2252 r = q->desc;
2253 }
2254 prefetch(r);
2255
2256 if (++q->credits >= (q->size / 4)) {
2257 refill_rspq(adap, q, q->credits);
2258 q->credits = 0;
2259 }
2260
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002261 packet_complete = flags &
2262 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2263 F_RSPD_ASYNC_NOTIF);
2264
2265 if (skb != NULL && packet_complete) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05002266 if (eth)
Divy Le Rayb47385b2008-05-21 18:56:26 -07002267 rx_eth(adap, q, skb, ethpad, lro);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002268 else {
Divy Le Rayafefce62007-11-16 11:22:21 -08002269 q->offload_pkts++;
Divy Le Raycf992af2007-05-30 21:10:47 -07002270 /* Preserve the RSS info in csum & priority */
2271 skb->csum = rss_hi;
2272 skb->priority = rss_lo;
2273 ngathered = rx_offload(&adap->tdev, q, skb,
2274 offload_skbs,
Divy Le Raye0994eb2007-02-24 16:44:17 -08002275 ngathered);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002276 }
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002277
2278 if (flags & F_RSPD_EOP)
Divy Le Rayb47385b2008-05-21 18:56:26 -07002279 clear_rspq_bufstate(q);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002280 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002281 --budget_left;
2282 }
2283
Divy Le Ray4d22de32007-01-18 22:04:14 -05002284 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002285
Divy Le Ray4d22de32007-01-18 22:04:14 -05002286 if (sleeping)
2287 check_ring_db(adap, qs, sleeping);
2288
2289 smp_mb(); /* commit Tx queue .processed updates */
2290 if (unlikely(qs->txq_stopped != 0))
2291 restart_tx(qs);
2292
2293 budget -= budget_left;
2294 return budget;
2295}
2296
2297static inline int is_pure_response(const struct rsp_desc *r)
2298{
Roland Dreierc5419e62008-11-28 21:55:42 -08002299 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002300
2301 return (n | r->len_cq) == 0;
2302}
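
/*
 * That is, a pure response carries no async notification, no immediate
 * data, and no free-list buffer (len_cq == 0); it exists only for the
 * control information handled by handle_rsp_cntrl_info().
 */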
2303
2304/**
2305 * napi_rx_handler - the NAPI handler for Rx processing
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002306 * @napi: the napi instance
Divy Le Ray4d22de32007-01-18 22:04:14 -05002307 * @budget: how many packets we can process in this round
2308 *
2309 * Handler for new data events when using NAPI.
2310 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002311static int napi_rx_handler(struct napi_struct *napi, int budget)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002312{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002313 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2314 struct adapter *adap = qs->adap;
2315 int work_done = process_responses(adap, qs, budget);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002316
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002317 if (likely(work_done < budget)) {
2318 napi_complete(napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002319
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002320 /*
2321 * Because we don't atomically flush the following
2322 * write it is possible that in very rare cases it can
2323 * reach the device in a way that races with a new
2324 * response being written plus an error interrupt
2325 * causing the NAPI interrupt handler below to return
2326 * unhandled status to the OS. To protect against
2327 * this would require flushing the write and doing
2328 * both the write and the flush with interrupts off.
2329 * Way too expensive and unjustifiable given the
2330 * rarity of the race.
2331 *
2332 * The race cannot happen at all with MSI-X.
2333 */
2334 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2335 V_NEWTIMER(qs->rspq.next_holdoff) |
2336 V_NEWINDEX(qs->rspq.cidx));
2337 }
2338 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002339}
2340
2341/*
2342 * Returns true if the device is already scheduled for polling.
2343 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002344static inline int napi_is_scheduled(struct napi_struct *napi)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002345{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002346 return test_bit(NAPI_STATE_SCHED, &napi->state);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002347}
2348
2349/**
2350 * process_pure_responses - process pure responses from a response queue
2351 * @adap: the adapter
2352 * @qs: the queue set owning the response queue
2353 * @r: the first pure response to process
2354 *
2355 * A simpler version of process_responses() that handles only pure (i.e.,
 2356 * non data-carrying) responses. Such responses are too lightweight to
2357 * justify calling a softirq under NAPI, so we handle them specially in
2358 * the interrupt handler. The function is called with a pointer to a
2359 * response, which the caller must ensure is a valid pure response.
2360 *
2361 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2362 */
2363static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2364 struct rsp_desc *r)
2365{
2366 struct sge_rspq *q = &qs->rspq;
Divy Le Ray6195c712007-01-30 19:43:56 -08002367 unsigned int sleeping = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002368
2369 do {
2370 u32 flags = ntohl(r->flags);
2371
2372 r++;
2373 if (unlikely(++q->cidx == q->size)) {
2374 q->cidx = 0;
2375 q->gen ^= 1;
2376 r = q->desc;
2377 }
2378 prefetch(r);
2379
2380 if (flags & RSPD_CTRL_MASK) {
2381 sleeping |= flags & RSPD_GTS_MASK;
Divy Le Ray6195c712007-01-30 19:43:56 -08002382 handle_rsp_cntrl_info(qs, flags);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002383 }
2384
2385 q->pure_rsps++;
2386 if (++q->credits >= (q->size / 4)) {
2387 refill_rspq(adap, q, q->credits);
2388 q->credits = 0;
2389 }
2390 } while (is_new_response(r, q) && is_pure_response(r));
2391
Divy Le Ray4d22de32007-01-18 22:04:14 -05002392 if (sleeping)
2393 check_ring_db(adap, qs, sleeping);
2394
2395 smp_mb(); /* commit Tx queue .processed updates */
2396 if (unlikely(qs->txq_stopped != 0))
2397 restart_tx(qs);
2398
2399 return is_new_response(r, q);
2400}
2401
2402/**
2403 * handle_responses - decide what to do with new responses in NAPI mode
2404 * @adap: the adapter
2405 * @q: the response queue
2406 *
2407 * This is used by the NAPI interrupt handlers to decide what to do with
2408 * new SGE responses. If there are no new responses it returns -1. If
2409 * there are new responses and they are pure (i.e., non-data carrying)
2410 * it handles them straight in hard interrupt context as they are very
2411 * cheap and don't deliver any packets. Finally, if there are any data
2412 * signaling responses it schedules the NAPI handler. Returns 1 if it
2413 * schedules NAPI, 0 if all new responses were pure.
2414 *
2415 * The caller must ascertain NAPI is not already running.
2416 */
2417static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2418{
2419 struct sge_qset *qs = rspq_to_qset(q);
2420 struct rsp_desc *r = &q->desc[q->cidx];
2421
2422 if (!is_new_response(r, q))
2423 return -1;
2424 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2425 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2426 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2427 return 0;
2428 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002429 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002430 return 1;
2431}
2432
2433/*
2434 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2435 * (i.e., response queue serviced in hard interrupt).
2436 */
2437irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2438{
2439 struct sge_qset *qs = cookie;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002440 struct adapter *adap = qs->adap;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002441 struct sge_rspq *q = &qs->rspq;
2442
2443 spin_lock(&q->lock);
2444 if (process_responses(adap, qs, -1) == 0)
2445 q->unhandled_irqs++;
2446 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2447 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2448 spin_unlock(&q->lock);
2449 return IRQ_HANDLED;
2450}
2451
2452/*
2453 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2454 * (i.e., response queue serviced by NAPI polling).
2455 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -07002456static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002457{
2458 struct sge_qset *qs = cookie;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002459 struct sge_rspq *q = &qs->rspq;
2460
2461 spin_lock(&q->lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002462
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002463 if (handle_responses(qs->adap, q) < 0)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002464 q->unhandled_irqs++;
2465 spin_unlock(&q->lock);
2466 return IRQ_HANDLED;
2467}
2468
2469/*
2470 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2471 * SGE response queues as well as error and other async events as they all use
2472 * the same MSI vector. We use one SGE response queue per port in this mode
2473 * and protect all response queues with queue 0's lock.
2474 */
2475static irqreturn_t t3_intr_msi(int irq, void *cookie)
2476{
2477 int new_packets = 0;
2478 struct adapter *adap = cookie;
2479 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2480
2481 spin_lock(&q->lock);
2482
2483 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2484 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2485 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2486 new_packets = 1;
2487 }
2488
2489 if (adap->params.nports == 2 &&
2490 process_responses(adap, &adap->sge.qs[1], -1)) {
2491 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2492
2493 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2494 V_NEWTIMER(q1->next_holdoff) |
2495 V_NEWINDEX(q1->cidx));
2496 new_packets = 1;
2497 }
2498
2499 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2500 q->unhandled_irqs++;
2501
2502 spin_unlock(&q->lock);
2503 return IRQ_HANDLED;
2504}
2505
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002506static int rspq_check_napi(struct sge_qset *qs)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002507{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002508 struct sge_rspq *q = &qs->rspq;
2509
2510 if (!napi_is_scheduled(&qs->napi) &&
2511 is_new_response(&q->desc[q->cidx], q)) {
2512 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002513 return 1;
2514 }
2515 return 0;
2516}
2517
2518/*
2519 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2520 * by NAPI polling). Handles data events from SGE response queues as well as
2521 * error and other async events as they all use the same MSI vector. We use
2522 * one SGE response queue per port in this mode and protect all response
2523 * queues with queue 0's lock.
2524 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -07002525static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002526{
2527 int new_packets;
2528 struct adapter *adap = cookie;
2529 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2530
2531 spin_lock(&q->lock);
2532
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002533 new_packets = rspq_check_napi(&adap->sge.qs[0]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002534 if (adap->params.nports == 2)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002535 new_packets += rspq_check_napi(&adap->sge.qs[1]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002536 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2537 q->unhandled_irqs++;
2538
2539 spin_unlock(&q->lock);
2540 return IRQ_HANDLED;
2541}
2542
2543/*
2544 * A helper function that processes responses and issues GTS.
2545 */
2546static inline int process_responses_gts(struct adapter *adap,
2547 struct sge_rspq *rq)
2548{
2549 int work;
2550
2551 work = process_responses(adap, rspq_to_qset(rq), -1);
2552 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2553 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2554 return work;
2555}
2556
2557/*
2558 * The legacy INTx interrupt handler. This needs to handle data events from
2559 * SGE response queues as well as error and other async events as they all use
2560 * the same interrupt pin. We use one SGE response queue per port in this mode
2561 * and protect all response queues with queue 0's lock.
2562 */
2563static irqreturn_t t3_intr(int irq, void *cookie)
2564{
2565 int work_done, w0, w1;
2566 struct adapter *adap = cookie;
2567 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2568 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2569
2570 spin_lock(&q0->lock);
2571
2572 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2573 w1 = adap->params.nports == 2 &&
2574 is_new_response(&q1->desc[q1->cidx], q1);
2575
2576 if (likely(w0 | w1)) {
2577 t3_write_reg(adap, A_PL_CLI, 0);
2578 t3_read_reg(adap, A_PL_CLI); /* flush */
2579
2580 if (likely(w0))
2581 process_responses_gts(adap, q0);
2582
2583 if (w1)
2584 process_responses_gts(adap, q1);
2585
2586 work_done = w0 | w1;
2587 } else
2588 work_done = t3_slow_intr_handler(adap);
2589
2590 spin_unlock(&q0->lock);
2591 return IRQ_RETVAL(work_done != 0);
2592}
2593
2594/*
2595 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2596 * Handles data events from SGE response queues as well as error and other
2597 * async events as they all use the same interrupt pin. We use one SGE
2598 * response queue per port in this mode and protect all response queues with
2599 * queue 0's lock.
2600 */
2601static irqreturn_t t3b_intr(int irq, void *cookie)
2602{
2603 u32 map;
2604 struct adapter *adap = cookie;
2605 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2606
2607 t3_write_reg(adap, A_PL_CLI, 0);
2608 map = t3_read_reg(adap, A_SG_DATA_INTR);
2609
2610 if (unlikely(!map)) /* shared interrupt, most likely */
2611 return IRQ_NONE;
2612
2613 spin_lock(&q0->lock);
2614
2615 if (unlikely(map & F_ERRINTR))
2616 t3_slow_intr_handler(adap);
2617
2618 if (likely(map & 1))
2619 process_responses_gts(adap, q0);
2620
2621 if (map & 2)
2622 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2623
2624 spin_unlock(&q0->lock);
2625 return IRQ_HANDLED;
2626}
2627
2628/*
2629 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2630 * Handles data events from SGE response queues as well as error and other
2631 * async events as they all use the same interrupt pin. We use one SGE
2632 * response queue per port in this mode and protect all response queues with
2633 * queue 0's lock.
2634 */
2635static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2636{
2637 u32 map;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002638 struct adapter *adap = cookie;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002639 struct sge_qset *qs0 = &adap->sge.qs[0];
2640 struct sge_rspq *q0 = &qs0->rspq;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002641
2642 t3_write_reg(adap, A_PL_CLI, 0);
2643 map = t3_read_reg(adap, A_SG_DATA_INTR);
2644
2645 if (unlikely(!map)) /* shared interrupt, most likely */
2646 return IRQ_NONE;
2647
2648 spin_lock(&q0->lock);
2649
2650 if (unlikely(map & F_ERRINTR))
2651 t3_slow_intr_handler(adap);
2652
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002653 if (likely(map & 1))
2654 napi_schedule(&qs0->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002655
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002656 if (map & 2)
2657 napi_schedule(&adap->sge.qs[1].napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002658
2659 spin_unlock(&q0->lock);
2660 return IRQ_HANDLED;
2661}
2662
2663/**
2664 * t3_intr_handler - select the top-level interrupt handler
2665 * @adap: the adapter
2666 * @polling: whether using NAPI to service response queues
2667 *
2668 * Selects the top-level interrupt handler based on the type of interrupts
2669 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2670 * response queues.
2671 */
Jeff Garzik7c239972007-10-19 03:12:20 -04002672irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002673{
2674 if (adap->flags & USING_MSIX)
2675 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2676 if (adap->flags & USING_MSI)
2677 return polling ? t3_intr_msi_napi : t3_intr_msi;
2678 if (adap->params.rev > 0)
2679 return polling ? t3b_intr_napi : t3b_intr;
2680 return t3_intr;
2681}
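
/*
 * The resulting dispatch (NAPI handler listed first, non-NAPI second):
 *
 *	MSI-X:		t3_sge_intr_msix_napi / t3_sge_intr_msix
 *	MSI:		t3_intr_msi_napi      / t3_intr_msi
 *	INTx, rev > 0:	t3b_intr_napi         / t3b_intr
 *	INTx, rev 0:	t3_intr (both modes)
 */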
2682
Divy Le Rayb8819552007-12-17 18:47:31 -08002683#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2684 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2685 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2686 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2687 F_HIRCQPARITYERROR)
2688#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2689#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2690 F_RSPQDISABLED)
2691
Divy Le Ray4d22de32007-01-18 22:04:14 -05002692/**
2693 * t3_sge_err_intr_handler - SGE async event interrupt handler
2694 * @adapter: the adapter
2695 *
2696 * Interrupt handler for SGE asynchronous (non-data) events.
2697 */
2698void t3_sge_err_intr_handler(struct adapter *adapter)
2699{
2700 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2701
Divy Le Rayb8819552007-12-17 18:47:31 -08002702 if (status & SGE_PARERR)
2703 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2704 status & SGE_PARERR);
2705 if (status & SGE_FRAMINGERR)
2706 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2707 status & SGE_FRAMINGERR);
2708
Divy Le Ray4d22de32007-01-18 22:04:14 -05002709 if (status & F_RSPQCREDITOVERFOW)
2710 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2711
2712 if (status & F_RSPQDISABLED) {
2713 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2714
2715 CH_ALERT(adapter,
2716 "packet delivered to disabled response queue "
2717 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2718 }
2719
Divy Le Ray6e3f03b2007-08-21 20:49:10 -07002720 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2721 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
2722 status & F_HIPIODRBDROPERR ? "high" : "lo");
2723
Divy Le Ray4d22de32007-01-18 22:04:14 -05002724 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
Divy Le Rayb8819552007-12-17 18:47:31 -08002725 if (status & SGE_FATALERR)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002726 t3_fatal_err(adapter);
2727}
2728
2729/**
2730 * sge_timer_cb - perform periodic maintenance of an SGE qset
2731 * @data: the SGE queue set to maintain
2732 *
2733 * Runs periodically from a timer to perform maintenance of an SGE queue
2734 * set. It performs two tasks:
2735 *
2736 * a) Cleans up any completed Tx descriptors that may still be pending.
2737 * Normal descriptor cleanup happens when new packets are added to a Tx
2738 * queue so this timer is relatively infrequent and does any cleanup only
2739 * if the Tx queue has not seen any new packets in a while. We make a
 2740 * best-effort attempt to reclaim descriptors, in that we don't wait
2741 * around if we cannot get a queue's lock (which most likely is because
2742 * someone else is queueing new packets and so will also handle the clean
2743 * up). Since control queues use immediate data exclusively we don't
2744 * bother cleaning them up here.
2745 *
2746 * b) Replenishes Rx queues that have run out due to memory shortage.
2747 * Normally new Rx buffers are added when existing ones are consumed but
2748 * when out of memory a queue can become empty. We try to add only a few
 2749 * buffers here; the queue will be replenished fully as these new buffers
2750 * are used up if memory shortage has subsided.
2751 */
2752static void sge_timer_cb(unsigned long data)
2753{
2754 spinlock_t *lock;
2755 struct sge_qset *qs = (struct sge_qset *)data;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002756 struct adapter *adap = qs->adap;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002757
2758 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2759 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2760 spin_unlock(&qs->txq[TXQ_ETH].lock);
2761 }
2762 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2763 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2764 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2765 }
2766 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002767 &adap->sge.qs[0].rspq.lock;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002768 if (spin_trylock_irq(lock)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002769 if (!napi_is_scheduled(&qs->napi)) {
Divy Le Raybae73f42007-02-24 16:44:12 -08002770 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2771
Divy Le Ray4d22de32007-01-18 22:04:14 -05002772 if (qs->fl[0].credits < qs->fl[0].size)
2773 __refill_fl(adap, &qs->fl[0]);
2774 if (qs->fl[1].credits < qs->fl[1].size)
2775 __refill_fl(adap, &qs->fl[1]);
Divy Le Raybae73f42007-02-24 16:44:12 -08002776
2777 if (status & (1 << qs->rspq.cntxt_id)) {
2778 qs->rspq.starved++;
2779 if (qs->rspq.credits) {
2780 refill_rspq(adap, &qs->rspq, 1);
2781 qs->rspq.credits--;
2782 qs->rspq.restarted++;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002783 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
Divy Le Raybae73f42007-02-24 16:44:12 -08002784 1 << qs->rspq.cntxt_id);
2785 }
2786 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002787 }
2788 spin_unlock_irq(lock);
2789 }
2790 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2791}
2792
2793/**
2794 * t3_update_qset_coalesce - update coalescing settings for a queue set
2795 * @qs: the SGE queue set
2796 * @p: new queue set parameters
2797 *
2798 * Update the coalescing settings for an SGE queue set. Nothing is done
2799 * if the queue set is not initialized yet.
2800 */
2801void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2802{
Divy Le Ray4d22de32007-01-18 22:04:14 -05002803 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2804 qs->rspq.polling = p->polling;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002805 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002806}
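
/*
 * The holdoff timer is programmed in 0.1 us units, so a requested
 * coalesce_usecs of 5, for example, becomes holdoff_tmr = 50, and a
 * request of 0 is clamped to the minimum of 1 (0.1 us).
 */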
2807
2808/**
2809 * t3_sge_alloc_qset - initialize an SGE queue set
2810 * @adapter: the adapter
2811 * @id: the queue set id
2812 * @nports: how many Ethernet ports will be using this queue set
2813 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2814 * @p: configuration parameters for this queue set
2815 * @ntxq: number of Tx queues for the queue set
2816 * @netdev: net device associated with this queue set
Divy Le Ray82ad3322008-12-16 01:09:39 -08002817 * @netdevq: net device TX queue associated with this queue set
Divy Le Ray4d22de32007-01-18 22:04:14 -05002818 *
2819 * Allocate resources and initialize an SGE queue set. A queue set
2820 * comprises a response queue, two Rx free-buffer queues, and up to 3
2821 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2822 * queue, offload queue, and control queue.
2823 */
2824int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2825 int irq_vec_idx, const struct qset_params *p,
Divy Le Ray82ad3322008-12-16 01:09:39 -08002826 int ntxq, struct net_device *dev,
2827 struct netdev_queue *netdevq)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002828{
Divy Le Rayb1fb1f22008-05-21 18:56:16 -07002829 int i, avail, ret = -ENOMEM;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002830 struct sge_qset *q = &adapter->sge.qs[id];
2831
2832 init_qset_cntxt(q, id);
Divy Le Ray20d3fc12008-10-08 17:36:03 -07002833 setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002834
2835 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2836 sizeof(struct rx_desc),
2837 sizeof(struct rx_sw_desc),
2838 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2839 if (!q->fl[0].desc)
2840 goto err;
2841
2842 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2843 sizeof(struct rx_desc),
2844 sizeof(struct rx_sw_desc),
2845 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2846 if (!q->fl[1].desc)
2847 goto err;
2848
2849 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2850 sizeof(struct rsp_desc), 0,
2851 &q->rspq.phys_addr, NULL);
2852 if (!q->rspq.desc)
2853 goto err;
2854
2855 for (i = 0; i < ntxq; ++i) {
2856 /*
2857 * The control queue always uses immediate data so does not
2858 * need to keep track of any sk_buffs.
2859 */
2860 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2861
2862 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2863 sizeof(struct tx_desc), sz,
2864 &q->txq[i].phys_addr,
2865 &q->txq[i].sdesc);
2866 if (!q->txq[i].desc)
2867 goto err;
2868
2869 q->txq[i].gen = 1;
2870 q->txq[i].size = p->txq_size[i];
2871 spin_lock_init(&q->txq[i].lock);
2872 skb_queue_head_init(&q->txq[i].sendq);
2873 }
2874
2875 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2876 (unsigned long)q);
2877 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2878 (unsigned long)q);
2879
2880 q->fl[0].gen = q->fl[1].gen = 1;
2881 q->fl[0].size = p->fl_size;
2882 q->fl[1].size = p->jumbo_size;
2883
2884 q->rspq.gen = 1;
2885 q->rspq.size = p->rspq_size;
2886 spin_lock_init(&q->rspq.lock);
David S. Miller147e70e2008-09-22 01:29:52 -07002887 skb_queue_head_init(&q->rspq.rx_queue);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002888
2889 q->txq[TXQ_ETH].stop_thres = nports *
2890 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2891
Divy Le Raycf992af2007-05-30 21:10:47 -07002892#if FL0_PG_CHUNK_SIZE > 0
2893 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002894#else
Divy Le Raycf992af2007-05-30 21:10:47 -07002895 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
Divy Le Raye0994eb2007-02-24 16:44:17 -08002896#endif
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002897#if FL1_PG_CHUNK_SIZE > 0
2898 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
2899#else
Divy Le Raycf992af2007-05-30 21:10:47 -07002900 q->fl[1].buf_size = is_offload(adapter) ?
2901 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2902 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002903#endif
2904
2905 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
2906 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
2907 q->fl[0].order = FL0_PG_ORDER;
2908 q->fl[1].order = FL1_PG_ORDER;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002909
Roland Dreierb1186de2008-03-20 13:30:48 -07002910 spin_lock_irq(&adapter->sge.reg_lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002911
2912 /* FL threshold comparison uses < */
2913 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2914 q->rspq.phys_addr, q->rspq.size,
2915 q->fl[0].buf_size, 1, 0);
2916 if (ret)
2917 goto err_unlock;
2918
2919 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2920 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2921 q->fl[i].phys_addr, q->fl[i].size,
2922 q->fl[i].buf_size, p->cong_thres, 1,
2923 0);
2924 if (ret)
2925 goto err_unlock;
2926 }
2927
2928 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2929 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2930 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2931 1, 0);
2932 if (ret)
2933 goto err_unlock;
2934
2935 if (ntxq > 1) {
2936 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2937 USE_GTS, SGE_CNTXT_OFLD, id,
2938 q->txq[TXQ_OFLD].phys_addr,
2939 q->txq[TXQ_OFLD].size, 0, 1, 0);
2940 if (ret)
2941 goto err_unlock;
2942 }
2943
2944 if (ntxq > 2) {
2945 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2946 SGE_CNTXT_CTRL, id,
2947 q->txq[TXQ_CTRL].phys_addr,
2948 q->txq[TXQ_CTRL].size,
2949 q->txq[TXQ_CTRL].token, 1, 0);
2950 if (ret)
2951 goto err_unlock;
2952 }
2953
Roland Dreierb1186de2008-03-20 13:30:48 -07002954 spin_unlock_irq(&adapter->sge.reg_lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002955
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002956 q->adap = adapter;
2957 q->netdev = dev;
Divy Le Ray82ad3322008-12-16 01:09:39 -08002958 q->tx_q = netdevq;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002959 t3_update_qset_coalesce(q, p);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002960
	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
			  GFP_KERNEL | __GFP_COMP);
	if (!avail) {
		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
		goto err;
	}
	if (avail < q->fl[0].size)
		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
			avail);

	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
			  GFP_KERNEL | __GFP_COMP);
	if (avail < q->fl[1].size)
		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
			avail);
	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);

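	/* Arm the response queue's interrupt holdoff timer via GTS. */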
	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
		     V_NEWTIMER(q->rspq.holdoff_tmr));

	mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
	return 0;

err_unlock:
	spin_unlock_irq(&adapter->sge.reg_lock);
err:
	t3_free_qset(adapter, q);
	return ret;
}

/**
 * t3_stop_sge_timers - stop SGE timer callbacks
 * @adap: the adapter
 *
 * Stops each SGE queue set's timer callback.
 */
void t3_stop_sge_timers(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i) {
		struct sge_qset *q = &adap->sge.qs[i];

		if (q->tx_reclaim_timer.function)
			del_timer_sync(&q->tx_reclaim_timer);
	}
}

/**
 * t3_free_sge_resources - free SGE resources
 * @adap: the adapter
 *
 * Frees resources used by the SGE queue sets.
 */
void t3_free_sge_resources(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i)
		t3_free_qset(adap, &adap->sge.qs[i]);
}

/**
 * t3_sge_start - enable SGE
 * @adap: the adapter
 *
 * Enables the SGE for DMAs.  This is the last step in starting packet
 * transfers.
 */
void t3_sge_start(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
}

/**
 * t3_sge_stop - disable SGE operation
 * @adap: the adapter
 *
 * Disables the DMA engine.  This can be called in emergencies (e.g.,
 * from error interrupts) or from normal process context.  In the latter
 * case it also disables any pending queue restart tasklets.  Note that
 * if it is called in interrupt context it cannot disable the restart
 * tasklets as it cannot wait; however, the tasklets will have no effect
 * since the doorbells are disabled, and the driver will call this again
 * later from process context, at which time the tasklets will be stopped
 * if they are still running.
 */
void t3_sge_stop(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
	if (!in_interrupt()) {
		int i;

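		/*
		 * In process context we may sleep, so the queue restart
		 * tasklets can be killed outright.
		 */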
		for (i = 0; i < SGE_QSETS; ++i) {
			struct sge_qset *qs = &adap->sge.qs[i];

			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
		}
	}
}

/**
 * t3_sge_init - initialize SGE
 * @adap: the adapter
 * @p: the SGE parameters
 *
 * Performs SGE initialization needed every time after a chip reset.
 * We do not initialize any of the queue sets here; instead the driver
 * top-level must request those individually.  We also do not enable DMA
 * here; that should be done after the queues have been set up.
 */
void t3_sge_init(struct adapter *adap, struct sge_params *p)
{
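	/*
	 * ups is the encoding of the user-space region size taken from
	 * BAR2's length in 4KB units; V_USERSPACESIZE gets ups - 1 below.
	 */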
	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);

	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	    F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
		     adap->params.rev < T3_REV_C ? 1000 : 500);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}

/**
 * t3_sge_prep - one-time SGE initialization
 * @adap: the associated adapter
 * @p: SGE parameters
 *
 * Performs one-time initialization of SGE SW state.  Includes determining
 * defaults for the assorted SGE parameters, which admins can change until
 * they are used to initialize the SGE.
 */
void t3_sge_prep(struct adapter *adap, struct sge_params *p)
{
	int i;

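	/*
	 * max_pkt_size is what a 16KB Rx buffer can hold once the CPL
	 * header and the skb shared info overhead are subtracted.
	 */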
	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = p->qset + i;

		q->polling = adap->params.rev > 0;
		q->coalesce_usecs = 5;
		q->rspq_size = 1024;
		q->fl_size = 1024;
		q->jumbo_size = 512;
		q->txq_size[TXQ_ETH] = 1024;
		q->txq_size[TXQ_OFLD] = 1024;
		q->txq_size[TXQ_CTRL] = 256;
		q->cong_thres = 0;
	}

	spin_lock_init(&adap->sge.reg_lock);
}

/**
 * t3_get_desc - dump an SGE descriptor for debugging purposes
 * @qs: the queue set
 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
 * @idx: the descriptor index in the queue
 * @data: where to dump the descriptor contents
 *
 * Dumps the contents of a HW descriptor of an SGE queue.  Returns the
 * size of the descriptor.
 */
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
		unsigned char *data)
{
	if (qnum >= 6)
		return -EINVAL;

	if (qnum < 3) {
		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
			return -EINVAL;
		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
		return sizeof(struct tx_desc);
	}

	if (qnum == 3) {
		if (!qs->rspq.desc || idx >= qs->rspq.size)
			return -EINVAL;
		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
		return sizeof(struct rsp_desc);
	}

	qnum -= 4;
	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
		return -EINVAL;
	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
	return sizeof(struct rx_desc);
}