/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include <net/arp.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

#define SGE_RX_COPY_THRES  256
#define SGE_RX_PULL_LEN    128

#define SGE_PG_RSVD SMP_CACHE_BYTES
/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE  2048
#define FL0_PG_ORDER 0
#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)

#define SGE_RX_DROP_THRES 16
#define RX_RECLAIM_PERIOD (HZ/4)

/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 16U
/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)
#define TX_RECLAIM_TIMER_CHUNK 64U
#define TX_RECLAIM_CHUNK 16U

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
        TXQ_RUNNING = 1 << 0,      /* fetch engine is running */
        TXQ_LAST_PKT_DB = 1 << 1,  /* last packet rang the doorbell */
};

struct tx_desc {
        __be64 flit[TX_DESC_FLITS];
};

struct rx_desc {
        __be32 addr_lo;
        __be32 len_gen;
        __be32 gen2;
        __be32 addr_hi;
};

struct tx_sw_desc {             /* SW state per Tx descriptor */
        struct sk_buff *skb;
        u8 eop;       /* set if last descriptor for packet */
        u8 addr_idx;  /* buffer index of first SGL entry in descriptor */
        u8 fragidx;   /* first page fragment associated with descriptor */
        s8 sflit;     /* start flit of first SGL entry in descriptor */
};

struct rx_sw_desc {             /* SW state per Rx descriptor */
        union {
                struct sk_buff *skb;
                struct fl_pg_chunk pg_chunk;
        };
        DECLARE_PCI_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {               /* response queue descriptor */
        struct rss_header rss_hdr;
        __be32 flags;
        __be32 len_cq;
        u8 imm_data[47];
        u8 intr_gen;
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
        struct pci_dev *pdev;
        dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
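
/*
 * Illustrative sketch, not driver code: a sender that wants deferred
 * unmapping reserves room for this struct at skb->head and records the
 * DMA addresses there so the destructor can undo the mappings when the
 * skb is freed, roughly:
 *
 *	struct deferred_unmap_info *dui;
 *
 *	dui = (struct deferred_unmap_info *)skb->head;
 *	dui->pdev = pdev;
 *	dui->addr[0] = first_mapping;	(first_mapping is a hypothetical name)
 *
 * setup_deferred_unmapping() and deferred_unmap_destructor() below are
 * the real producer/consumer pair for this layout.
 */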

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
        0,
#if SGE_NUM_GENBITS == 1
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
        4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
        4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
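
/*
 * Worked example (illustrative, assuming TX_DESC_FLITS == 16 so that
 * WR_FLITS == 15 when SGE_NUM_GENBITS == 2): a 20-flit WR needs
 * desc = 1 + (20 - 2) / (15 - 1) = 2 descriptors, matching
 * flit_desc_map[20] == 2 in the table above.
 */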

static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
        return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
        return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
        return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 * refill_rspq - replenish an SGE response queue
 * @adapter: the adapter
 * @q: the response queue to replenish
 * @credits: how many new responses to make available
 *
 * Replenishes a response queue by making the supplied number of responses
 * available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
                               const struct sge_rspq *q, unsigned int credits)
{
        rmb();
        t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
                     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 * Returns true if the platform needs sk_buff unmapping.  The result is a
 * compile-time constant, so the compiler optimizes away the unmapping
 * code when this returns false.
 */
static inline int need_skb_unmap(void)
{
        /*
         * This structure is used to tell if the platform needs buffer
         * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
         */
        struct dummy {
                DECLARE_PCI_UNMAP_ADDR(addr);
        };

        return sizeof(struct dummy) != 0;
}
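
/*
 * Illustrative sketch, not driver code: because the result is a
 * compile-time constant, a guard such as
 *
 *	if (need_skb_unmap())
 *		unmap_skb(skb, q, cidx, pdev);
 *
 * lets the compiler drop the unmapping path entirely on platforms where
 * DECLARE_PCI_UNMAP_ADDR() expands to nothing; free_tx_desc() below
 * relies on exactly this.
 */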

/**
 * unmap_skb - unmap a packet main body and its page fragments
 * @skb: the packet
 * @q: the Tx queue containing Tx descriptors for the packet
 * @cidx: index of Tx descriptor
 * @pdev: the PCI device
 *
 * Unmap the main body of an sk_buff and its page fragments, if any.
 * Because of the fairly complicated structure of our SGLs and the desire
 * to conserve space for metadata, the information necessary to unmap an
 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 * descriptors (the physical addresses of the various data buffers), and
 * the SW descriptor state (assorted indices).  The send functions
 * initialize the indices for the first packet descriptor so we can unmap
 * the buffers held in the first Tx descriptor here, and we have enough
 * information at this point to set the state for the next Tx descriptor.
 *
 * Note that it is possible to clean up the first descriptor of a packet
 * before the send routines have written the next descriptors, but this
 * race does not cause any problem.  We just end up writing the unmapping
 * info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
                             unsigned int cidx, struct pci_dev *pdev)
{
        const struct sg_ent *sgp;
        struct tx_sw_desc *d = &q->sdesc[cidx];
        int nfrags, frag_idx, curflit, j = d->addr_idx;

        sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
        frag_idx = d->fragidx;

        if (frag_idx == 0 && skb_headlen(skb)) {
                pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
                                 skb_headlen(skb), PCI_DMA_TODEVICE);
                j = 1;
        }

        curflit = d->sflit + 1 + j;
        nfrags = skb_shinfo(skb)->nr_frags;

        while (frag_idx < nfrags && curflit < WR_FLITS) {
                pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
                               skb_shinfo(skb)->frags[frag_idx].size,
                               PCI_DMA_TODEVICE);
                j ^= 1;
                if (j == 0) {
                        sgp++;
                        curflit++;
                }
                curflit++;
                frag_idx++;
        }

        if (frag_idx < nfrags) {   /* SGL continues into next Tx descriptor */
                d = cidx + 1 == q->size ? q->sdesc : d + 1;
                d->fragidx = frag_idx;
                d->addr_idx = j;
                d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
        }
}

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adapter: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
                         unsigned int n)
{
        struct tx_sw_desc *d;
        struct pci_dev *pdev = adapter->pdev;
        unsigned int cidx = q->cidx;

        const int need_unmap = need_skb_unmap() &&
                               q->cntxt_id >= FW_TUNNEL_SGEEC_START;

        d = &q->sdesc[cidx];
        while (n--) {
                if (d->skb) {   /* an SGL is present */
                        if (need_unmap)
                                unmap_skb(d->skb, q, cidx, pdev);
                        if (d->eop)
                                kfree_skb(d->skb);
                }
                ++d;
                if (++cidx == q->size) {
                        cidx = 0;
                        d = q->sdesc;
                }
        }
        q->cidx = cidx;
}

/**
 * reclaim_completed_tx - reclaims completed Tx descriptors
 * @adapter: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 * @chunk: maximum number of descriptors to reclaim
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  Called with the Tx
 * queue's lock held.
 */
static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
                                                struct sge_txq *q,
                                                unsigned int chunk)
{
        unsigned int reclaim = q->processed - q->cleaned;

        reclaim = min(chunk, reclaim);
        if (reclaim) {
                free_tx_desc(adapter, q, reclaim);
                q->cleaned += reclaim;
                q->in_use -= reclaim;
        }
        return q->processed - q->cleaned;
}

/**
 * should_restart_tx - are there enough resources to restart a Tx queue?
 * @q: the Tx queue
 *
 * Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
        unsigned int r = q->processed - q->cleaned;

        return q->in_use - r < (q->size >> 1);
}

static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
                          struct rx_sw_desc *d)
{
        if (q->use_pages && d->pg_chunk.page) {
                (*d->pg_chunk.p_cnt)--;
                if (!*d->pg_chunk.p_cnt)
                        pci_unmap_page(pdev,
                                       pci_unmap_addr(&d->pg_chunk, mapping),
                                       q->alloc_size, PCI_DMA_FROMDEVICE);

                put_page(d->pg_chunk.page);
                d->pg_chunk.page = NULL;
        } else {
                pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
                                 q->buf_size, PCI_DMA_FROMDEVICE);
                kfree_skb(d->skb);
                d->skb = NULL;
        }
}

/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @pdev: the PCI device associated with the adapter
 * @q: the SGE free list to clean up
 *
 * Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 * this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
        unsigned int cidx = q->cidx;

        while (q->credits--) {
                struct rx_sw_desc *d = &q->sdesc[cidx];

                clear_rx_desc(pdev, q, d);
                if (++cidx == q->size)
                        cidx = 0;
        }

        if (q->pg_chunk.page) {
                __free_pages(q->pg_chunk.page, q->order);
                q->pg_chunk.page = NULL;
        }
}

/**
 * add_one_rx_buf - add a packet buffer to a free-buffer list
 * @va: buffer start VA
 * @len: the buffer length
 * @d: the HW Rx descriptor to write
 * @sd: the SW Rx descriptor to write
 * @gen: the generation bit value
 * @pdev: the PCI device associated with the adapter
 *
 * Add a buffer of the given length to the supplied HW and SW Rx
 * descriptors.
 */
static inline int add_one_rx_buf(void *va, unsigned int len,
                                 struct rx_desc *d, struct rx_sw_desc *sd,
                                 unsigned int gen, struct pci_dev *pdev)
{
        dma_addr_t mapping;

        mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
        if (unlikely(pci_dma_mapping_error(pdev, mapping)))
                return -ENOMEM;

        pci_unmap_addr_set(sd, dma_addr, mapping);

        d->addr_lo = cpu_to_be32(mapping);
        d->addr_hi = cpu_to_be32((u64) mapping >> 32);
        wmb();
        d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
        d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
        return 0;
}

static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
                                   unsigned int gen)
{
        d->addr_lo = cpu_to_be32(mapping);
        d->addr_hi = cpu_to_be32((u64) mapping >> 32);
        wmb();
        d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
        d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
        return 0;
}

static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
                          struct rx_sw_desc *sd, gfp_t gfp,
                          unsigned int order)
{
        if (!q->pg_chunk.page) {
                dma_addr_t mapping;

                q->pg_chunk.page = alloc_pages(gfp, order);
                if (unlikely(!q->pg_chunk.page))
                        return -ENOMEM;
                q->pg_chunk.va = page_address(q->pg_chunk.page);
                q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
                                    SGE_PG_RSVD;
                q->pg_chunk.offset = 0;
                mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
                                       0, q->alloc_size, PCI_DMA_FROMDEVICE);
                pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
        }
        sd->pg_chunk = q->pg_chunk;

        prefetch(sd->pg_chunk.p_cnt);

        q->pg_chunk.offset += q->buf_size;
        if (q->pg_chunk.offset == (PAGE_SIZE << order))
                q->pg_chunk.page = NULL;
        else {
                q->pg_chunk.va += q->buf_size;
                get_page(q->pg_chunk.page);
        }

        if (sd->pg_chunk.offset == 0)
                *sd->pg_chunk.p_cnt = 1;
        else
                *sd->pg_chunk.p_cnt += 1;

        return 0;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
        if (q->pend_cred >= q->credits / 4) {
                q->pend_cred = 0;
                t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
        }
}
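
/*
 * Worked example (illustrative): for a free list with q->credits == 1024,
 * refilled buffers accumulate in q->pend_cred and the doorbell register
 * is written only once at least 256 credits are pending, batching MMIO
 * writes instead of ringing the doorbell once per buffer.
 */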

/**
 * refill_fl - refill an SGE free-buffer list
 * @adap: the adapter
 * @q: the free-list to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for allocating new buffers
 *
 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must assure that
 * @n does not exceed the queue's capacity.
 */
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
        struct rx_sw_desc *sd = &q->sdesc[q->pidx];
        struct rx_desc *d = &q->desc[q->pidx];
        unsigned int count = 0;

        while (n--) {
                dma_addr_t mapping;
                int err;

                if (q->use_pages) {
                        if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
                                                    q->order))) {
nomem:                          q->alloc_failed++;
                                break;
                        }
                        mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
                                  sd->pg_chunk.offset;
                        pci_unmap_addr_set(sd, dma_addr, mapping);

                        add_one_rx_chunk(mapping, d, q->gen);
                        pci_dma_sync_single_for_device(adap->pdev, mapping,
                                                q->buf_size - SGE_PG_RSVD,
                                                PCI_DMA_FROMDEVICE);
                } else {
                        void *buf_start;

                        struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
                        if (!skb)
                                goto nomem;

                        sd->skb = skb;
                        buf_start = skb->data;
                        err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
                                             q->gen, adap->pdev);
                        if (unlikely(err)) {
                                clear_rx_desc(adap->pdev, q, sd);
                                break;
                        }
                }

                d++;
                sd++;
                if (++q->pidx == q->size) {
                        q->pidx = 0;
                        q->gen ^= 1;
                        sd = q->sdesc;
                        d = q->desc;
                }
                count++;
        }

        q->credits += count;
        q->pend_cred += count;
        ring_fl_db(adap, q);

        return count;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
        refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
                  GFP_ATOMIC | __GFP_COMP);
}

/**
 * recycle_rx_buf - recycle a receive buffer
 * @adap: the adapter
 * @q: the SGE free list
 * @idx: index of buffer to recycle
 *
 * Recycles the specified buffer on the given free list by adding it at
 * the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
                           unsigned int idx)
{
        struct rx_desc *from = &q->desc[idx];
        struct rx_desc *to = &q->desc[q->pidx];

        q->sdesc[q->pidx] = q->sdesc[idx];
        to->addr_lo = from->addr_lo;    /* already big endian */
        to->addr_hi = from->addr_hi;    /* likewise */
        wmb();
        to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
        to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));

        if (++q->pidx == q->size) {
                q->pidx = 0;
                q->gen ^= 1;
        }

        q->credits++;
        q->pend_cred++;
        ring_fl_db(adap, q);
}

/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @pdev: the PCI device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the physical address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
                        size_t sw_size, dma_addr_t *phys, void *metadata)
{
        size_t len = nelem * elem_size;
        void *s = NULL;
        void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

        if (!p)
                return NULL;
        if (sw_size && metadata) {
                s = kcalloc(nelem, sw_size, GFP_KERNEL);

                if (!s) {
                        dma_free_coherent(&pdev->dev, len, p, *phys);
                        return NULL;
                }
                *(void **)metadata = s;
        }
        memset(p, 0, len);
        return p;
}
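
/*
 * Usage sketch (illustrative, with an assumed 512-entry ring; see
 * t3_free_qset() below for the matching teardown): allocating a free
 * list with per-buffer SW state would look roughly like
 *
 *	fl->desc = alloc_ring(pdev, 512, sizeof(struct rx_desc),
 *			      sizeof(struct rx_sw_desc),
 *			      &fl->phys_addr, &fl->sdesc);
 *	if (!fl->desc)
 *		return -ENOMEM;
 *
 * where fl->sdesc receives the metadata array through @metadata.
 */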

/**
 * t3_reset_qset - reset an SGE qset
 * @q: the queue set
 *
 * Reset the qset structure.  The NAPI structure is preserved in the event
 * of the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
{
        if (q->adap &&
            !(q->adap->flags & NAPI_INIT)) {
                memset(q, 0, sizeof(*q));
                return;
        }

        q->adap = NULL;
        memset(&q->rspq, 0, sizeof(q->rspq));
        memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
        memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
        q->txq_stopped = 0;
        q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
        q->rx_reclaim_timer.function = NULL;
        q->nomem = 0;
        napi_free_frags(&q->napi);
}

/**
 * t3_free_qset - free the resources of an SGE queue set
 * @adapter: the adapter owning the queue set
 * @q: the queue set
 *
 * Release the HW and SW resources associated with an SGE queue set, such
 * as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 * queue set must be quiesced prior to calling this.
 */
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
        int i;
        struct pci_dev *pdev = adapter->pdev;

        for (i = 0; i < SGE_RXQ_PER_SET; ++i)
                if (q->fl[i].desc) {
                        spin_lock_irq(&adapter->sge.reg_lock);
                        t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
                        spin_unlock_irq(&adapter->sge.reg_lock);
                        free_rx_bufs(pdev, &q->fl[i]);
                        kfree(q->fl[i].sdesc);
                        dma_free_coherent(&pdev->dev,
                                          q->fl[i].size *
                                          sizeof(struct rx_desc), q->fl[i].desc,
                                          q->fl[i].phys_addr);
                }

        for (i = 0; i < SGE_TXQ_PER_SET; ++i)
                if (q->txq[i].desc) {
                        spin_lock_irq(&adapter->sge.reg_lock);
                        t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
                        spin_unlock_irq(&adapter->sge.reg_lock);
                        if (q->txq[i].sdesc) {
                                free_tx_desc(adapter, &q->txq[i],
                                             q->txq[i].in_use);
                                kfree(q->txq[i].sdesc);
                        }
                        dma_free_coherent(&pdev->dev,
                                          q->txq[i].size *
                                          sizeof(struct tx_desc),
                                          q->txq[i].desc, q->txq[i].phys_addr);
                        __skb_queue_purge(&q->txq[i].sendq);
                }

        if (q->rspq.desc) {
                spin_lock_irq(&adapter->sge.reg_lock);
                t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
                spin_unlock_irq(&adapter->sge.reg_lock);
                dma_free_coherent(&pdev->dev,
                                  q->rspq.size * sizeof(struct rsp_desc),
                                  q->rspq.desc, q->rspq.phys_addr);
        }

        t3_reset_qset(q);
}

/**
 * init_qset_cntxt - initialize an SGE queue set context info
 * @qs: the queue set
 * @id: the queue set id
 *
 * Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
        qs->rspq.cntxt_id = id;
        qs->fl[0].cntxt_id = 2 * id;
        qs->fl[1].cntxt_id = 2 * id + 1;
        qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
        qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
        qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
        qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
        qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
        /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
        return (3 * n) / 2 + (n & 1);
}
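
/*
 * Worked example (illustrative): each sg_ent packs two 32-bit lengths
 * into one flit and two 64-bit addresses into two flits, so a pair of
 * entries costs 3 flits and a trailing odd entry costs 2.  For n = 5:
 * (3 * 5) / 2 + (5 & 1) = 7 + 1 = 8 flits.
 */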

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Calculates the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
        BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
        return flit_desc_map[n];
}

/**
 * get_packet - return the next ingress packet buffer from a free list
 * @adap: the adapter that received the packet
 * @fl: the SGE free list holding the packet
 * @len: the packet length including any SGE padding
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list and complete setup of the
 * sk_buff.  If the packet is small we make a copy and recycle the
 * original buffer, otherwise we use the original buffer itself.  If a
 * positive drop threshold is supplied packets are dropped and their
 * buffers recycled if (a) the number of remaining buffers is under the
 * threshold and the packet is too big to copy, or (b) the packet should
 * be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
                                  unsigned int len, unsigned int drop_thres)
{
        struct sk_buff *skb = NULL;
        struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

        prefetch(sd->skb->data);
        fl->credits--;

        if (len <= SGE_RX_COPY_THRES) {
                skb = alloc_skb(len, GFP_ATOMIC);
                if (likely(skb != NULL)) {
                        __skb_put(skb, len);
                        pci_dma_sync_single_for_cpu(adap->pdev,
                                            pci_unmap_addr(sd, dma_addr), len,
                                            PCI_DMA_FROMDEVICE);
                        memcpy(skb->data, sd->skb->data, len);
                        pci_dma_sync_single_for_device(adap->pdev,
                                            pci_unmap_addr(sd, dma_addr), len,
                                            PCI_DMA_FROMDEVICE);
                } else if (!drop_thres)
                        goto use_orig_buf;
recycle:
                recycle_rx_buf(adap, fl, fl->cidx);
                return skb;
        }

        if (unlikely(fl->credits < drop_thres) &&
            refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
                      GFP_ATOMIC | __GFP_COMP) == 0)
                goto recycle;

use_orig_buf:
        pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
                         fl->buf_size, PCI_DMA_FROMDEVICE);
        skb = sd->skb;
        skb_put(skb, len);
        __refill_fl(adap, fl);
        return skb;
}

/**
 * get_packet_pg - return the next ingress packet buffer from a free list
 * @adap: the adapter that received the packet
 * @fl: the SGE free list holding the packet
 * @q: the queue set's response queue
 * @len: the packet length including any SGE padding
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list populated with page chunks.
 * If the packet is small we make a copy and recycle the original buffer,
 * otherwise we attach the original buffer as a page fragment to a fresh
 * sk_buff.  If a positive drop threshold is supplied packets are dropped
 * and their buffers recycled if (a) the number of remaining buffers is
 * under the threshold and the packet is too big to copy, or (b) there's
 * no system memory.
 *
 * Note: this function is similar to @get_packet but deals with Rx buffers
 * that are page chunks rather than sk_buffs.
 */
static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
                                     struct sge_rspq *q, unsigned int len,
                                     unsigned int drop_thres)
{
        struct sk_buff *newskb, *skb;
        struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

        dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);

        newskb = skb = q->pg_skb;
        if (!skb && (len <= SGE_RX_COPY_THRES)) {
                newskb = alloc_skb(len, GFP_ATOMIC);
                if (likely(newskb != NULL)) {
                        __skb_put(newskb, len);
                        pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
                                            PCI_DMA_FROMDEVICE);
                        memcpy(newskb->data, sd->pg_chunk.va, len);
                        pci_dma_sync_single_for_device(adap->pdev, dma_addr,
                                                       len,
                                                       PCI_DMA_FROMDEVICE);
                } else if (!drop_thres)
                        return NULL;
recycle:
                fl->credits--;
                recycle_rx_buf(adap, fl, fl->cidx);
                q->rx_recycle_buf++;
                return newskb;
        }

        if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
                goto recycle;

        prefetch(sd->pg_chunk.p_cnt);

        if (!skb)
                newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);

        if (unlikely(!newskb)) {
                if (!drop_thres)
                        return NULL;
                goto recycle;
        }

        pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
                                    PCI_DMA_FROMDEVICE);
        (*sd->pg_chunk.p_cnt)--;
        if (!*sd->pg_chunk.p_cnt)
                pci_unmap_page(adap->pdev,
                               pci_unmap_addr(&sd->pg_chunk, mapping),
                               fl->alloc_size,
                               PCI_DMA_FROMDEVICE);
        if (!skb) {
                __skb_put(newskb, SGE_RX_PULL_LEN);
                memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
                skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
                                   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
                                   len - SGE_RX_PULL_LEN);
                newskb->len = len;
                newskb->data_len = len - SGE_RX_PULL_LEN;
                newskb->truesize += newskb->data_len;
        } else {
                skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
                                   sd->pg_chunk.page,
                                   sd->pg_chunk.offset, len);
                newskb->len += len;
                newskb->data_len += len;
                newskb->truesize += len;
        }

        fl->credits--;
        /*
         * We do not refill FLs here, we let the caller do it to overlap a
         * prefetch.
         */
        return newskb;
}

/**
 * get_imm_packet - return the next ingress packet buffer from a response
 * @resp: the response descriptor containing the packet data
 *
 * Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
        struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

        if (skb) {
                __skb_put(skb, IMMED_PKT_SIZE);
                skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
        }
        return skb;
}

/**
 * calc_tx_descs - calculate the number of Tx descriptors for a packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given Ethernet
 * packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
        unsigned int flits;

        if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
                return 1;

        flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
        if (skb_shinfo(skb)->gso_size)
                flits++;
        return flits_to_desc(flits);
}
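
/*
 * Worked example (illustrative): a GSO packet with a linear part and
 * three page fragments has 4 SGL entries, so
 * flits = sgl_len(4) + 2 + 1 = 9, and flit_desc_map[9] == 1 in the table
 * above, i.e. the packet fits in a single Tx descriptor.
 */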

/**
 * make_sgl - populate a scatter/gather list for a packet
 * @skb: the packet
 * @sgp: the SGL to populate
 * @start: start address of skb main body data to include in the SGL
 * @len: length of skb main body data to include in the SGL
 * @pdev: the PCI device
 *
 * Generates a scatter/gather list for the buffers that make up a packet
 * and returns the SGL size in 8-byte words.  The caller must size the SGL
 * appropriately.
 */
static inline unsigned int make_sgl(const struct sk_buff *skb,
                                    struct sg_ent *sgp, unsigned char *start,
                                    unsigned int len, struct pci_dev *pdev)
{
        dma_addr_t mapping;
        unsigned int i, j = 0, nfrags;

        if (len) {
                mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
                sgp->len[0] = cpu_to_be32(len);
                sgp->addr[0] = cpu_to_be64(mapping);
                j = 1;
        }

        nfrags = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < nfrags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                mapping = pci_map_page(pdev, frag->page, frag->page_offset,
                                       frag->size, PCI_DMA_TODEVICE);
                sgp->len[j] = cpu_to_be32(frag->size);
                sgp->addr[j] = cpu_to_be64(mapping);
                j ^= 1;
                if (j == 0)
                        ++sgp;
        }
        if (j)
                sgp->len[j] = 0;
        return ((nfrags + (len != 0)) * 3) / 2 + j;
}
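
/*
 * Consistency note (illustrative): the value returned here equals
 * sgl_len(nfrags + (len != 0)).  E.g. a 64-byte linear part plus two
 * page fragments yields 3 SGL entries and (3 * 3) / 2 + 1 = 5 flits.
 */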

/**
 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 *
 * Ring the doorbell if a Tx queue is asleep.  There is a natural race
 * where the HW goes to sleep just after we check; in that case the
 * interrupt handler detects the outstanding TX packet and rings the
 * doorbell for us.
 *
 * When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
        clear_bit(TXQ_LAST_PKT_DB, &q->flags);
        if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
                set_bit(TXQ_LAST_PKT_DB, &q->flags);
                t3_write_reg(adap, A_SG_KDOORBELL,
                             F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
        }
#else
        wmb();                  /* write descriptors before telling HW */
        t3_write_reg(adap, A_SG_KDOORBELL,
                     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}

static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
        d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}

/**
 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
 * @ndesc: number of Tx descriptors spanned by the SGL
 * @skb: the packet corresponding to the WR
 * @d: first Tx descriptor to be written
 * @pidx: index of above descriptors
 * @q: the SGE Tx queue
 * @sgl: the SGL
 * @flits: number of flits to the start of the SGL in the first descriptor
 * @sgl_flits: the SGL size in flits
 * @gen: the Tx descriptor generation
 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 * Write a work request header and an associated SGL.  If the SGL is
 * small enough to fit into one Tx descriptor it has already been written
 * and we just need to write the WR header.  Otherwise we distribute the
 * SGL across the number of descriptors it spans.
 */
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
                             struct tx_desc *d, unsigned int pidx,
                             const struct sge_txq *q,
                             const struct sg_ent *sgl,
                             unsigned int flits, unsigned int sgl_flits,
                             unsigned int gen, __be32 wr_hi,
                             __be32 wr_lo)
{
        struct work_request_hdr *wrp = (struct work_request_hdr *)d;
        struct tx_sw_desc *sd = &q->sdesc[pidx];

        sd->skb = skb;
        if (need_skb_unmap()) {
                sd->fragidx = 0;
                sd->addr_idx = 0;
                sd->sflit = flits;
        }

        if (likely(ndesc == 1)) {
                sd->eop = 1;
                wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
                                   V_WR_SGLSFLT(flits)) | wr_hi;
                wmb();
                wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
                                   V_WR_GEN(gen)) | wr_lo;
                wr_gen2(d, gen);
        } else {
                unsigned int ogen = gen;
                const u64 *fp = (const u64 *)sgl;
                struct work_request_hdr *wp = wrp;

                wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
                                   V_WR_SGLSFLT(flits)) | wr_hi;

                while (sgl_flits) {
                        unsigned int avail = WR_FLITS - flits;

                        if (avail > sgl_flits)
                                avail = sgl_flits;
                        memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
                        sgl_flits -= avail;
                        ndesc--;
                        if (!sgl_flits)
                                break;

                        fp += avail;
                        d++;
                        sd->eop = 0;
                        sd++;
                        if (++pidx == q->size) {
                                pidx = 0;
                                gen ^= 1;
                                d = q->desc;
                                sd = q->sdesc;
                        }

                        sd->skb = skb;
                        wrp = (struct work_request_hdr *)d;
                        wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
                                           V_WR_SGLSFLT(1)) | wr_hi;
                        wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
                                                        sgl_flits + 1)) |
                                           V_WR_GEN(gen)) | wr_lo;
                        wr_gen2(d, gen);
                        flits = 1;
                }
                sd->eop = 1;
                wrp->wr_hi |= htonl(F_WR_EOP);
                wmb();
                wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
                wr_gen2((struct tx_desc *)wp, ogen);
                WARN_ON(ndesc != 0);
        }
}

/**
 * write_tx_pkt_wr - write a TX_PKT work request
 * @adap: the adapter
 * @skb: the packet to send
 * @pi: the egress interface
 * @pidx: index of the first Tx descriptor to write
 * @gen: the generation value to use
 * @q: the Tx queue
 * @ndesc: number of descriptors the packet will occupy
 * @compl: the value of the COMPL bit to use
 *
 * Generate a TX_PKT work request to send the supplied packet.
 */
static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
                            const struct port_info *pi,
                            unsigned int pidx, unsigned int gen,
                            struct sge_txq *q, unsigned int ndesc,
                            unsigned int compl)
{
        unsigned int flits, sgl_flits, cntrl, tso_info;
        struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
        struct tx_desc *d = &q->desc[pidx];
        struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;

        cpl->len = htonl(skb->len);
        cntrl = V_TXPKT_INTF(pi->port_id);

        if (vlan_tx_tag_present(skb) && pi->vlan_grp)
                cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));

        tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
        if (tso_info) {
                int eth_type;
                struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;

                d->flit[2] = 0;
                cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
                hdr->cntrl = htonl(cntrl);
                eth_type = skb_network_offset(skb) == ETH_HLEN ?
                    CPL_ETH_II : CPL_ETH_II_VLAN;
                tso_info |= V_LSO_ETH_TYPE(eth_type) |
                    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
                    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
                hdr->lso_info = htonl(tso_info);
                flits = 3;
        } else {
                cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
                cntrl |= F_TXPKT_IPCSUM_DIS;    /* SW calculates IP csum */
                cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
                cpl->cntrl = htonl(cntrl);

                if (skb->len <= WR_LEN - sizeof(*cpl)) {
                        q->sdesc[pidx].skb = NULL;
                        if (!skb->data_len)
                                skb_copy_from_linear_data(skb, &d->flit[2],
                                                          skb->len);
                        else
                                skb_copy_bits(skb, 0, &d->flit[2], skb->len);

                        flits = (skb->len + 7) / 8 + 2;
                        cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
                                              V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
                                              | F_WR_SOP | F_WR_EOP | compl);
                        wmb();
                        cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
                                              V_WR_TID(q->token));
                        wr_gen2(d, gen);
                        kfree_skb(skb);
                        return;
                }

                flits = 2;
        }

        sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
        sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);

        write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
                         htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
                         htonl(V_WR_TID(q->token)));
}

static inline void t3_stop_tx_queue(struct netdev_queue *txq,
                                    struct sge_qset *qs, struct sge_txq *q)
{
        netif_tx_stop_queue(txq);
        set_bit(TXQ_ETH, &qs->txq_stopped);
        q->stops++;
}

/**
 * t3_eth_xmit - add a packet to the Ethernet Tx queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
 */
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
        int qidx;
        unsigned int ndesc, pidx, credits, gen, compl;
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct netdev_queue *txq;
        struct sge_qset *qs;
        struct sge_txq *q;

        /*
         * The chip min packet length is 9 octets but play safe and reject
         * anything shorter than an Ethernet header.
         */
        if (unlikely(skb->len < ETH_HLEN)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        qidx = skb_get_queue_mapping(skb);
        qs = &pi->qs[qidx];
        q = &qs->txq[TXQ_ETH];
        txq = netdev_get_tx_queue(dev, qidx);

        reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

        credits = q->size - q->in_use;
        ndesc = calc_tx_descs(skb);

        if (unlikely(credits < ndesc)) {
                t3_stop_tx_queue(txq, qs, q);
                dev_err(&adap->pdev->dev,
                        "%s: Tx ring %u full while queue awake!\n",
                        dev->name, q->cntxt_id & 7);
                return NETDEV_TX_BUSY;
        }

        q->in_use += ndesc;
        if (unlikely(credits - ndesc < q->stop_thres)) {
                t3_stop_tx_queue(txq, qs, q);

                if (should_restart_tx(q) &&
                    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
                        q->restarts++;
                        netif_tx_wake_queue(txq);
                }
        }

        gen = q->gen;
        q->unacked += ndesc;
        compl = (q->unacked & 8) << (S_WR_COMPL - 3);
        q->unacked &= 7;
        pidx = q->pidx;
        q->pidx += ndesc;
        if (q->pidx >= q->size) {
                q->pidx -= q->size;
                q->gen ^= 1;
        }

        /* update port statistics */
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                qs->port_stats[SGE_PSTAT_TX_CSUM]++;
        if (skb_shinfo(skb)->gso_size)
                qs->port_stats[SGE_PSTAT_TSO]++;
        if (vlan_tx_tag_present(skb) && pi->vlan_grp)
                qs->port_stats[SGE_PSTAT_VLANINS]++;

        /*
         * We do not use Tx completion interrupts to free DMAd Tx packets.
         * This is good for performance but means that we rely on new Tx
         * packets arriving to run the destructors of completed packets,
         * which open up space in their sockets' send queues.  Sometimes
         * we do not get such new packets causing Tx to stall.  A single
         * UDP transmitter is a good example of this situation.  We have
         * a clean up timer that periodically reclaims completed packets
         * but it doesn't run often enough (nor do we want it to) to prevent
         * lengthy stalls.  A solution to this problem is to run the
         * destructor early, after the packet is queued but before it's DMAd.
         * A downside is that we lie to socket memory accounting, but the
         * amount of extra memory is reasonable (limited by the number of Tx
         * descriptors), the packets do actually get freed quickly by new
         * packets almost always, and for protocols like TCP that wait for
         * acks to really free up the data the extra memory is even less.
         * On the positive side we run the destructors on the sending CPU
         * rather than on a potentially different completing CPU, usually a
         * good thing.  We also run them without holding our Tx queue lock,
         * unlike what reclaim_completed_tx() would otherwise do.
         *
         * Run the destructor before telling the DMA engine about the packet
         * to make sure it doesn't complete and get freed prematurely.
         */
        if (likely(!skb_shared(skb)))
                skb_orphan(skb);

        write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
        check_ring_tx_db(adap, q);
        return NETDEV_TX_OK;
}

/**
 * write_imm - write a packet into a Tx descriptor as immediate data
 * @d: the Tx descriptor to write
 * @skb: the packet
 * @len: the length of packet data to write as immediate data
 * @gen: the generation bit value to write
 *
 * Writes a packet as immediate data into a Tx descriptor.  The packet
 * contains a work request at its beginning.  We must write the packet
 * carefully so the SGE doesn't read it accidentally before it's written
 * in its entirety.
 */
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
                             unsigned int len, unsigned int gen)
{
        struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
        struct work_request_hdr *to = (struct work_request_hdr *)d;

        if (likely(!skb->data_len))
                memcpy(&to[1], &from[1], len - sizeof(*from));
        else
                skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));

        to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
                                        V_WR_BCNTLFLT(len & 7));
        wmb();
        to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
                                        V_WR_LEN((len + 7) / 8));
        wr_gen2(d, gen);
        kfree_skb(skb);
}

/**
 * check_desc_avail - check descriptor availability on a send queue
 * @adap: the adapter
 * @q: the send queue
 * @skb: the packet needing the descriptors
 * @ndesc: the number of Tx descriptors needed
 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 * Checks if the requested number of Tx descriptors is available on an
 * SGE send queue.  If the queue is already suspended or not enough
 * descriptors are available the packet is queued for later transmission.
 * Must be called with the Tx queue locked.
 *
 * Returns 0 if enough descriptors are available, 1 if there aren't
 * enough descriptors and the packet has been queued, and 2 if the caller
 * needs to retry because there weren't enough descriptors at the
 * beginning of the call but some freed up in the mean time.
 */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
                                   struct sk_buff *skb, unsigned int ndesc,
                                   unsigned int qid)
{
        if (unlikely(!skb_queue_empty(&q->sendq))) {
addq_exit:      __skb_queue_tail(&q->sendq, skb);
                return 1;
        }
        if (unlikely(q->size - q->in_use < ndesc)) {
                struct sge_qset *qs = txq_to_qset(q, qid);

                set_bit(qid, &qs->txq_stopped);
                smp_mb__after_clear_bit();

                if (should_restart_tx(q) &&
                    test_and_clear_bit(qid, &qs->txq_stopped))
                        return 2;

                q->stops++;
                goto addq_exit;
        }
        return 0;
}
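
/*
 * Callers wrap this in a reclaim/retry loop; a minimal sketch of the
 * pattern (ctrl_xmit() below is the real instance):
 *
 *	again:	reclaim_completed_tx_imm(q);
 *		ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
 *		if (ret == 2)
 *			goto again;
 *		if (ret == 1)
 *			return NET_XMIT_CN;	(the packet was queued)
 */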

/**
 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 * @q: the SGE control Tx queue
 *
 * This is a variant of reclaim_completed_tx() that is used for Tx queues
 * that send only immediate data (presently just the control queues) and
 * thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
        unsigned int reclaim = q->processed - q->cleaned;

        q->in_use -= reclaim;
        q->cleaned += reclaim;
}

static inline int immediate(const struct sk_buff *skb)
{
        return skb->len <= WR_LEN;
}

1414/**
1415 * ctrl_xmit - send a packet through an SGE control Tx queue
1416 * @adap: the adapter
1417 * @q: the control queue
1418 * @skb: the packet
1419 *
1420 * Send a packet through an SGE control Tx queue. Packets sent through
1421 * a control queue must fit entirely as immediate data in a single Tx
1422 * descriptor and have no page fragments.
1423 */
1424static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1425 struct sk_buff *skb)
1426{
1427 int ret;
1428 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1429
1430 if (unlikely(!immediate(skb))) {
1431 WARN_ON(1);
1432 dev_kfree_skb(skb);
1433 return NET_XMIT_SUCCESS;
1434 }
1435
1436 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1437 wrp->wr_lo = htonl(V_WR_TID(q->token));
1438
1439 spin_lock(&q->lock);
1440 again:reclaim_completed_tx_imm(q);
1441
1442 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1443 if (unlikely(ret)) {
1444 if (ret == 1) {
1445 spin_unlock(&q->lock);
1446 return NET_XMIT_CN;
1447 }
1448 goto again;
1449 }
1450
1451 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1452
1453 q->in_use++;
1454 if (++q->pidx >= q->size) {
1455 q->pidx = 0;
1456 q->gen ^= 1;
1457 }
1458 spin_unlock(&q->lock);
1459 wmb();
1460 t3_write_reg(adap, A_SG_KDOORBELL,
1461 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1462 return NET_XMIT_SUCCESS;
1463}
1464
1465/**
1466 * restart_ctrlq - restart a suspended control queue
1467 * @qs: the queue set containing the control queue
1468 *
1469 * Resumes transmission on a suspended Tx control queue.
1470 */
1471static void restart_ctrlq(unsigned long data)
1472{
1473 struct sk_buff *skb;
1474 struct sge_qset *qs = (struct sge_qset *)data;
1475 struct sge_txq *q = &qs->txq[TXQ_CTRL];
Divy Le Ray4d22de32007-01-18 22:04:14 -05001476
1477 spin_lock(&q->lock);
1478 again:reclaim_completed_tx_imm(q);
1479
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001480 while (q->in_use < q->size &&
1481 (skb = __skb_dequeue(&q->sendq)) != NULL) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001482
1483 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1484
1485 if (++q->pidx >= q->size) {
1486 q->pidx = 0;
1487 q->gen ^= 1;
1488 }
1489 q->in_use++;
1490 }
1491
1492 if (!skb_queue_empty(&q->sendq)) {
1493 set_bit(TXQ_CTRL, &qs->txq_stopped);
1494 smp_mb__after_clear_bit();
1495
1496 if (should_restart_tx(q) &&
1497 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1498 goto again;
1499 q->stops++;
1500 }
1501
1502 spin_unlock(&q->lock);
Divy Le Rayafefce62007-11-16 11:22:21 -08001503 wmb();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001504 t3_write_reg(qs->adap, A_SG_KDOORBELL,
Divy Le Ray4d22de32007-01-18 22:04:14 -05001505 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1506}
1507
Divy Le Ray14ab9892007-01-30 19:43:50 -08001508/*
1509 * Send a management message through control queue 0
1510 */
1511int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1512{
Divy Le Ray204e2f92008-05-06 19:26:01 -07001513 int ret;
Divy Le Raybc4b6b52007-12-17 18:47:41 -08001514 local_bh_disable();
1515 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1516 local_bh_enable();
1517
1518 return ret;
Divy Le Ray14ab9892007-01-30 19:43:50 -08001519}
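/*
 * Editor's sketch (hypothetical, not part of the driver): a caller of
 * t3_mgmt_tx() hands in a fully formatted work request small enough to be
 * sent as immediate data.  Something along these lines, assuming @wr
 * already contains a valid work request header:
 */
static inline int example_send_mgmt_wr(struct adapter *adap, const void *wr,
				       unsigned int len)
{
	struct sk_buff *skb;

	if (len > WR_LEN || len < sizeof(struct work_request_hdr))
		return -EINVAL;		/* must fit in a single descriptor */

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	memcpy(__skb_put(skb, len), wr, len);
	return t3_mgmt_tx(adap, skb);
}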
1520
Divy Le Ray4d22de32007-01-18 22:04:14 -05001521/**
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001522 * deferred_unmap_destructor - unmap a packet when it is freed
1523 * @skb: the packet
1524 *
1525 * This is the packet destructor used for Tx packets that need to remain
1526 * mapped until they are freed rather than until their Tx descriptors are
1527 * freed.
1528 */
1529static void deferred_unmap_destructor(struct sk_buff *skb)
1530{
1531 int i;
1532 const dma_addr_t *p;
1533 const struct skb_shared_info *si;
1534 const struct deferred_unmap_info *dui;
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001535
1536 dui = (struct deferred_unmap_info *)skb->head;
1537 p = dui->addr;
1538
Divy Le Ray23561c92007-11-16 11:22:05 -08001539 if (skb->tail - skb->transport_header)
1540 pci_unmap_single(dui->pdev, *p++,
1541 skb->tail - skb->transport_header,
1542 PCI_DMA_TODEVICE);
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001543
1544 si = skb_shinfo(skb);
1545 for (i = 0; i < si->nr_frags; i++)
1546 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1547 PCI_DMA_TODEVICE);
1548}
1549
1550static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1551 const struct sg_ent *sgl, int sgl_flits)
1552{
1553 dma_addr_t *p;
1554 struct deferred_unmap_info *dui;
1555
1556 dui = (struct deferred_unmap_info *)skb->head;
1557 dui->pdev = pdev;
1558 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1559 *p++ = be64_to_cpu(sgl->addr[0]);
1560 *p++ = be64_to_cpu(sgl->addr[1]);
1561 }
1562 if (sgl_flits)
1563 *p = be64_to_cpu(sgl->addr[0]);
1564}
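/*
 * Editor's note: the address array written above mirrors the SGL exactly,
 * two addresses per three flits, headers first and then one entry per page
 * fragment; deferred_unmap_destructor() above consumes it in the same
 * order.  Keeping this state in skb->head lets it survive until the skb
 * itself is freed.
 */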
1565
1566/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05001567 * write_ofld_wr - write an offload work request
1568 * @adap: the adapter
1569 * @skb: the packet to send
1570 * @q: the Tx queue
1571 * @pidx: index of the first Tx descriptor to write
1572 * @gen: the generation value to use
1573 * @ndesc: number of descriptors the packet will occupy
1574 *
1575 * Write an offload work request to send the supplied packet. The packet
1576 * data already carry the work request with most fields populated.
1577 */
1578static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1579 struct sge_txq *q, unsigned int pidx,
1580 unsigned int gen, unsigned int ndesc)
1581{
1582 unsigned int sgl_flits, flits;
1583 struct work_request_hdr *from;
1584 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1585 struct tx_desc *d = &q->desc[pidx];
1586
1587 if (immediate(skb)) {
1588 q->sdesc[pidx].skb = NULL;
1589 write_imm(d, skb, skb->len, gen);
1590 return;
1591 }
1592
1593 /* Only TX_DATA builds SGLs */
1594
1595 from = (struct work_request_hdr *)skb->data;
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001596 memcpy(&d->flit[1], &from[1],
1597 skb_transport_offset(skb) - sizeof(*from));
Divy Le Ray4d22de32007-01-18 22:04:14 -05001598
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001599 flits = skb_transport_offset(skb) / 8;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001600 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001601 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001602 skb->tail - skb->transport_header,
Divy Le Ray4d22de32007-01-18 22:04:14 -05001603 adap->pdev);
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001604 if (need_skb_unmap()) {
1605 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1606 skb->destructor = deferred_unmap_destructor;
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001607 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001608
1609 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1610 gen, from->wr_hi, from->wr_lo);
1611}
1612
1613/**
1614 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1615 * @skb: the packet
1616 *
1617 * Returns the number of Tx descriptors needed for the given offload
1618 * packet. These packets are already fully constructed.
1619 */
1620static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1621{
Divy Le Ray27186dc2007-08-21 20:49:15 -07001622 unsigned int flits, cnt;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001623
Divy Le Ray27186dc2007-08-21 20:49:15 -07001624 if (skb->len <= WR_LEN)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001625 return 1; /* packet fits as immediate data */
1626
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001627 flits = skb_transport_offset(skb) / 8; /* headers */
Divy Le Ray27186dc2007-08-21 20:49:15 -07001628 cnt = skb_shinfo(skb)->nr_frags;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001629 if (skb->tail != skb->transport_header)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001630 cnt++;
1631 return flits_to_desc(flits + sgl_len(cnt));
1632}
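/*
 * Editor's sketch (hypothetical, assuming the sgl_len() helper defined
 * earlier in this file packs two address/length pairs into every three
 * flits): the SGL flit count that feeds flits_to_desc() above works out
 * roughly as follows:
 */
static inline unsigned int example_sgl_flits(unsigned int nfrags)
{
	/* 3 flits per full pair of entries; an odd final entry needs 2 */
	return (3 * nfrags) / 2 + (nfrags & 1);
}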
1633
1634/**
1635 * ofld_xmit - send a packet through an offload queue
1636 * @adap: the adapter
1637 * @q: the Tx offload queue
1638 * @skb: the packet
1639 *
1640 * Send an offload packet through an SGE offload queue.
1641 */
1642static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1643 struct sk_buff *skb)
1644{
1645 int ret;
1646 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1647
1648 spin_lock(&q->lock);
Divy Le Ray42c8ea12009-03-12 21:14:04 +00001649again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001650
1651 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1652 if (unlikely(ret)) {
1653 if (ret == 1) {
1654 skb->priority = ndesc; /* save for restart */
1655 spin_unlock(&q->lock);
1656 return NET_XMIT_CN;
1657 }
1658 goto again;
1659 }
1660
1661 gen = q->gen;
1662 q->in_use += ndesc;
1663 pidx = q->pidx;
1664 q->pidx += ndesc;
1665 if (q->pidx >= q->size) {
1666 q->pidx -= q->size;
1667 q->gen ^= 1;
1668 }
1669 spin_unlock(&q->lock);
1670
1671 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1672 check_ring_tx_db(adap, q);
1673 return NET_XMIT_SUCCESS;
1674}
1675
1676/**
1677 * restart_offloadq - restart a suspended offload queue
1678 * @qs: the queue set containing the offload queue
1679 *
1680 * Resumes transmission on a suspended Tx offload queue.
1681 */
1682static void restart_offloadq(unsigned long data)
1683{
1684 struct sk_buff *skb;
1685 struct sge_qset *qs = (struct sge_qset *)data;
1686 struct sge_txq *q = &qs->txq[TXQ_OFLD];
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001687 const struct port_info *pi = netdev_priv(qs->netdev);
1688 struct adapter *adap = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001689
1690 spin_lock(&q->lock);
Divy Le Ray42c8ea12009-03-12 21:14:04 +00001691again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001692
1693 while ((skb = skb_peek(&q->sendq)) != NULL) {
1694 unsigned int gen, pidx;
1695 unsigned int ndesc = skb->priority;
1696
1697 if (unlikely(q->size - q->in_use < ndesc)) {
1698 set_bit(TXQ_OFLD, &qs->txq_stopped);
1699 smp_mb__after_clear_bit();
1700
1701 if (should_restart_tx(q) &&
1702 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1703 goto again;
1704 q->stops++;
1705 break;
1706 }
1707
1708 gen = q->gen;
1709 q->in_use += ndesc;
1710 pidx = q->pidx;
1711 q->pidx += ndesc;
1712 if (q->pidx >= q->size) {
1713 q->pidx -= q->size;
1714 q->gen ^= 1;
1715 }
1716 __skb_unlink(skb, &q->sendq);
1717 spin_unlock(&q->lock);
1718
1719 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1720 spin_lock(&q->lock);
1721 }
1722 spin_unlock(&q->lock);
1723
1724#if USE_GTS
1725 set_bit(TXQ_RUNNING, &q->flags);
1726 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1727#endif
Divy Le Rayafefce62007-11-16 11:22:21 -08001728 wmb();
Divy Le Ray4d22de32007-01-18 22:04:14 -05001729 t3_write_reg(adap, A_SG_KDOORBELL,
1730 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1731}
1732
1733/**
1734 * queue_set - return the queue set a packet should use
1735 * @skb: the packet
1736 *
1737 * Maps a packet to the SGE queue set it should use. The desired queue
1738 * set is carried in bits 1-3 in the packet's priority.
1739 */
1740static inline int queue_set(const struct sk_buff *skb)
1741{
1742 return skb->priority >> 1;
1743}
1744
1745/**
1746 * is_ctrl_pkt - return whether an offload packet is a control packet
1747 * @skb: the packet
1748 *
1749 * Determines whether an offload packet should use an OFLD or a CTRL
1750 * Tx queue. This is indicated by bit 0 in the packet's priority.
1751 */
1752static inline int is_ctrl_pkt(const struct sk_buff *skb)
1753{
1754 return skb->priority & 1;
1755}
1756
1757/**
1758 * t3_offload_tx - send an offload packet
1759 * @tdev: the offload device to send to
1760 * @skb: the packet
1761 *
1762 * Sends an offload packet. We use the packet priority to select the
1763 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1764 * should be sent as regular or control, bits 1-3 select the queue set.
1765 */
1766int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1767{
1768 struct adapter *adap = tdev2adap(tdev);
1769 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1770
1771 if (unlikely(is_ctrl_pkt(skb)))
1772 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1773
1774 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1775}
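/*
 * Editor's sketch (hypothetical helper): the priority encoding that
 * queue_set() and is_ctrl_pkt() above decode, seen from the sender's
 * side:
 */
static inline void example_set_ofld_priority(struct sk_buff *skb,
					     unsigned int qset_idx, int ctrl)
{
	/* bit 0 selects CTRL vs. OFLD, bits 1-3 select the queue set */
	skb->priority = (qset_idx << 1) | (ctrl ? 1 : 0);
}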
1776
1777/**
1778 * offload_enqueue - add an offload packet to an SGE offload receive queue
1779 * @q: the SGE response queue
1780 * @skb: the packet
1781 *
1782 * Add a new offload packet to an SGE response queue's offload packet
1783 * queue. If the packet is the first on the queue it schedules the RX
1784 * softirq to process the queue.
1785 */
1786static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1787{
David S. Miller147e70e2008-09-22 01:29:52 -07001788 int was_empty = skb_queue_empty(&q->rx_queue);
1789
1790 __skb_queue_tail(&q->rx_queue, skb);
1791
1792 if (was_empty) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001793 struct sge_qset *qs = rspq_to_qset(q);
1794
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001795 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001796 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001797}
1798
1799/**
1800 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1801 * @tdev: the offload device that will be receiving the packets
1802 * @q: the SGE response queue that assembled the bundle
1803 * @skbs: the partial bundle
1804 * @n: the number of packets in the bundle
1805 *
1806 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1807 */
1808static inline void deliver_partial_bundle(struct t3cdev *tdev,
1809 struct sge_rspq *q,
1810 struct sk_buff *skbs[], int n)
1811{
1812 if (n) {
1813 q->offload_bundles++;
1814 tdev->recv(tdev, skbs, n);
1815 }
1816}
1817
1818/**
1819 * ofld_poll - NAPI handler for offload packets in interrupt mode
1820 * @dev: the network device doing the polling
1821 * @budget: polling budget
1822 *
1823 * The NAPI handler for offload packets when a response queue is serviced
1824 * by the hard interrupt handler, i.e., when it's operating in non-polling
1825 * mode. Creates small packet batches and sends them through the offload
1826 * receive handler. Batches need to be of modest size as we do prefetches
1827 * on the packets in each.
1828 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001829static int ofld_poll(struct napi_struct *napi, int budget)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001830{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001831 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001832 struct sge_rspq *q = &qs->rspq;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001833 struct adapter *adapter = qs->adap;
1834 int work_done = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001835
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001836 while (work_done < budget) {
David S. Miller147e70e2008-09-22 01:29:52 -07001837 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1838 struct sk_buff_head queue;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001839 int ngathered;
1840
1841 spin_lock_irq(&q->lock);
David S. Miller147e70e2008-09-22 01:29:52 -07001842 __skb_queue_head_init(&queue);
1843 skb_queue_splice_init(&q->rx_queue, &queue);
1844 if (skb_queue_empty(&queue)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001845 napi_complete(napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001846 spin_unlock_irq(&q->lock);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001847 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001848 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001849 spin_unlock_irq(&q->lock);
1850
David S. Miller147e70e2008-09-22 01:29:52 -07001851 ngathered = 0;
1852 skb_queue_walk_safe(&queue, skb, tmp) {
1853 if (work_done >= budget)
1854 break;
1855 work_done++;
1856
1857 __skb_unlink(skb, &queue);
1858 prefetch(skb->data);
1859 skbs[ngathered] = skb;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001860 if (++ngathered == RX_BUNDLE_SIZE) {
1861 q->offload_bundles++;
1862 adapter->tdev.recv(&adapter->tdev, skbs,
1863 ngathered);
1864 ngathered = 0;
1865 }
1866 }
David S. Miller147e70e2008-09-22 01:29:52 -07001867 if (!skb_queue_empty(&queue)) {
1868 /* splice remaining packets back onto Rx queue */
Divy Le Ray4d22de32007-01-18 22:04:14 -05001869 spin_lock_irq(&q->lock);
David S. Miller147e70e2008-09-22 01:29:52 -07001870 skb_queue_splice(&queue, &q->rx_queue);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001871 spin_unlock_irq(&q->lock);
1872 }
1873 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1874 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001875
1876 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001877}
1878
1879/**
1880 * rx_offload - process a received offload packet
1881 * @tdev: the offload device receiving the packet
1882 * @rq: the response queue that received the packet
1883 * @skb: the packet
1884 * @rx_gather: a gather list of packets if we are building a bundle
1885 * @gather_idx: index of the next available slot in the bundle
1886 *
1887 * Process an ingress offload packet and add it to the offload ingress
1888 * queue. Returns the index of the next available slot in the bundle.
1889 */
1890static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1891 struct sk_buff *skb, struct sk_buff *rx_gather[],
1892 unsigned int gather_idx)
1893{
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001894 skb_reset_mac_header(skb);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001895 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001896 skb_reset_transport_header(skb);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001897
1898 if (rq->polling) {
1899 rx_gather[gather_idx++] = skb;
1900 if (gather_idx == RX_BUNDLE_SIZE) {
1901 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1902 gather_idx = 0;
1903 rq->offload_bundles++;
1904 }
1905 } else
1906 offload_enqueue(rq, skb);
1907
1908 return gather_idx;
1909}
1910
1911/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05001912 * restart_tx - check whether to restart suspended Tx queues
1913 * @qs: the queue set to resume
1914 *
1915 * Restarts suspended Tx queues of an SGE queue set if they have enough
1916 * free resources to resume operation.
1917 */
1918static void restart_tx(struct sge_qset *qs)
1919{
1920 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1921 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1922 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1923 qs->txq[TXQ_ETH].restarts++;
1924 if (netif_running(qs->netdev))
Divy Le Ray82ad3322008-12-16 01:09:39 -08001925 netif_tx_wake_queue(qs->tx_q);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001926 }
1927
1928 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1929 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1930 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1931 qs->txq[TXQ_OFLD].restarts++;
1932 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1933 }
1934 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1935 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1936 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1937 qs->txq[TXQ_CTRL].restarts++;
1938 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1939 }
1940}
1941
1942/**
Karen Xiea109a5b2008-12-18 22:56:20 -08001943 * cxgb3_arp_process - process an ARP request probing a private IP address
1944 * @adapter: the adapter
1945 * @skb: the skbuff containing the ARP request
1946 *
1947 * Check if the ARP request is probing the private IP address
1948 * dedicated to iSCSI, generating an ARP reply if so.
1949 */
1950static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb)
1951{
1952 struct net_device *dev = skb->dev;
1953 struct port_info *pi;
1954 struct arphdr *arp;
1955 unsigned char *arp_ptr;
1956 unsigned char *sha;
1957 __be32 sip, tip;
1958
1959 if (!dev)
1960 return;
1961
1962 skb_reset_network_header(skb);
1963 arp = arp_hdr(skb);
1964
1965 if (arp->ar_op != htons(ARPOP_REQUEST))
1966 return;
1967
1968 arp_ptr = (unsigned char *)(arp + 1);
1969 sha = arp_ptr;
1970 arp_ptr += dev->addr_len;
1971 memcpy(&sip, arp_ptr, sizeof(sip));
1972 arp_ptr += sizeof(sip);
1973 arp_ptr += dev->addr_len;
1974 memcpy(&tip, arp_ptr, sizeof(tip));
1975
1976 pi = netdev_priv(dev);
1977 if (tip != pi->iscsi_ipv4addr)
1978 return;
1979
1980 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1981 dev->dev_addr, sha);
1982
1983}
1984
1985static inline int is_arp(struct sk_buff *skb)
1986{
1987 return skb->protocol == htons(ETH_P_ARP);
1988}
1989
1990/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05001991 * rx_eth - process an ingress ethernet packet
1992 * @adap: the adapter
1993 * @rq: the response queue that received the packet
1994 * @skb: the packet
1995 * @pad: amount of padding at the start of the buffer
1996 *
1997 * Process an ingress ethernet packet and deliver it to the stack.
1998 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1999 * if it was immediate data in a response.
2000 */
2001static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
Divy Le Rayb47385b2008-05-21 18:56:26 -07002002 struct sk_buff *skb, int pad, int lro)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002003{
2004 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002005 struct sge_qset *qs = rspq_to_qset(rq);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002006 struct port_info *pi;
2007
Divy Le Ray4d22de32007-01-18 22:04:14 -05002008 skb_pull(skb, sizeof(*p) + pad);
Arnaldo Carvalho de Melo4c13eb62007-04-25 17:40:23 -07002009 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002010 pi = netdev_priv(skb->dev);
Divy Le Ray5e68b772009-03-26 16:39:29 +00002011 if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid &&
2012 p->csum == htons(0xffff) && !p->fragment) {
Karen Xiea109a5b2008-12-18 22:56:20 -08002013 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002014 skb->ip_summed = CHECKSUM_UNNECESSARY;
2015 } else
2016 skb->ip_summed = CHECKSUM_NONE;
David S. Miller0c8dfc82009-01-27 16:22:32 -08002017 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002018
2019 if (unlikely(p->vlan_valid)) {
2020 struct vlan_group *grp = pi->vlan_grp;
2021
Divy Le Rayb47385b2008-05-21 18:56:26 -07002022 qs->port_stats[SGE_PSTAT_VLANEX]++;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002023 if (likely(grp))
Divy Le Rayb47385b2008-05-21 18:56:26 -07002024 if (lro)
Herbert Xu7be2df42009-01-21 14:39:13 -08002025 vlan_gro_receive(&qs->napi, grp,
2026 ntohs(p->vlan), skb);
Karen Xiea109a5b2008-12-18 22:56:20 -08002027 else {
2028 if (unlikely(pi->iscsi_ipv4addr &&
2029 is_arp(skb))) {
2030 unsigned short vtag = ntohs(p->vlan) &
2031 VLAN_VID_MASK;
2032 skb->dev = vlan_group_get_device(grp,
2033 vtag);
2034 cxgb3_arp_process(adap, skb);
2035 }
Divy Le Rayb47385b2008-05-21 18:56:26 -07002036 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
2037 rq->polling);
Karen Xiea109a5b2008-12-18 22:56:20 -08002038 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002039 else
2040 dev_kfree_skb_any(skb);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002041 } else if (rq->polling) {
2042 if (lro)
Herbert Xu7be2df42009-01-21 14:39:13 -08002043 napi_gro_receive(&qs->napi, skb);
Karen Xiea109a5b2008-12-18 22:56:20 -08002044 else {
2045 if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
2046 cxgb3_arp_process(adap, skb);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002047 netif_receive_skb(skb);
Karen Xiea109a5b2008-12-18 22:56:20 -08002048 }
Divy Le Rayb47385b2008-05-21 18:56:26 -07002049 } else
Divy Le Ray4d22de32007-01-18 22:04:14 -05002050 netif_rx(skb);
2051}
2052
Divy Le Rayb47385b2008-05-21 18:56:26 -07002053static inline int is_eth_tcp(u32 rss)
2054{
2055 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2056}
2057
2058/**
Divy Le Rayb47385b2008-05-21 18:56:26 -07002059 * lro_add_page - add a page chunk to an LRO session
2060 * @adap: the adapter
2061 * @qs: the associated queue set
2062 * @fl: the free list containing the page chunk to add
2063 * @len: packet length
2064 * @complete: Indicates the last fragment of a frame
2065 *
2066 * Add a received packet contained in a page chunk to an existing LRO
2067 * session.
2068 */
2069static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2070 struct sge_fl *fl, int len, int complete)
2071{
2072 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
Herbert Xu76620aa2009-04-16 02:02:07 -07002073 struct sk_buff *skb = NULL;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002074 struct cpl_rx_pkt *cpl;
Herbert Xu76620aa2009-04-16 02:02:07 -07002075 struct skb_frag_struct *rx_frag;
2076 int nr_frags;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002077 int offset = 0;
2078
Herbert Xu76620aa2009-04-16 02:02:07 -07002079 if (!qs->nomem) {
2080 skb = napi_get_frags(&qs->napi);
2081 qs->nomem = !skb;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002082 }
2083
2084 fl->credits--;
2085
Divy Le Ray5e68b772009-03-26 16:39:29 +00002086 pci_dma_sync_single_for_cpu(adap->pdev,
2087 pci_unmap_addr(sd, dma_addr),
2088 fl->buf_size - SGE_PG_RSVD,
2089 PCI_DMA_FROMDEVICE);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002090
Divy Le Ray5e68b772009-03-26 16:39:29 +00002091 (*sd->pg_chunk.p_cnt)--;
2092 if (!*sd->pg_chunk.p_cnt)
2093 pci_unmap_page(adap->pdev,
2094 pci_unmap_addr(&sd->pg_chunk, mapping),
2095 fl->alloc_size,
2096 PCI_DMA_FROMDEVICE);
2097
Herbert Xu76620aa2009-04-16 02:02:07 -07002098 if (!skb) {
2099 put_page(sd->pg_chunk.page);
2100 if (complete)
2101 qs->nomem = 0;
2102 return;
2103 }
2104
2105 rx_frag = skb_shinfo(skb)->frags;
2106 nr_frags = skb_shinfo(skb)->nr_frags;
2107
2108 if (!nr_frags) {
2109 offset = 2 + sizeof(struct cpl_rx_pkt);
2110 qs->lro_va = sd->pg_chunk.va + 2;
2111 }
2112 len -= offset;
2113
Divy Le Ray5e68b772009-03-26 16:39:29 +00002114 prefetch(qs->lro_va);
Divy Le Rayb2b964f2009-03-12 21:13:59 +00002115
Divy Le Rayb47385b2008-05-21 18:56:26 -07002116 rx_frag += nr_frags;
2117 rx_frag->page = sd->pg_chunk.page;
2118 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2119 rx_frag->size = len;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002120
Herbert Xu76620aa2009-04-16 02:02:07 -07002121 skb->len += len;
2122 skb->data_len += len;
2123 skb->truesize += len;
2124 skb_shinfo(skb)->nr_frags++;
Divy Le Ray5e68b772009-03-26 16:39:29 +00002125
Divy Le Rayb47385b2008-05-21 18:56:26 -07002126 if (!complete)
2127 return;
2128
Herbert Xu76620aa2009-04-16 02:02:07 -07002129 skb->ip_summed = CHECKSUM_UNNECESSARY;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002130 cpl = qs->lro_va;
2131
2132 if (unlikely(cpl->vlan_valid)) {
2133 struct net_device *dev = qs->netdev;
2134 struct port_info *pi = netdev_priv(dev);
2135 struct vlan_group *grp = pi->vlan_grp;
2136
2137 if (likely(grp != NULL)) {
Herbert Xu76620aa2009-04-16 02:02:07 -07002138 vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan));
2139 return;
Divy Le Rayb47385b2008-05-21 18:56:26 -07002140 }
2141 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002142 napi_gro_frags(&qs->napi);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002143}
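/*
 * Editor's note: only the first chunk of an LRO'ed frame carries the
 * 2-byte alignment pad and the CPL header, hence the one-shot @offset and
 * the qs->lro_va bookkeeping above; subsequent chunks are pure payload,
 * and the saved CPL is inspected only once the frame completes (VLAN tag
 * extraction before handing the frags to GRO).
 */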
2144
Divy Le Ray4d22de32007-01-18 22:04:14 -05002145/**
2146 * handle_rsp_cntrl_info - handles control information in a response
2147 * @qs: the queue set corresponding to the response
2148 * @flags: the response control flags
Divy Le Ray4d22de32007-01-18 22:04:14 -05002149 *
2150 * Handles the control information of an SGE response, such as GTS
2151 * indications and completion credits for the queue set's Tx queues.
Divy Le Ray6195c712007-01-30 19:43:56 -08002152 * The HW coalesces credits, so we don't do any extra SW coalescing.
Divy Le Ray4d22de32007-01-18 22:04:14 -05002153 */
Divy Le Ray6195c712007-01-30 19:43:56 -08002154static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002155{
2156 unsigned int credits;
2157
2158#if USE_GTS
2159 if (flags & F_RSPD_TXQ0_GTS)
2160 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2161#endif
2162
Divy Le Ray4d22de32007-01-18 22:04:14 -05002163 credits = G_RSPD_TXQ0_CR(flags);
2164 if (credits)
2165 qs->txq[TXQ_ETH].processed += credits;
2166
Divy Le Ray6195c712007-01-30 19:43:56 -08002167 credits = G_RSPD_TXQ2_CR(flags);
2168 if (credits)
2169 qs->txq[TXQ_CTRL].processed += credits;
2170
Divy Le Ray4d22de32007-01-18 22:04:14 -05002171# if USE_GTS
2172 if (flags & F_RSPD_TXQ1_GTS)
2173 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2174# endif
Divy Le Ray6195c712007-01-30 19:43:56 -08002175 credits = G_RSPD_TXQ1_CR(flags);
2176 if (credits)
2177 qs->txq[TXQ_OFLD].processed += credits;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002178}
2179
2180/**
2181 * check_ring_db - check if we need to ring any doorbells
2182 * @adapter: the adapter
2183 * @qs: the queue set whose Tx queues are to be examined
2184 * @sleeping: indicates which Tx queue sent GTS
2185 *
2186 * Checks if some of a queue set's Tx queues need to ring their doorbells
2187 * to resume transmission after idling while they still have unprocessed
2188 * descriptors.
2189 */
2190static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2191 unsigned int sleeping)
2192{
2193 if (sleeping & F_RSPD_TXQ0_GTS) {
2194 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2195
2196 if (txq->cleaned + txq->in_use != txq->processed &&
2197 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2198 set_bit(TXQ_RUNNING, &txq->flags);
2199 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2200 V_EGRCNTX(txq->cntxt_id));
2201 }
2202 }
2203
2204 if (sleeping & F_RSPD_TXQ1_GTS) {
2205 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2206
2207 if (txq->cleaned + txq->in_use != txq->processed &&
2208 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2209 set_bit(TXQ_RUNNING, &txq->flags);
2210 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2211 V_EGRCNTX(txq->cntxt_id));
2212 }
2213 }
2214}
2215
2216/**
2217 * is_new_response - check if a response is newly written
2218 * @r: the response descriptor
2219 * @q: the response queue
2220 *
2221 * Returns true if a response descriptor contains a yet unprocessed
2222 * response.
2223 */
2224static inline int is_new_response(const struct rsp_desc *r,
2225 const struct sge_rspq *q)
2226{
2227 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2228}
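/*
 * Editor's note: this is the consumer half of the generation-bit scheme.
 * Hardware stamps the generation of the current pass into each response
 * it writes, and the driver flips q->gen whenever cidx wraps (see
 * process_responses() below), so a stale descriptor left over from the
 * previous pass still carries the old generation and compares unequal.
 * No producer/consumer pointer exchange with the hardware is needed.
 */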
2229
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002230static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2231{
2232 q->pg_skb = NULL;
2233 q->rx_recycle_buf = 0;
2234}
2235
Divy Le Ray4d22de32007-01-18 22:04:14 -05002236#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2237#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2238 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2239 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2240 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2241
2242/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2243#define NOMEM_INTR_DELAY 2500
2244
2245/**
2246 * process_responses - process responses from an SGE response queue
2247 * @adap: the adapter
2248 * @qs: the queue set to which the response queue belongs
2249 * @budget: how many responses can be processed in this round
2250 *
2251 * Process responses from an SGE response queue up to the supplied budget.
2252 * Responses include received packets as well as credits and other events
2253 * for the queues that belong to the response queue's queue set.
2254 * A negative budget is effectively unlimited.
2255 *
2256 * Additionally choose the interrupt holdoff time for the next interrupt
2257 * on this queue. If the system is under memory shortage use a fairly
2258 * long delay to help recovery.
2259 */
2260static int process_responses(struct adapter *adap, struct sge_qset *qs,
2261 int budget)
2262{
2263 struct sge_rspq *q = &qs->rspq;
2264 struct rsp_desc *r = &q->desc[q->cidx];
2265 int budget_left = budget;
Divy Le Ray6195c712007-01-30 19:43:56 -08002266 unsigned int sleeping = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002267 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2268 int ngathered = 0;
2269
2270 q->next_holdoff = q->holdoff_tmr;
2271
2272 while (likely(budget_left && is_new_response(r, q))) {
Divy Le Rayb47385b2008-05-21 18:56:26 -07002273 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002274 struct sk_buff *skb = NULL;
2275 u32 len, flags = ntohl(r->flags);
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002276 __be32 rss_hi = *(const __be32 *)r,
2277 rss_lo = r->rss_hdr.rss_hash_val;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002278
2279 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2280
2281 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2282 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2283 if (!skb)
2284 goto no_mem;
2285
2286 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2287 skb->data[0] = CPL_ASYNC_NOTIF;
2288 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2289 q->async_notif++;
2290 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2291 skb = get_imm_packet(r);
2292 if (unlikely(!skb)) {
Divy Le Raycf992af2007-05-30 21:10:47 -07002293no_mem:
Divy Le Ray4d22de32007-01-18 22:04:14 -05002294 q->next_holdoff = NOMEM_INTR_DELAY;
2295 q->nomem++;
2296 /* consume one credit since we tried */
2297 budget_left--;
2298 break;
2299 }
2300 q->imm_data++;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002301 ethpad = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002302 } else if ((len = ntohl(r->len_cq)) != 0) {
Divy Le Raycf992af2007-05-30 21:10:47 -07002303 struct sge_fl *fl;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002304
Divy Le Ray65ab8382009-02-04 16:31:39 -08002305 lro &= eth && is_eth_tcp(rss_hi);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002306
Divy Le Raycf992af2007-05-30 21:10:47 -07002307 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2308 if (fl->use_pages) {
2309 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002310
Divy Le Raycf992af2007-05-30 21:10:47 -07002311 prefetch(addr);
2312#if L1_CACHE_BYTES < 128
2313 prefetch(addr + L1_CACHE_BYTES);
2314#endif
Divy Le Raye0994eb2007-02-24 16:44:17 -08002315 __refill_fl(adap, fl);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002316 if (lro > 0) {
2317 lro_add_page(adap, qs, fl,
2318 G_RSPD_LEN(len),
2319 flags & F_RSPD_EOP);
2320 goto next_fl;
2321 }
Divy Le Raye0994eb2007-02-24 16:44:17 -08002322
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002323 skb = get_packet_pg(adap, fl, q,
2324 G_RSPD_LEN(len),
2325 eth ?
2326 SGE_RX_DROP_THRES : 0);
2327 q->pg_skb = skb;
Divy Le Raycf992af2007-05-30 21:10:47 -07002328 } else
Divy Le Raye0994eb2007-02-24 16:44:17 -08002329 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2330 eth ? SGE_RX_DROP_THRES : 0);
Divy Le Raycf992af2007-05-30 21:10:47 -07002331 if (unlikely(!skb)) {
2332 if (!eth)
2333 goto no_mem;
2334 q->rx_drops++;
2335 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2336 __skb_pull(skb, 2);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002337next_fl:
Divy Le Ray4d22de32007-01-18 22:04:14 -05002338 if (++fl->cidx == fl->size)
2339 fl->cidx = 0;
2340 } else
2341 q->pure_rsps++;
2342
2343 if (flags & RSPD_CTRL_MASK) {
2344 sleeping |= flags & RSPD_GTS_MASK;
Divy Le Ray6195c712007-01-30 19:43:56 -08002345 handle_rsp_cntrl_info(qs, flags);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002346 }
2347
2348 r++;
2349 if (unlikely(++q->cidx == q->size)) {
2350 q->cidx = 0;
2351 q->gen ^= 1;
2352 r = q->desc;
2353 }
2354 prefetch(r);
2355
2356 if (++q->credits >= (q->size / 4)) {
2357 refill_rspq(adap, q, q->credits);
2358 q->credits = 0;
2359 }
2360
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002361 packet_complete = flags &
2362 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2363 F_RSPD_ASYNC_NOTIF);
2364
2365 if (skb != NULL && packet_complete) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05002366 if (eth)
Divy Le Rayb47385b2008-05-21 18:56:26 -07002367 rx_eth(adap, q, skb, ethpad, lro);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002368 else {
Divy Le Rayafefce62007-11-16 11:22:21 -08002369 q->offload_pkts++;
Divy Le Raycf992af2007-05-30 21:10:47 -07002370 /* Preserve the RSS info in csum & priority */
2371 skb->csum = rss_hi;
2372 skb->priority = rss_lo;
2373 ngathered = rx_offload(&adap->tdev, q, skb,
2374 offload_skbs,
Divy Le Raye0994eb2007-02-24 16:44:17 -08002375 ngathered);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002376 }
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002377
2378 if (flags & F_RSPD_EOP)
Divy Le Rayb47385b2008-05-21 18:56:26 -07002379 clear_rspq_bufstate(q);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002380 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002381 --budget_left;
2382 }
2383
Divy Le Ray4d22de32007-01-18 22:04:14 -05002384 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002385
Divy Le Ray4d22de32007-01-18 22:04:14 -05002386 if (sleeping)
2387 check_ring_db(adap, qs, sleeping);
2388
2389 smp_mb(); /* commit Tx queue .processed updates */
2390 if (unlikely(qs->txq_stopped != 0))
2391 restart_tx(qs);
2392
2393 budget -= budget_left;
2394 return budget;
2395}
2396
2397static inline int is_pure_response(const struct rsp_desc *r)
2398{
Roland Dreierc5419e62008-11-28 21:55:42 -08002399 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002400
2401 return (n | r->len_cq) == 0;
2402}
2403
2404/**
2405 * napi_rx_handler - the NAPI handler for Rx processing
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002406 * @napi: the napi instance
Divy Le Ray4d22de32007-01-18 22:04:14 -05002407 * @budget: how many packets we can process in this round
2408 *
2409 * Handler for new data events when using NAPI.
2410 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002411static int napi_rx_handler(struct napi_struct *napi, int budget)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002412{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002413 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2414 struct adapter *adap = qs->adap;
2415 int work_done = process_responses(adap, qs, budget);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002416
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002417 if (likely(work_done < budget)) {
2418 napi_complete(napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002419
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002420 /*
2421 * Because we don't atomically flush the following
2422 * write it is possible that in very rare cases it can
2423 * reach the device in a way that races with a new
2424 * response being written plus an error interrupt
2425 * causing the NAPI interrupt handler below to return
2426 * unhandled status to the OS. To protect against
2427 * this would require flushing the write and doing
2428 * both the write and the flush with interrupts off.
2429 * Way too expensive and unjustifiable given the
2430 * rarity of the race.
2431 *
2432 * The race cannot happen at all with MSI-X.
2433 */
2434 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2435 V_NEWTIMER(qs->rspq.next_holdoff) |
2436 V_NEWINDEX(qs->rspq.cidx));
2437 }
2438 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002439}
2440
2441/*
2442 * Returns true if the device is already scheduled for polling.
2443 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002444static inline int napi_is_scheduled(struct napi_struct *napi)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002445{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002446 return test_bit(NAPI_STATE_SCHED, &napi->state);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002447}
2448
2449/**
2450 * process_pure_responses - process pure responses from a response queue
2451 * @adap: the adapter
2452 * @qs: the queue set owning the response queue
2453 * @r: the first pure response to process
2454 *
2455 * A simpler version of process_responses() that handles only pure (i.e.,
2456 * non-data-carrying) responses. Such responses are too lightweight to
2457 * justify calling a softirq under NAPI, so we handle them specially in
2458 * the interrupt handler. The function is called with a pointer to a
2459 * response, which the caller must ensure is a valid pure response.
2460 *
2461 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2462 */
2463static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2464 struct rsp_desc *r)
2465{
2466 struct sge_rspq *q = &qs->rspq;
Divy Le Ray6195c712007-01-30 19:43:56 -08002467 unsigned int sleeping = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002468
2469 do {
2470 u32 flags = ntohl(r->flags);
2471
2472 r++;
2473 if (unlikely(++q->cidx == q->size)) {
2474 q->cidx = 0;
2475 q->gen ^= 1;
2476 r = q->desc;
2477 }
2478 prefetch(r);
2479
2480 if (flags & RSPD_CTRL_MASK) {
2481 sleeping |= flags & RSPD_GTS_MASK;
Divy Le Ray6195c712007-01-30 19:43:56 -08002482 handle_rsp_cntrl_info(qs, flags);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002483 }
2484
2485 q->pure_rsps++;
2486 if (++q->credits >= (q->size / 4)) {
2487 refill_rspq(adap, q, q->credits);
2488 q->credits = 0;
2489 }
2490 } while (is_new_response(r, q) && is_pure_response(r));
2491
Divy Le Ray4d22de32007-01-18 22:04:14 -05002492 if (sleeping)
2493 check_ring_db(adap, qs, sleeping);
2494
2495 smp_mb(); /* commit Tx queue .processed updates */
2496 if (unlikely(qs->txq_stopped != 0))
2497 restart_tx(qs);
2498
2499 return is_new_response(r, q);
2500}
2501
2502/**
2503 * handle_responses - decide what to do with new responses in NAPI mode
2504 * @adap: the adapter
2505 * @q: the response queue
2506 *
2507 * This is used by the NAPI interrupt handlers to decide what to do with
2508 * new SGE responses. If there are no new responses it returns -1. If
2509 * there are new responses and they are pure (i.e., non-data carrying)
2510 * it handles them straight in hard interrupt context as they are very
2511 * cheap and don't deliver any packets. Finally, if there are any data
2512 * signaling responses it schedules the NAPI handler. Returns 1 if it
2513 * schedules NAPI, 0 if all new responses were pure.
2514 *
2515 * The caller must ascertain NAPI is not already running.
2516 */
2517static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2518{
2519 struct sge_qset *qs = rspq_to_qset(q);
2520 struct rsp_desc *r = &q->desc[q->cidx];
2521
2522 if (!is_new_response(r, q))
2523 return -1;
2524 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2525 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2526 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2527 return 0;
2528 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002529 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002530 return 1;
2531}
2532
2533/*
2534 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2535 * (i.e., response queue serviced in hard interrupt).
2536 */
2537irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2538{
2539 struct sge_qset *qs = cookie;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002540 struct adapter *adap = qs->adap;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002541 struct sge_rspq *q = &qs->rspq;
2542
2543 spin_lock(&q->lock);
2544 if (process_responses(adap, qs, -1) == 0)
2545 q->unhandled_irqs++;
2546 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2547 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2548 spin_unlock(&q->lock);
2549 return IRQ_HANDLED;
2550}
2551
2552/*
2553 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2554 * (i.e., response queue serviced by NAPI polling).
2555 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -07002556static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002557{
2558 struct sge_qset *qs = cookie;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002559 struct sge_rspq *q = &qs->rspq;
2560
2561 spin_lock(&q->lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002562
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002563 if (handle_responses(qs->adap, q) < 0)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002564 q->unhandled_irqs++;
2565 spin_unlock(&q->lock);
2566 return IRQ_HANDLED;
2567}
2568
2569/*
2570 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2571 * SGE response queues as well as error and other async events as they all use
2572 * the same MSI vector. We use one SGE response queue per port in this mode
2573 * and protect all response queues with queue 0's lock.
2574 */
2575static irqreturn_t t3_intr_msi(int irq, void *cookie)
2576{
2577 int new_packets = 0;
2578 struct adapter *adap = cookie;
2579 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2580
2581 spin_lock(&q->lock);
2582
2583 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2584 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2585 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2586 new_packets = 1;
2587 }
2588
2589 if (adap->params.nports == 2 &&
2590 process_responses(adap, &adap->sge.qs[1], -1)) {
2591 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2592
2593 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2594 V_NEWTIMER(q1->next_holdoff) |
2595 V_NEWINDEX(q1->cidx));
2596 new_packets = 1;
2597 }
2598
2599 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2600 q->unhandled_irqs++;
2601
2602 spin_unlock(&q->lock);
2603 return IRQ_HANDLED;
2604}
2605
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002606static int rspq_check_napi(struct sge_qset *qs)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002607{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002608 struct sge_rspq *q = &qs->rspq;
2609
2610 if (!napi_is_scheduled(&qs->napi) &&
2611 is_new_response(&q->desc[q->cidx], q)) {
2612 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002613 return 1;
2614 }
2615 return 0;
2616}
2617
2618/*
2619 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2620 * by NAPI polling). Handles data events from SGE response queues as well as
2621 * error and other async events as they all use the same MSI vector. We use
2622 * one SGE response queue per port in this mode and protect all response
2623 * queues with queue 0's lock.
2624 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -07002625static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002626{
2627 int new_packets;
2628 struct adapter *adap = cookie;
2629 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2630
2631 spin_lock(&q->lock);
2632
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002633 new_packets = rspq_check_napi(&adap->sge.qs[0]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002634 if (adap->params.nports == 2)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002635 new_packets += rspq_check_napi(&adap->sge.qs[1]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002636 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2637 q->unhandled_irqs++;
2638
2639 spin_unlock(&q->lock);
2640 return IRQ_HANDLED;
2641}
2642
2643/*
2644 * A helper function that processes responses and issues GTS.
2645 */
2646static inline int process_responses_gts(struct adapter *adap,
2647 struct sge_rspq *rq)
2648{
2649 int work;
2650
2651 work = process_responses(adap, rspq_to_qset(rq), -1);
2652 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2653 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2654 return work;
2655}
2656
2657/*
2658 * The legacy INTx interrupt handler. This needs to handle data events from
2659 * SGE response queues as well as error and other async events as they all use
2660 * the same interrupt pin. We use one SGE response queue per port in this mode
2661 * and protect all response queues with queue 0's lock.
2662 */
2663static irqreturn_t t3_intr(int irq, void *cookie)
2664{
2665 int work_done, w0, w1;
2666 struct adapter *adap = cookie;
2667 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2668 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2669
2670 spin_lock(&q0->lock);
2671
2672 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2673 w1 = adap->params.nports == 2 &&
2674 is_new_response(&q1->desc[q1->cidx], q1);
2675
2676 if (likely(w0 | w1)) {
2677 t3_write_reg(adap, A_PL_CLI, 0);
2678 t3_read_reg(adap, A_PL_CLI); /* flush */
2679
2680 if (likely(w0))
2681 process_responses_gts(adap, q0);
2682
2683 if (w1)
2684 process_responses_gts(adap, q1);
2685
2686 work_done = w0 | w1;
2687 } else
2688 work_done = t3_slow_intr_handler(adap);
2689
2690 spin_unlock(&q0->lock);
2691 return IRQ_RETVAL(work_done != 0);
2692}
2693
2694/*
2695 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2696 * Handles data events from SGE response queues as well as error and other
2697 * async events as they all use the same interrupt pin. We use one SGE
2698 * response queue per port in this mode and protect all response queues with
2699 * queue 0's lock.
2700 */
2701static irqreturn_t t3b_intr(int irq, void *cookie)
2702{
2703 u32 map;
2704 struct adapter *adap = cookie;
2705 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2706
2707 t3_write_reg(adap, A_PL_CLI, 0);
2708 map = t3_read_reg(adap, A_SG_DATA_INTR);
2709
2710 if (unlikely(!map)) /* shared interrupt, most likely */
2711 return IRQ_NONE;
2712
2713 spin_lock(&q0->lock);
2714
2715 if (unlikely(map & F_ERRINTR))
2716 t3_slow_intr_handler(adap);
2717
2718 if (likely(map & 1))
2719 process_responses_gts(adap, q0);
2720
2721 if (map & 2)
2722 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2723
2724 spin_unlock(&q0->lock);
2725 return IRQ_HANDLED;
2726}
2727
2728/*
2729 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2730 * Handles data events from SGE response queues as well as error and other
2731 * async events as they all use the same interrupt pin. We use one SGE
2732 * response queue per port in this mode and protect all response queues with
2733 * queue 0's lock.
2734 */
2735static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2736{
2737 u32 map;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002738 struct adapter *adap = cookie;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002739 struct sge_qset *qs0 = &adap->sge.qs[0];
2740 struct sge_rspq *q0 = &qs0->rspq;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002741
2742 t3_write_reg(adap, A_PL_CLI, 0);
2743 map = t3_read_reg(adap, A_SG_DATA_INTR);
2744
2745 if (unlikely(!map)) /* shared interrupt, most likely */
2746 return IRQ_NONE;
2747
2748 spin_lock(&q0->lock);
2749
2750 if (unlikely(map & F_ERRINTR))
2751 t3_slow_intr_handler(adap);
2752
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002753 if (likely(map & 1))
2754 napi_schedule(&qs0->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002755
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002756 if (map & 2)
2757 napi_schedule(&adap->sge.qs[1].napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002758
2759 spin_unlock(&q0->lock);
2760 return IRQ_HANDLED;
2761}
2762
2763/**
2764 * t3_intr_handler - select the top-level interrupt handler
2765 * @adap: the adapter
2766 * @polling: whether using NAPI to service response queues
2767 *
2768 * Selects the top-level interrupt handler based on the type of interrupts
2769 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2770 * response queues.
2771 */
Jeff Garzik7c239972007-10-19 03:12:20 -04002772irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002773{
2774 if (adap->flags & USING_MSIX)
2775 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2776 if (adap->flags & USING_MSI)
2777 return polling ? t3_intr_msi_napi : t3_intr_msi;
2778 if (adap->params.rev > 0)
2779 return polling ? t3b_intr_napi : t3b_intr;
2780 return t3_intr;
2781}
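/*
 * Editor's sketch (hypothetical, loosely modeled on the main driver): the
 * selected handler would typically be installed along these lines for the
 * non-MSI-X cases:
 */
static inline int example_setup_irq(struct adapter *adap, int polling)
{
	return request_irq(adap->pdev->irq, t3_intr_handler(adap, polling),
			   IRQF_SHARED, "cxgb3", adap);
}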
2782
Divy Le Rayb8819552007-12-17 18:47:31 -08002783#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2784 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2785 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2786 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2787 F_HIRCQPARITYERROR)
2788#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2789#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2790 F_RSPQDISABLED)
2791
Divy Le Ray4d22de32007-01-18 22:04:14 -05002792/**
2793 * t3_sge_err_intr_handler - SGE async event interrupt handler
2794 * @adapter: the adapter
2795 *
2796 * Interrupt handler for SGE asynchronous (non-data) events.
2797 */
2798void t3_sge_err_intr_handler(struct adapter *adapter)
2799{
Divy Le Rayfc882192009-03-12 21:14:09 +00002800 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2801 ~F_FLEMPTY;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002802
Divy Le Rayb8819552007-12-17 18:47:31 -08002803 if (status & SGE_PARERR)
2804 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2805 status & SGE_PARERR);
2806 if (status & SGE_FRAMINGERR)
2807 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2808 status & SGE_FRAMINGERR);
2809
Divy Le Ray4d22de32007-01-18 22:04:14 -05002810 if (status & F_RSPQCREDITOVERFOW)
2811 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2812
2813 if (status & F_RSPQDISABLED) {
2814 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2815
2816 CH_ALERT(adapter,
2817 "packet delivered to disabled response queue "
2818 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2819 }
2820
Divy Le Ray6e3f03b2007-08-21 20:49:10 -07002821 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2822 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
2823 	 status & F_HIPIODRBDROPERR ? "high" : "low");
2824
Divy Le Ray4d22de32007-01-18 22:04:14 -05002825 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
Divy Le Rayb8819552007-12-17 18:47:31 -08002826 if (status & SGE_FATALERR)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002827 t3_fatal_err(adapter);
2828}
2829
2830/**
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002831 * sge_timer_tx - perform periodic maintenance of an SGE qset
Divy Le Ray4d22de32007-01-18 22:04:14 -05002832 * @data: the SGE queue set to maintain
2833 *
2834 * Runs periodically from a timer to perform maintenance of an SGE queue
2835 * set. It performs a single task:
2836 *
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002837 * Cleans up any completed Tx descriptors that may still be pending.
Divy Le Ray4d22de32007-01-18 22:04:14 -05002838 * Normal descriptor cleanup happens when new packets are added to a Tx
2839 * queue so this timer is relatively infrequent and does any cleanup only
2840 * if the Tx queue has not seen any new packets in a while. We make a
2841 * best effort attempt to reclaim descriptors, in that we don't wait
2842 * around if we cannot get a queue's lock (which most likely is because
2843 * someone else is queueing new packets and so will also handle the clean
2844 * up). Since control queues use immediate data exclusively we don't
2845 * bother cleaning them up here.
2846 *
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002847 */
2848static void sge_timer_tx(unsigned long data)
2849{
2850 struct sge_qset *qs = (struct sge_qset *)data;
2851 struct port_info *pi = netdev_priv(qs->netdev);
2852 struct adapter *adap = pi->adapter;
2853 unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2854 unsigned long next_period;
2855
Divy Le Rayc3a8c5b2009-05-29 12:52:38 +00002856 if (__netif_tx_trylock(qs->tx_q)) {
2857 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2858 TX_RECLAIM_TIMER_CHUNK);
2859 __netif_tx_unlock(qs->tx_q);
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002860 }
Divy Le Rayc3a8c5b2009-05-29 12:52:38 +00002861
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002862 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2863 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2864 TX_RECLAIM_TIMER_CHUNK);
2865 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2866 }
2867
2868 next_period = TX_RECLAIM_PERIOD >>
Divy Le Rayc3a8c5b2009-05-29 12:52:38 +00002869 (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2870 TX_RECLAIM_TIMER_CHUNK);
Divy Le Ray42c8ea12009-03-12 21:14:04 +00002871 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2872}
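/*
 * Editor's note (worked example): the shift above adapts the reclaim rate
 * to load.  If either queue returned a full TX_RECLAIM_TIMER_CHUNK of
 * descriptors, the quotient is 1 and the timer refires after
 * TX_RECLAIM_PERIOD / 2; a fully idle queue set keeps the full period.
 */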

/*
 * sge_timer_rx - perform periodic maintenance of an SGE qset
 * @data: the SGE queue set to maintain
 *
 * a) Replenishes Rx queues that have run out due to memory shortage.
 * Normally new Rx buffers are added when existing ones are consumed but
 * when out of memory a queue can become empty.  We try to add only a few
 * buffers here, the queue will be replenished fully as these new buffers
 * are used up if memory shortage has subsided.
 *
 * b) Return coalesced response queue credits in case a response queue is
 * starved.
 *
 */
static void sge_timer_rx(unsigned long data)
{
	spinlock_t *lock;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct port_info *pi = netdev_priv(qs->netdev);
	struct adapter *adap = pi->adapter;
	u32 status;

	lock = adap->params.rev > 0 ?
	       &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;

	if (!spin_trylock_irq(lock))
		goto out;

	if (napi_is_scheduled(&qs->napi))
		goto unlock;

	if (adap->params.rev < 4) {
		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);

		if (status & (1 << qs->rspq.cntxt_id)) {
			qs->rspq.starved++;
			if (qs->rspq.credits) {
				qs->rspq.credits--;
				refill_rspq(adap, &qs->rspq, 1);
				qs->rspq.restarted++;
				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
					     1 << qs->rspq.cntxt_id);
			}
		}
	}

	if (qs->fl[0].credits < qs->fl[0].size)
		__refill_fl(adap, &qs->fl[0]);
	if (qs->fl[1].credits < qs->fl[1].size)
		__refill_fl(adap, &qs->fl[1]);

unlock:
	spin_unlock_irq(lock);
out:
	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
}

/**
 * t3_update_qset_coalesce - update coalescing settings for a queue set
 * @qs: the SGE queue set
 * @p: new queue set parameters
 *
 * Update the coalescing settings for an SGE queue set.  Nothing is done
 * if the queue set is not initialized yet.
 */
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
{
	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
	qs->rspq.polling = p->polling;
	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
}
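
/*
 * Worked example (sketch): t3_sge_init() below programs A_SG_TIMER_TICK to
 * core_ticks_per_usec(adap) / 10, i.e. one SGE timer tick per 0.1 us, so
 * the conversion above is simply coalesce_usecs * 10 ticks, clamped to a
 * minimum of one tick.  The 5 us default from t3_sge_prep() thus becomes
 * holdoff_tmr == 50.
 */
static inline unsigned int __maybe_unused
example_usecs_to_holdoff(unsigned int coalesce_usecs)
{
	return max(coalesce_usecs * 10, 1U);	/* can't be 0 */
}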

/**
 * t3_sge_alloc_qset - initialize an SGE queue set
 * @adapter: the adapter
 * @id: the queue set id
 * @nports: how many Ethernet ports will be using this queue set
 * @irq_vec_idx: the IRQ vector index for response queue interrupts
 * @p: configuration parameters for this queue set
 * @ntxq: number of Tx queues for the queue set
 * @dev: net device associated with this queue set
 * @netdevq: net device TX queue associated with this queue set
 *
 * Allocate resources and initialize an SGE queue set.  A queue set
 * comprises a response queue, two Rx free-buffer queues, and up to 3
 * Tx queues.  The Tx queues are assigned roles in the order Ethernet
 * queue, offload queue, and control queue.
 */
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
		      int irq_vec_idx, const struct qset_params *p,
		      int ntxq, struct net_device *dev,
		      struct netdev_queue *netdevq)
{
	int i, avail, ret = -ENOMEM;
	struct sge_qset *q = &adapter->sge.qs[id];

	init_qset_cntxt(q, id);
	setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
	setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);

	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
	if (!q->fl[0].desc)
		goto err;

	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
	if (!q->fl[1].desc)
		goto err;

	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
				  sizeof(struct rsp_desc), 0,
				  &q->rspq.phys_addr, NULL);
	if (!q->rspq.desc)
		goto err;

	for (i = 0; i < ntxq; ++i) {
		/*
		 * The control queue always uses immediate data so does not
		 * need to keep track of any sk_buffs.
		 */
		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);

		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
					    sizeof(struct tx_desc), sz,
					    &q->txq[i].phys_addr,
					    &q->txq[i].sdesc);
		if (!q->txq[i].desc)
			goto err;

		q->txq[i].gen = 1;
		q->txq[i].size = p->txq_size[i];
		spin_lock_init(&q->txq[i].lock);
		skb_queue_head_init(&q->txq[i].sendq);
	}

	tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
		     (unsigned long)q);
	tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
		     (unsigned long)q);

	q->fl[0].gen = q->fl[1].gen = 1;
	q->fl[0].size = p->fl_size;
	q->fl[1].size = p->jumbo_size;

	q->rspq.gen = 1;
	q->rspq.size = p->rspq_size;
	spin_lock_init(&q->rspq.lock);
	skb_queue_head_init(&q->rspq.rx_queue);

	q->txq[TXQ_ETH].stop_thres = nports *
	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);

#if FL0_PG_CHUNK_SIZE > 0
	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
#else
	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
#endif
#if FL1_PG_CHUNK_SIZE > 0
	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
#else
	q->fl[1].buf_size = is_offload(adapter) ?
		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
#endif

	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
	q->fl[0].order = FL0_PG_ORDER;
	q->fl[1].order = FL1_PG_ORDER;
	q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
	q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;

	spin_lock_irq(&adapter->sge.reg_lock);

	/* FL threshold comparison uses < */
	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
				   q->rspq.phys_addr, q->rspq.size,
				   q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
	if (ret)
		goto err_unlock;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
					  q->fl[i].phys_addr, q->fl[i].size,
					  q->fl[i].buf_size - SGE_PG_RSVD,
					  p->cong_thres, 1, 0);
		if (ret)
			goto err_unlock;
	}

	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
				 1, 0);
	if (ret)
		goto err_unlock;

	if (ntxq > 1) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
					 USE_GTS, SGE_CNTXT_OFLD, id,
					 q->txq[TXQ_OFLD].phys_addr,
					 q->txq[TXQ_OFLD].size, 0, 1, 0);
		if (ret)
			goto err_unlock;
	}

	if (ntxq > 2) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
					 SGE_CNTXT_CTRL, id,
					 q->txq[TXQ_CTRL].phys_addr,
					 q->txq[TXQ_CTRL].size,
					 q->txq[TXQ_CTRL].token, 1, 0);
		if (ret)
			goto err_unlock;
	}

	spin_unlock_irq(&adapter->sge.reg_lock);

	q->adap = adapter;
	q->netdev = dev;
	q->tx_q = netdevq;
	t3_update_qset_coalesce(q, p);

	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
			  GFP_KERNEL | __GFP_COMP);
	if (!avail) {
		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
		goto err;
	}
	if (avail < q->fl[0].size)
		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
			avail);

	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
			  GFP_KERNEL | __GFP_COMP);
	if (avail < q->fl[1].size)
		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
			avail);
	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);

	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
		     V_NEWTIMER(q->rspq.holdoff_tmr));

	return 0;

err_unlock:
	spin_unlock_irq(&adapter->sge.reg_lock);
err:
	t3_free_qset(adapter, q);
	return ret;
}
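
/*
 * Usage sketch (illustrative only, with placeholder choices): one way a
 * driver bring-up path could call t3_sge_alloc_qset(), allocating one
 * queue set per port with all SGE_TXQ_PER_SET Tx queues and using the
 * port index as the IRQ vector index.  The real caller may map queue
 * sets, Tx queues, and IRQ vectors differently; error handling here is
 * reduced to freeing everything.
 */
static int __maybe_unused example_setup_qsets(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		err = t3_sge_alloc_qset(adap, pi->first_qset, 1, i,
					&adap->params.sge.qset[pi->first_qset],
					SGE_TXQ_PER_SET, dev,
					netdev_get_tx_queue(dev, 0));
		if (err) {
			t3_free_sge_resources(adap);
			return err;
		}
	}
	return 0;
}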

/**
 * t3_start_sge_timers - start SGE timer callbacks
 * @adap: the adapter
 *
 * Starts each SGE queue set's timer callback.
 */
void t3_start_sge_timers(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i) {
		struct sge_qset *q = &adap->sge.qs[i];

		if (q->tx_reclaim_timer.function)
			mod_timer(&q->tx_reclaim_timer,
				  jiffies + TX_RECLAIM_PERIOD);

		if (q->rx_reclaim_timer.function)
			mod_timer(&q->rx_reclaim_timer,
				  jiffies + RX_RECLAIM_PERIOD);
	}
}

/**
 * t3_stop_sge_timers - stop SGE timer callbacks
 * @adap: the adapter
 *
 * Stops each SGE queue set's timer callback.
 */
void t3_stop_sge_timers(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i) {
		struct sge_qset *q = &adap->sge.qs[i];

		if (q->tx_reclaim_timer.function)
			del_timer_sync(&q->tx_reclaim_timer);
		if (q->rx_reclaim_timer.function)
			del_timer_sync(&q->rx_reclaim_timer);
	}
}

/**
 * t3_free_sge_resources - free SGE resources
 * @adap: the adapter
 *
 * Frees resources used by the SGE queue sets.
 */
void t3_free_sge_resources(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i)
		t3_free_qset(adap, &adap->sge.qs[i]);
}

/**
 * t3_sge_start - enable SGE
 * @adap: the adapter
 *
 * Enables the SGE for DMAs.  This is the last step in starting packet
 * transfers.
 */
void t3_sge_start(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
}

/**
 * t3_sge_stop - disable SGE operation
 * @adap: the adapter
 *
 * Disables the DMA engine.  This can be called in emergencies (e.g.,
 * from error interrupts) or from normal process context.  In the latter
 * case it also disables any pending queue restart tasklets.  Note that
 * if it is called in interrupt context it cannot disable the restart
 * tasklets as it cannot wait; however, the tasklets will have no effect
 * since the doorbells are disabled, and the driver will call this again
 * later from process context, at which time the tasklets will be stopped
 * if they are still running.
 */
void t3_sge_stop(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
	if (!in_interrupt()) {
		int i;

		for (i = 0; i < SGE_QSETS; ++i) {
			struct sge_qset *qs = &adap->sge.qs[i];

			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
		}
	}
}

/**
 * t3_sge_init - initialize SGE
 * @adap: the adapter
 * @p: the SGE parameters
 *
 * Performs SGE initialization needed every time after a chip reset.
 * We do not initialize any of the queue sets here, instead the driver
 * top-level must request those individually.  We also do not enable DMA
 * here, that should be done after the queues have been set up.
 */
void t3_sge_init(struct adapter *adap, struct sge_params *p)
{
	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);

	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	       F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
	       V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	       V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
		     adap->params.rev < T3_REV_C ? 1000 : 500);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}
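
/*
 * Worked example (sketch) of the V_USERSPACESIZE computation above: for a
 * power-of-two BAR2, pci_resource_len(pdev, 2) >> 12 is its length in 4KB
 * pages and ffs() returns log2(pages) + 1.  A hypothetical 64KB BAR2
 * gives ups == ffs(16) == 5, so the field is encoded as ups - 1 == 4.
 */
static inline unsigned int __maybe_unused
example_userspace_size(resource_size_t bar2_len)
{
	unsigned int ups = ffs(bar2_len >> 12);

	return ups ? ups - 1 : 0;
}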

/**
 * t3_sge_prep - one-time SGE initialization
 * @adap: the associated adapter
 * @p: SGE parameters
 *
 * Performs one-time initialization of SGE SW state.  Includes determining
 * defaults for the assorted SGE parameters, which admins can change until
 * they are used to initialize the SGE.
 */
void t3_sge_prep(struct adapter *adap, struct sge_params *p)
{
	int i;

	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = p->qset + i;

		q->polling = adap->params.rev > 0;
		q->coalesce_usecs = 5;
		q->rspq_size = 1024;
		q->fl_size = 1024;
		q->jumbo_size = 512;
		q->txq_size[TXQ_ETH] = 1024;
		q->txq_size[TXQ_OFLD] = 1024;
		q->txq_size[TXQ_CTRL] = 256;
		q->cong_thres = 0;
	}

	spin_lock_init(&adap->sge.reg_lock);
}

/**
 * t3_get_desc - dump an SGE descriptor for debugging purposes
 * @qs: the queue set
 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
 * @idx: the descriptor index in the queue
 * @data: where to dump the descriptor contents
 *
 * Dumps the contents of a HW descriptor of an SGE queue.  Returns the
 * size of the descriptor.
 */
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
		unsigned char *data)
{
	if (qnum >= 6)
		return -EINVAL;

	if (qnum < 3) {
		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
			return -EINVAL;
		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
		return sizeof(struct tx_desc);
	}

	if (qnum == 3) {
		if (!qs->rspq.desc || idx >= qs->rspq.size)
			return -EINVAL;
		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
		return sizeof(struct rsp_desc);
	}

	qnum -= 4;
	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
		return -EINVAL;
	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
	return sizeof(struct rx_desc);
}
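
/*
 * Usage sketch (illustrative): hex-dumping the first Ethernet Tx
 * descriptor of queue set 0, e.g. from a debug hook.  The qnum encoding
 * is the one documented above: 0..2 Tx, 3 response, 4..5 free lists.
 */
static void __maybe_unused example_dump_first_tx_desc(struct adapter *adap)
{
	unsigned char buf[sizeof(struct tx_desc)];
	int len = t3_get_desc(&adap->sge.qs[0], TXQ_ETH, 0, buf);

	if (len > 0)
		print_hex_dump(KERN_DEBUG, "txd: ", DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, true);
}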